Pre-create and pre-connect hosts in tests
License: MIT
Signed-off-by: Hector Sanjuan <code@hector.link>
hsanjuan committed Mar 29, 2018
1 parent d8ee74e · commit 028384b
Showing 3 changed files with 73 additions and 45 deletions.
49 changes: 31 additions & 18 deletions .travis.yml
@@ -6,21 +6,37 @@ go:
- '1.9'
services:
- docker
before_install:
- docker pull ipfs/go-ipfs
- sudo apt-get update
- sudo apt-get install -y jq curl
install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
- go get github.com/golang/lint/golint
- make deps
script:
- make docker
- make check
- make service && make ctl && ./coverage.sh
- make install
- make test_sharness && make clean_sharness

cache:
directories:
- $GOPATH/src/gx

install: true

jobs:
include:
- stage: "build and test (1: tests | 2: checks | 3: docker | 4: sharness)"
script:
- go get -u github.com/mattn/goveralls
- go get -u golang.org/x/tools/cmd/cover
- make deps
- ./coverage.sh
- script:
- go get -u github.com/golang/lint/golint
- make deps
- make check
- make service
- make ctl
- script:
- make docker
- script:
- sudo apt-get update
- sudo apt-get install -y jq curl
- make deps
- make install
- docker pull ipfs/go-ipfs
- make test_sharness && make clean_sharness

env:
global:
- secure: M3K3y9+D933tCda7+blW3qqVV8fA6PBDRdJoQvmQc1f0XYbWinJ+bAziFp6diKkF8sMQ+cPwLMONYJuaNT2h7/PkG+sIwF0PuUo5VVCbhGmSDrn2qOjmSnfawNs8wW31f44FQA8ICka1EFZcihohoIMf0e5xZ0tXA9jqw+ngPJiRnv4zyzC3r6t4JMAZcbS9w4KTYpIev5Yj72eCvk6lGjadSVCDVXo2sVs27tNt+BSgtMXiH6Sv8GLOnN2kFspGITgivHgB/jtU6QVtFXB+cbBJJAs3lUYnzmQZ5INecbjweYll07ilwFiCVNCX67+L15gpymKGJbQggloIGyTWrAOa2TMaB/bvblzwwQZ8wE5P3Rss5L0TFkUAcdU+3BUHM+TwV4e8F9x10v1PjgWNBRJQzd1sjKKgGUBCeyCY7VeYDKn9AXI5llISgY/AAfCZwm2cbckMHZZJciMjm+U3Q1FCF+rfhlvUcMG1VEj8r9cGpmWIRjFYVm0NmpUDDNjlC3/lUfTCOOJJyM254EUw63XxabbK6EtDN1yQe8kYRcXH//2rtEwgtMBgqHVY+OOkekzGz8Ra3EBkh6jXrAQL3zKu/GwRlK7/a1OU5MQ7dWcTjbx1AQ6Zfyjg5bZ+idqPgMbqM9Zn2+OaSby8HEEXS0QeZVooDVf/6wdYO4MQ/0A=
@@ -35,6 +51,3 @@ deploy:
script: docker run -v $(pwd):$(pwd) -t snapcore/snapcraft sh -c "apt update -qq
&& cd $(pwd) && snapcraft && snapcraft push *.snap --release edge"
skip_cleanup: true
cache:
directories:
- $GOPATH/src/gx
2 changes: 1 addition & 1 deletion ci/Jenkinsfile
@@ -1,2 +1,2 @@
golang([test: "go test -v -loglevel ERROR ./..."])
golang([test: "go test -v ./..."])

67 changes: 41 additions & 26 deletions ipfscluster_test.go
@@ -1,6 +1,7 @@
package ipfscluster

import (
"context"
"flag"
"fmt"
"math/rand"
@@ -22,17 +23,19 @@ import (
"github.com/ipfs/ipfs-cluster/state"
"github.com/ipfs/ipfs-cluster/state/mapstate"
"github.com/ipfs/ipfs-cluster/test"
peerstore "github.com/libp2p/go-libp2p-peerstore"

cid "github.com/ipfs/go-cid"
crypto "github.com/libp2p/go-libp2p-crypto"
host "github.com/libp2p/go-libp2p-host"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
)

//TestClusters*
var (
// number of clusters to create
nClusters = 6
nClusters = 5

// number of pins to pin/unpin/check
nPins = 100
@@ -124,16 +127,16 @@ func createComponents(t *testing.T, i int, clusterSecret []byte) (*Config, *raft
return clusterCfg, consensusCfg, api, ipfs, state, tracker, mon, alloc, inf, mock
}

func createCluster(t *testing.T, clusterCfg *Config, consensusCfg *raft.Config, api API, ipfs IPFSConnector, state state.State, tracker PinTracker, mon PeerMonitor, alloc PinAllocator, inf Informer) *Cluster {
cl, err := NewCluster(nil, clusterCfg, consensusCfg, api, ipfs, state, tracker, mon, alloc, inf)
func createCluster(t *testing.T, host host.Host, clusterCfg *Config, consensusCfg *raft.Config, api API, ipfs IPFSConnector, state state.State, tracker PinTracker, mon PeerMonitor, alloc PinAllocator, inf Informer) *Cluster {
cl, err := NewCluster(host, clusterCfg, consensusCfg, api, ipfs, state, tracker, mon, alloc, inf)
checkErr(t, err)
<-cl.Ready()
return cl
}

func createOnePeerCluster(t *testing.T, nth int, clusterSecret []byte) (*Cluster, *test.IpfsMock) {
clusterCfg, consensusCfg, api, ipfs, state, tracker, mon, alloc, inf, mock := createComponents(t, nth, clusterSecret)
cl := createCluster(t, clusterCfg, consensusCfg, api, ipfs, state, tracker, mon, alloc, inf)
cl := createCluster(t, nil, clusterCfg, consensusCfg, api, ipfs, state, tracker, mon, alloc, inf)
return cl, mock
}
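
createCluster now receives a pre-built host.Host and hands it straight to NewCluster, while createOnePeerCluster keeps passing nil, so a cluster apparently still builds its own host when none is injected. Below is a tiny standalone sketch of that optional-injection shape; fakeHost, node and newNode are illustrative stand-ins, not ipfs-cluster types.

```go
package main

import "fmt"

// fakeHost stands in for a libp2p host.Host in this illustration; the real
// code passes a host.Host (or nil) into NewCluster.
type fakeHost struct{ id string }

type node struct{ host *fakeHost }

// newNode mirrors the shape of the change: callers may inject a pre-built
// host; nil means "build your own", which is what createOnePeerCluster
// still relies on.
func newNode(h *fakeHost) *node {
	if h == nil {
		h = &fakeHost{id: "self-created"}
	}
	return &node{host: h}
}

func main() {
	injected := newNode(&fakeHost{id: "pre-created"}) // multi-peer tests
	selfMade := newNode(nil)                          // single-peer tests
	fmt.Println(injected.host.id, selfMade.host.id)
}
```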

@@ -149,6 +152,8 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
allocs := make([]PinAllocator, nClusters, nClusters)
infs := make([]Informer, nClusters, nClusters)
ipfsMocks := make([]*test.IpfsMock, nClusters, nClusters)

hosts := make([]host.Host, nClusters, nClusters)
clusters := make([]*Cluster, nClusters, nClusters)

// Uncomment when testing with fixed ports
@@ -197,37 +202,45 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
// ----------------------------------------------

// Alternative way of starting using bootstrap
// Create hosts
var err error
for i := 0; i < nClusters; i++ {
hosts[i], err = NewClusterHost(context.Background(), cfgs[i])
if err != nil {
t.Fatal(err)
}
}

// open connections among all hosts
for _, h := range hosts {
for _, h2 := range hosts {
if h.ID() != h2.ID() {

h.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), peerstore.PermanentAddrTTL)
_, err := h.Network().DialPeer(context.Background(), h2.ID())
if err != nil {
t.Fatal(err)
}
}
}
}

// Start first node
clusters[0] = createCluster(t, cfgs[0], concfgs[0], apis[0], ipfss[0], states[0], trackers[0], mons[0], allocs[0], infs[0])
clusters[0] = createCluster(t, hosts[0], cfgs[0], concfgs[0], apis[0], ipfss[0], states[0], trackers[0], mons[0], allocs[0], infs[0])
// Find out where it binded
bootstrapAddr, _ := ma.NewMultiaddr(fmt.Sprintf("%s/ipfs/%s", clusters[0].host.Addrs()[0], clusters[0].id.Pretty()))
// Use first node to bootstrap
for i := 1; i < nClusters; i++ {
cfgs[i].Bootstrap = []ma.Multiaddr{bootstrapAddr}
}
time.Sleep(200 * time.Millisecond)
waitForLeaderLoop(t, clusters[0:1])

// Start the rest
// We don't do this in parallel because it causes libp2p dial backoffs
for i := 1; i < nClusters; i++ {
clusters[i] = createCluster(t, cfgs[i], concfgs[i], apis[i], ipfss[i], states[i], trackers[i], mons[i], allocs[i], infs[i])
time.Sleep(200 * time.Millisecond)
}

// open connections among all peers. This ensures smoother operations.
// Best effort. Some errors do happen.
for _, c := range clusters {
peers, err := c.consensus.Peers()
if err != nil {
shutdownClusters(t, clusters, ipfsMocks)
t.Fatal(err)
}
for _, p := range peers {
if p != c.id {
c.host.Network().DialPeer(c.ctx, p)
}
}
clusters[i] = createCluster(t, hosts[i], cfgs[i], concfgs[i], apis[i], ipfss[i], states[i], trackers[i], mons[i], allocs[i], infs[i])
}
waitForLeader(t, clusters)

// ---------------------------------------------
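
The hunk above is the heart of the change: all libp2p hosts are created first, every host learns every other host's addresses (with peerstore.PermanentAddrTTL) and dials it, and only then are the clusters started, the first one serving as bootstrap for the rest. Below is a minimal standalone sketch of that pre-connect step, using the same pre-monorepo import paths as the test file. libp2p.New and ListenAddrStrings are assumptions here (the tests use the cluster's own NewClusterHost), and newer go-libp2p releases relocate these packages and drop the context argument from New.

```go
package main

import (
	"context"
	"fmt"

	libp2p "github.com/libp2p/go-libp2p"
	host "github.com/libp2p/go-libp2p-host"
	peerstore "github.com/libp2p/go-libp2p-peerstore"
)

// preconnect mirrors the loop added in createClusters: every host stores
// every other host's listen addresses permanently and opens a connection
// up front, so later cluster traffic never has to discover or dial peers.
func preconnect(ctx context.Context, hosts []host.Host) error {
	for _, h := range hosts {
		for _, h2 := range hosts {
			if h.ID() == h2.ID() {
				continue
			}
			h.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), peerstore.PermanentAddrTTL)
			if _, err := h.Network().DialPeer(ctx, h2.ID()); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	ctx := context.Background()
	hosts := make([]host.Host, 3)
	for i := range hosts {
		h, err := libp2p.New(ctx, libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
		if err != nil {
			panic(err)
		}
		hosts[i] = h
	}
	if err := preconnect(ctx, hosts); err != nil {
		panic(err)
	}
	fmt.Println("all hosts connected:", len(hosts))
}
```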

@@ -288,9 +301,13 @@ func ttlDelay()
// Makes sure new metrics have come in for the new leader.
func waitForLeader(t *testing.T, clusters []*Cluster) {
ttlDelay()
waitForLeaderLoop(t, clusters)
ttlDelay()
}

func waitForLeaderLoop(t *testing.T, clusters []*Cluster) {
timer := time.NewTimer(time.Minute)
ticker := time.NewTicker(time.Second / 4)
ticker := time.NewTicker(100 * time.Millisecond)

loop:
for {
@@ -310,8 +327,6 @@ loop:
break loop
}
}

ttlDelay()
}

func TestClustersVersion(t *testing.T) {

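The last two hunks split the old waitForLeader into waitForLeader (TTL delays around the wait) and waitForLeaderLoop (the actual polling), and tighten the poll interval from 250ms (time.Second / 4) to 100ms. The polling itself is a plain timer-plus-ticker loop; here is a generic standalone sketch of that pattern (waitFor and its arguments are assumptions for illustration, not test helpers from the repository).

```go
package main

import (
	"errors"
	"time"
)

// waitFor polls cond every interval until it returns true, or fails after
// timeout — the same timer/ticker structure used by waitForLeaderLoop.
func waitFor(cond func() bool, timeout, interval time.Duration) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-timer.C:
			return errors.New("condition not met before timeout")
		case <-ticker.C:
			if cond() {
				return nil
			}
		}
	}
}

func main() {
	start := time.Now()
	// Example: wait (at most a minute) until 300ms have elapsed.
	err := waitFor(func() bool {
		return time.Since(start) > 300*time.Millisecond
	}, time.Minute, 100*time.Millisecond)
	if err != nil {
		panic(err)
	}
}
```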