Merge pull request #350 from ipfs/feat/339-faster-tests
Feat #339: faster tests
hsanjuan committed Apr 5, 2018
2 parents 95ae174 + 0069c00 commit da0915a
Showing 19 changed files with 347 additions and 207 deletions.
49 changes: 31 additions & 18 deletions .travis.yml
@@ -6,21 +6,37 @@ go:
- '1.9'
services:
- docker
before_install:
- docker pull ipfs/go-ipfs
- sudo apt-get update
- sudo apt-get install -y jq curl
install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
- go get github.com/golang/lint/golint
- make deps
script:
- make docker
- make check
- make service && make ctl && ./coverage.sh
- make install
- make test_sharness && make clean_sharness

cache:
directories:
- $GOPATH/src/gx

install: true

jobs:
include:
- stage: "build and test (1: tests | 2: checks | 3: docker | 4: sharness)"
script:
- go get -u github.com/mattn/goveralls
- go get -u golang.org/x/tools/cmd/cover
- make deps
- ./coverage.sh
- script:
- go get -u github.com/golang/lint/golint
- make deps
- make check
- make service
- make ctl
- script:
- make docker
- script:
- sudo apt-get update
- sudo apt-get install -y jq curl
- make deps
- make install
- docker pull ipfs/go-ipfs
- make test_sharness && make clean_sharness

env:
global:
- secure: M3K3y9+D933tCda7+blW3qqVV8fA6PBDRdJoQvmQc1f0XYbWinJ+bAziFp6diKkF8sMQ+cPwLMONYJuaNT2h7/PkG+sIwF0PuUo5VVCbhGmSDrn2qOjmSnfawNs8wW31f44FQA8ICka1EFZcihohoIMf0e5xZ0tXA9jqw+ngPJiRnv4zyzC3r6t4JMAZcbS9w4KTYpIev5Yj72eCvk6lGjadSVCDVXo2sVs27tNt+BSgtMXiH6Sv8GLOnN2kFspGITgivHgB/jtU6QVtFXB+cbBJJAs3lUYnzmQZ5INecbjweYll07ilwFiCVNCX67+L15gpymKGJbQggloIGyTWrAOa2TMaB/bvblzwwQZ8wE5P3Rss5L0TFkUAcdU+3BUHM+TwV4e8F9x10v1PjgWNBRJQzd1sjKKgGUBCeyCY7VeYDKn9AXI5llISgY/AAfCZwm2cbckMHZZJciMjm+U3Q1FCF+rfhlvUcMG1VEj8r9cGpmWIRjFYVm0NmpUDDNjlC3/lUfTCOOJJyM254EUw63XxabbK6EtDN1yQe8kYRcXH//2rtEwgtMBgqHVY+OOkekzGz8Ra3EBkh6jXrAQL3zKu/GwRlK7/a1OU5MQ7dWcTjbx1AQ6Zfyjg5bZ+idqPgMbqM9Zn2+OaSby8HEEXS0QeZVooDVf/6wdYO4MQ/0A=
@@ -35,6 +51,3 @@ deploy:
script: docker run -v $(pwd):$(pwd) -t snapcore/snapcraft sh -c "apt update -qq
&& cd $(pwd) && snapcraft && snapcraft push *.snap --release edge"
skip_cleanup: true
cache:
directories:
- $GOPATH/src/gx
2 changes: 1 addition & 1 deletion Makefile
@@ -68,7 +68,7 @@ check:
golint -set_exit_status -min_confidence 0.3 ./...

test: deps
go test -timeout 20m -loglevel "CRITICAL" -v ./...
go test -loglevel "CRITICAL" -v ./...

test_sharness: $(sharness)
@sh sharness/run-sharness-tests.sh
7 changes: 4 additions & 3 deletions allocate.go
@@ -59,11 +59,12 @@ func (c *Cluster) allocate(hash *cid.Cid, rplMin, rplMax int, blacklist []peer.I
priorityMetrics := make(map[peer.ID]api.Metric)

// Divide metrics between current and candidates.
// All metrics in metrics are valid (at least the
// moment they were compiled by the monitor)
for _, m := range metrics {
switch {
case m.Discard() || containsPeer(blacklist, m.Peer):
// discard peers with invalid metrics and
// those in the blacklist
case containsPeer(blacklist, m.Peer):
// discard blacklisted peers
continue
case containsPeer(currentAllocs, m.Peer):
currentMetrics[m.Peer] = m
2 changes: 1 addition & 1 deletion allocator/ascendalloc/ascendalloc_test.go
@@ -24,7 +24,7 @@ var (
testCid, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
)

var inAMinute = time.Now().Add(time.Minute).Format(time.RFC3339Nano)
var inAMinute = time.Now().Add(time.Minute).UnixNano()

var testCases = []testcase{
{ // regular sort
2 changes: 1 addition & 1 deletion allocator/descendalloc/descendalloc_test.go
@@ -24,7 +24,7 @@ var (
testCid, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
)

var inAMinute = time.Now().Add(time.Minute).Format(time.RFC3339Nano)
var inAMinute = time.Now().Add(time.Minute).UnixNano()

var testCases = []testcase{
{ // regular sort
26 changes: 7 additions & 19 deletions api/types.go
@@ -602,8 +602,8 @@ type Metric struct {
Name string
Peer peer.ID // filled-in by Cluster.
Value string
Expire string // RFC3339Nano
Valid bool // if the metric is not valid it will be discarded
Expire int64 // UnixNano
Valid bool // if the metric is not valid it will be discarded
}

// SetTTL sets Metric to expire after the given seconds
@@ -615,31 +615,19 @@ func (m *Metric) SetTTL(seconds int) {
// SetTTLDuration sets Metric to expire after the given time.Duration
func (m *Metric) SetTTLDuration(d time.Duration) {
exp := time.Now().Add(d)
m.Expire = exp.UTC().Format(time.RFC3339Nano)
m.Expire = exp.UnixNano()
}

// GetTTL returns the time left before the Metric expires
func (m *Metric) GetTTL() time.Duration {
if m.Expire == "" {
return 0
}
exp, err := time.Parse(time.RFC3339Nano, m.Expire)
if err != nil {
panic(err)
}
return exp.Sub(time.Now())
expDate := time.Unix(0, m.Expire)
return expDate.Sub(time.Now())
}

// Expired returns if the Metric has expired
func (m *Metric) Expired() bool {
if m.Expire == "" {
return true
}
exp, err := time.Parse(time.RFC3339Nano, m.Expire)
if err != nil {
panic(err)
}
return time.Now().After(exp)
expDate := time.Unix(0, m.Expire)
return time.Now().After(expDate)
}

// Discard returns if the metric not valid or has expired
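
The api/types.go hunk above switches Metric.Expire from an RFC3339Nano string to a Unix-nanosecond int64, which removes the string parsing (and its panic path) from every TTL check. A minimal standalone sketch of the new expiry handling, using a stripped-down stand-in for api.Metric rather than the real type:

package main

import (
	"fmt"
	"time"
)

// metric is a stand-in for api.Metric, keeping only the expiry field.
type metric struct {
	Expire int64 // UnixNano timestamp of the expiry time
}

// setTTLDuration stores the absolute expiry time as Unix nanoseconds,
// mirroring the new SetTTLDuration above.
func (m *metric) setTTLDuration(d time.Duration) {
	m.Expire = time.Now().Add(d).UnixNano()
}

// getTTL returns the time left before expiry (negative once expired).
func (m *metric) getTTL() time.Duration {
	expDate := time.Unix(0, m.Expire)
	return expDate.Sub(time.Now())
}

// expired reports whether the expiry time lies in the past.
func (m *metric) expired() bool {
	return time.Now().After(time.Unix(0, m.Expire))
}

func main() {
	var m metric
	m.setTTLDuration(150 * time.Millisecond) // short TTLs are what speed the tests up
	fmt.Println("ttl:", m.getTTL(), "expired:", m.expired())
	time.Sleep(200 * time.Millisecond)
	fmt.Println("ttl:", m.getTTL(), "expired:", m.expired())
}
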
2 changes: 1 addition & 1 deletion ci/Jenkinsfile
@@ -1,2 +1,2 @@
golang([test: "go test -v -timeout 20m ./..."])
golang([test: "go test -v ./..."])

8 changes: 4 additions & 4 deletions cluster.go
@@ -272,7 +272,6 @@ func (c *Cluster) pushInformerMetrics() {
// The following control how often to make and log
// a retry
retries := 0
retryDelay := 500 * time.Millisecond
retryWarnMod := 60
for {
select {
@@ -293,7 +292,7 @@ func (c *Cluster) pushInformerMetrics() {
retries++
}
// retry in retryDelay
timer.Reset(retryDelay)
timer.Reset(metric.GetTTL() / 4)
continue
}

@@ -345,8 +344,7 @@ func (c *Cluster) alertsHandler() {
// detects any changes in the peerset and saves the configuration. When it
// detects that we have been removed from the peerset, it shuts down this peer.
func (c *Cluster) watchPeers() {
// TODO: Config option?
ticker := time.NewTicker(5 * time.Second)
ticker := time.NewTicker(c.config.PeerWatchInterval)
lastPeers := PeersFromMultiaddrs(c.config.Peers)

for {
@@ -462,11 +460,13 @@ This might be due to one or several causes:
if len(peers) == 1 {
logger.Info(" - No other peers")
}

for _, p := range peers {
if p != c.id {
logger.Infof(" - %s", p.Pretty())
}
}

close(c.readyCh)
c.readyB = true
logger.Info("** IPFS Cluster is READY **")
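
Two of the cluster.go changes above carry most of the speed-up: the metrics push loop now retries at a quarter of the metric's TTL instead of a fixed 500ms, and watchPeers ticks at the new configurable PeerWatchInterval. A rough standalone illustration of the retry pacing (the TTL values here are arbitrary, not taken from the commit):

package main

import (
	"fmt"
	"time"
)

func main() {
	// The retry timer is reset to metric.GetTTL() / 4 rather than a fixed
	// 500ms, so retry pacing shrinks along with the TTLs used in tests.
	for _, ttl := range []time.Duration{150 * time.Millisecond, 30 * time.Second} {
		fmt.Printf("metric TTL %v -> retry roughly every %v\n", ttl, ttl/4)
	}
}
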
20 changes: 18 additions & 2 deletions cluster_config.go
@@ -28,6 +28,7 @@ const (
DefaultStateSyncInterval = 60 * time.Second
DefaultIPFSSyncInterval = 130 * time.Second
DefaultMonitorPingInterval = 15 * time.Second
DefaultPeerWatchInterval = 5 * time.Second
DefaultReplicationFactor = -1
DefaultLeaveOnShutdown = false
)
@@ -104,10 +105,16 @@ type Config struct {
// possible.
ReplicationFactorMin int

// MonitorPingInterval is frequency by which a cluster peer pings the
// monitoring component. The ping metric has a TTL set to the double
// MonitorPingInterval is the frequency with which a cluster peer pings
// the monitoring component. The ping metric has a TTL set to the double
// of this value.
MonitorPingInterval time.Duration

// PeerWatchInterval is the frequency that we use to watch for changes
// in the consensus peerset and save new peers to the configuration
// file. This also affects how soon we realize that we have
// been removed from a cluster.
PeerWatchInterval time.Duration
}

// configJSON represents a Cluster configuration as it will look when it is
@@ -128,6 +135,7 @@ type configJSON struct {
ReplicationFactorMin int `json:"replication_factor_min"`
ReplicationFactorMax int `json:"replication_factor_max"`
MonitorPingInterval string `json:"monitor_ping_interval"`
PeerWatchInterval string `json:"peer_watch_interval"`
}

// ConfigKey returns a human-readable string to identify
@@ -207,6 +215,10 @@ func (cfg *Config) Validate() error {
return errors.New("cluster.monitoring_interval is invalid")
}

if cfg.PeerWatchInterval <= 0 {
return errors.New("cluster.peer_watch_interval is invalid")
}

rfMax := cfg.ReplicationFactorMax
rfMin := cfg.ReplicationFactorMin

@@ -256,6 +268,7 @@ func (cfg *Config) setDefaults() {
cfg.ReplicationFactorMin = DefaultReplicationFactor
cfg.ReplicationFactorMax = DefaultReplicationFactor
cfg.MonitorPingInterval = DefaultMonitorPingInterval
cfg.PeerWatchInterval = DefaultPeerWatchInterval
}

// LoadJSON receives a raw json-formatted configuration and
@@ -353,10 +366,12 @@ func (cfg *Config) LoadJSON(raw []byte) error {
stateSyncInterval := parseDuration(jcfg.StateSyncInterval)
ipfsSyncInterval := parseDuration(jcfg.IPFSSyncInterval)
monitorPingInterval := parseDuration(jcfg.MonitorPingInterval)
peerWatchInterval := parseDuration(jcfg.PeerWatchInterval)

config.SetIfNotDefault(stateSyncInterval, &cfg.StateSyncInterval)
config.SetIfNotDefault(ipfsSyncInterval, &cfg.IPFSSyncInterval)
config.SetIfNotDefault(monitorPingInterval, &cfg.MonitorPingInterval)
config.SetIfNotDefault(peerWatchInterval, &cfg.PeerWatchInterval)

cfg.LeaveOnShutdown = jcfg.LeaveOnShutdown

@@ -407,6 +422,7 @@ func (cfg *Config) ToJSON() (raw []byte, err error) {
jcfg.StateSyncInterval = cfg.StateSyncInterval.String()
jcfg.IPFSSyncInterval = cfg.IPFSSyncInterval.String()
jcfg.MonitorPingInterval = cfg.MonitorPingInterval.String()
jcfg.PeerWatchInterval = cfg.PeerWatchInterval.String()

raw, err = json.MarshalIndent(jcfg, "", " ")
return
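
The new peer_watch_interval option above follows the same pattern as the other interval settings: serialized as a Go duration string in JSON, parsed on load, and left at DefaultPeerWatchInterval (5s) when the field is absent or invalid. A rough standalone sketch of that parse-with-default behaviour using only the standard library (the real code goes through the config package's parseDuration and SetIfNotDefault helpers shown in the hunk above):

package main

import (
	"fmt"
	"time"
)

// defaultPeerWatchInterval mirrors DefaultPeerWatchInterval above.
const defaultPeerWatchInterval = 5 * time.Second

// peerWatchInterval parses a JSON duration string and keeps the default on
// empty or invalid input, roughly what parseDuration plus SetIfNotDefault do.
func peerWatchInterval(raw string) time.Duration {
	d, err := time.ParseDuration(raw)
	if err != nil || d <= 0 {
		return defaultPeerWatchInterval
	}
	return d
}

func main() {
	fmt.Println(peerWatchInterval(""))      // 5s, the default
	fmt.Println(peerWatchInterval("100ms")) // 100ms, as in the test config below
}
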
2 changes: 1 addition & 1 deletion cluster_test.go
@@ -315,7 +315,7 @@ func TestClusterRecoverAllLocal(t *testing.T) {
t.Fatal("pin should have worked:", err)
}

time.Sleep(time.Second)
pinDelay()

recov, err := cl.RecoverAllLocal()
if err != nil {
6 changes: 5 additions & 1 deletion config/config.go
@@ -15,6 +15,10 @@ import (

var logger = logging.Logger("config")

// ConfigSaveInterval specifies how often to save the configuration file if
// it needs saving.
var ConfigSaveInterval = time.Second

// The ComponentConfig interface allows components to define configurations
// which can be managed as part of the ipfs-cluster configuration file by the
// Manager.
@@ -116,7 +120,7 @@ func (cfg *Manager) watchSave(save <-chan struct{}) {
defer cfg.wg.Done()

// Save once per second mostly
ticker := time.NewTicker(time.Second)
ticker := time.NewTicker(ConfigSaveInterval)
defer ticker.Stop()

thingsToSave := false
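
Turning the save ticker interval into the package-level config.ConfigSaveInterval variable lets tests shrink it while normal operation keeps the one-second default. The diff does not show where it gets overridden; a hypothetical test using it might look like this (only config.ConfigSaveInterval itself comes from this commit, the rest is illustrative):

package mypkg_test

import (
	"testing"
	"time"

	"github.com/ipfs/ipfs-cluster/config"
)

func TestWithFastConfigSaves(t *testing.T) {
	// Shrink the watchSave ticker before the config Manager starts its
	// save loop, and restore the package default afterwards.
	old := config.ConfigSaveInterval
	config.ConfigSaveInterval = 10 * time.Millisecond
	defer func() { config.ConfigSaveInterval = old }()

	// ... exercise code that marks the configuration dirty and expects
	// it to be persisted quickly ...
}
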
25 changes: 13 additions & 12 deletions config_test.go
@@ -17,29 +17,30 @@ var testingClusterCfg = []byte(`{
"secret": "2588b80d5cb05374fa142aed6cbb047d1f4ef8ef15e37eba68c65b9d30df67ed",
"peers": [],
"bootstrap": [],
"leave_on_shutdown": true,
"leave_on_shutdown": false,
"listen_multiaddress": "/ip4/127.0.0.1/tcp/10000",
"state_sync_interval": "1m0s",
"ipfs_sync_interval": "2m10s",
"replication_factor": -1,
"monitor_ping_interval": "1s"
"monitor_ping_interval": "150ms",
"peer_watch_interval": "100ms"
}
`)

var testingRaftCfg = []byte(`{
"data_folder": "raftFolderFromTests",
"wait_for_leader_timeout": "30s",
"commit_retries": 1,
"commit_retry_delay": "1s",
"network_timeout": "20s",
"heartbeat_timeout": "1s",
"election_timeout": "1s",
"wait_for_leader_timeout": "10s",
"commit_retries": 2,
"commit_retry_delay": "50ms",
"network_timeout": "5s",
"heartbeat_timeout": "100ms",
"election_timeout": "100ms",
"commit_timeout": "50ms",
"max_append_entries": 64,
"max_append_entries": 256,
"trailing_logs": 10240,
"snapshot_interval": "2m0s",
"snapshot_threshold": 8192,
"leader_lease_timeout": "500ms"
"leader_lease_timeout": "80ms"
}`)

var testingAPICfg = []byte(`{
@@ -71,11 +72,11 @@ var testingTrackerCfg = []byte(`
`)

var testingMonCfg = []byte(`{
"check_interval": "1s"
"check_interval": "300ms"
}`)

var testingDiskInfCfg = []byte(`{
"metric_ttl": "1s",
"metric_ttl": "150ms",
"metric_type": "freespace"
}`)

2 changes: 2 additions & 0 deletions consensus/raft/consensus_test.go
@@ -50,6 +50,7 @@ func makeTestingHost(t *testing.T) host.Host {
}

func testingConsensus(t *testing.T, idn int) *Consensus {
cleanRaft(idn)
h := makeTestingHost(t)
st := mapstate.NewMapState()

@@ -72,6 +73,7 @@ func TestShutdownConsensus(t *testing.T) {
// Bring it up twice to make sure shutdown cleans up properly
// but also to make sure raft comes up ok when re-initialized
cc := testingConsensus(t, 1)
defer cleanRaft(1)
err := cc.Shutdown()
if err != nil {
t.Fatal("Consensus cannot shutdown:", err)
4 changes: 2 additions & 2 deletions coverage.sh
@@ -7,9 +7,9 @@ for dir in $dirs;
do
if ls "$dir"/*.go &> /dev/null;
then
cmdflags="-timeout 20m -v -coverprofile=profile.out -covermode=count $dir"
cmdflags="-v -coverprofile=profile.out -covermode=count $dir"
if [ "$dir" == "." ]; then
cmdflags="-timeout 20m -v -coverprofile=profile.out -covermode=count -loglevel CRITICAL ."
cmdflags="-v -coverprofile=profile.out -covermode=count -loglevel CRITICAL ."
fi
echo go test $cmdflags
go test $cmdflags