gometalinter: add gosimple

... and fix warnings reported by it:

agent/exec/dockerapi/adapter.go:147:2:warning: 'if err != nil { return err }; return nil' can be simplified to 'return err' (S1013) (gosimple)
agent/testutils/fakes.go:143:2:warning: should use a simple channel send/receive instead of select with a single case (S1000) (gosimple)
agent/testutils/fakes.go:151:2:warning: should use a simple channel send/receive instead of select with a single case (S1000) (gosimple)
ca/certificates_test.go:708:2:warning: should use for range instead of for { select {} } (S1000) (gosimple)
ca/config.go:630:12:warning: should use time.Until instead of t.Sub(time.Now()) (S1024) (gosimple)
ca/config_test.go:790:3:warning: should use a simple channel send/receive instead of select with a single case (S1000) (gosimple)
ca/external_test.go:116:3:warning: should use a simple channel send/receive instead of select with a single case (S1000) (gosimple)
cmd/swarm-bench/collector.go:26:2:warning: 'if err != nil { return err }; return nil' can be simplified to 'return err' (S1013) (gosimple)
cmd/swarmctl/node/common.go:51:2:warning: 'if err != nil { return err }; return nil' can be simplified to 'return err' (S1013) (gosimple)
cmd/swarmctl/node/common.go:89:2:warning: 'if err != nil { return err }; return nil' can be simplified to 'return err' (S1013) (gosimple)
cmd/swarmctl/node/common.go:172:2:warning: 'if err != nil { return err }; return nil' can be simplified to 'return err' (S1013) (gosimple)
ioutils/ioutils_test.go:28:5:warning: should use !bytes.Equal(actual, expected) instead (S1004) (gosimple)
manager/allocator/cnmallocator/networkallocator.go:818:3:warning: should merge variable declaration with assignment on next line (S1021) (gosimple)
manager/constraint/constraint.go:59:7:warning: should omit comparison to bool constant, can be simplified to !matched (S1002) (gosimple)
manager/constraint/constraint.go:67:7:warning: should omit comparison to bool constant, can be simplified to !matched (S1002) (gosimple)
manager/dispatcher/dispatcher.go:1095:5:warning: 'if err != nil { return err }; return nil' can be simplified to 'return err' (S1013) (gosimple)
manager/dispatcher/dispatcher.go:1095:5:warning: 'if err != nil { return err }; return nil' can be simplified to 'return err' (S1013) (gosimple)
manager/dispatcher/dispatcher.go:1095:5:warning: 'if err != nil { return err }; return nil' can be simplified to 'return err' (S1013) (gosimple)
manager/dispatcher/dispatcher_test.go:2090:2:warning: redundant return statement (S1023) (gosimple)
manager/manager.go:1005:4:warning: should replace loop with m.config.NetworkConfig.DefaultAddrPool = append(m.config.NetworkConfig.DefaultAddrPool, cluster.DefaultAddressPool...) (S1011) (gosimple)
manager/metrics/collector.go:191:2:warning: redundant return statement (S1023) (gosimple)
manager/metrics/collector.go:222:2:warning: redundant return statement (S1023) (gosimple)
manager/orchestrator/replicated/update_test.go:53:3:warning: should use for range instead of for { select {} } (S1000) (gosimple)
manager/orchestrator/taskinit/init.go:83:32:warning: should use time.Until instead of t.Sub(time.Now()) (S1024) (gosimple)
manager/state/raft/raft.go:1185:2:warning: should use 'return <expr>' instead of 'if <expr> { return <bool> }; return <bool>' (S1008) (gosimple)
manager/state/raft/raft.go:1594:2:warning: 'if err != nil { return err }; return nil' can be simplified to 'return err' (S1013) (gosimple)
node/node.go:1209:2:warning: redundant return statement (S1023) (gosimple)
node/node.go:1219:2:warning: redundant return statement (S1023) (gosimple)
watch/sinks_test.go:42:2:warning: should merge variable declaration with assignment on next line (S1021) (gosimple)

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
kolyshkin committed Oct 4, 2018
1 parent 278edc2 commit 0e8bb70
Showing 21 changed files with 75 additions and 124 deletions.
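
Note (illustrative, not part of the commit): the two gosimple rules that account for most of the diff below are S1013 (an "if err != nil { return err }" followed by "return nil" collapses to "return err") and S1000 (a select statement with a single case is just a plain channel operation). The following standalone sketch shows both patterns; saveConfig and waitDone are made-up names, not functions from this repository.

    package main

    import "errors"

    var errSave = errors.New("save failed")

    // saveConfig is a hypothetical helper used only to illustrate the rules.
    func saveConfig(fail bool) error {
        if fail {
            return errSave
        }
        return nil
    }

    // Before (S1013): the error is checked only to be returned unchanged.
    func writeVerbose(fail bool) error {
        if err := saveConfig(fail); err != nil {
            return err
        }
        return nil
    }

    // After (S1013): returning the call's error directly is equivalent.
    func write(fail bool) error {
        return saveConfig(fail)
    }

    // S1000: a select with a single case is a plain channel receive.
    func waitDone(done <-chan struct{}) {
        // select { case <-done: } simplifies to:
        <-done
    }

    func main() {
        _ = writeVerbose(false)
        _ = write(true)
        ch := make(chan struct{})
        close(ch)
        waitDone(ch)
    }
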
1 change: 1 addition & 0 deletions .gometalinter.json
@@ -9,6 +9,7 @@
     "gofmt",
     "goimports",
     "golint",
+    "gosimple",
     "ineffassign",
     "deadcode",
     "unconvert"
8 changes: 3 additions & 5 deletions agent/exec/dockerapi/adapter.go
@@ -144,15 +144,13 @@ func (c *containerAdapter) removeNetworks(ctx context.Context) error {
 }
 
 func (c *containerAdapter) create(ctx context.Context) error {
-    if _, err := c.client.ContainerCreate(ctx,
+    _, err := c.client.ContainerCreate(ctx,
         c.container.config(),
         c.container.hostConfig(),
         c.container.networkingConfig(),
-        c.container.name()); err != nil {
-        return err
-    }
+        c.container.name())
 
-    return nil
+    return err
 }
 
 func (c *containerAdapter) start(ctx context.Context) error {
8 changes: 2 additions & 6 deletions agent/testutils/fakes.go
@@ -140,17 +140,13 @@ func (m *MockDispatcher) UpdateTaskStatus(context.Context, *api.UpdateTaskStatus
 
 // Tasks keeps an open stream until canceled
 func (m *MockDispatcher) Tasks(_ *api.TasksRequest, stream api.Dispatcher_TasksServer) error {
-    select {
-    case <-stream.Context().Done():
-    }
+    <-stream.Context().Done()
     return nil
 }
 
 // Assignments keeps an open stream until canceled
 func (m *MockDispatcher) Assignments(_ *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error {
-    select {
-    case <-stream.Context().Done():
-    }
+    <-stream.Context().Done()
     return nil
 }
 
10 changes: 4 additions & 6 deletions ca/certificates_test.go
@@ -706,12 +706,10 @@ func TestGetRemoteSignedCertificateWithPending(t *testing.T) {
     var node *api.Node
     // wait for a new node to show up
     for node == nil {
-        select {
-        case event := <-updates: // we want to skip the first node, which is the test CA
-            n := event.(api.EventCreateNode).Node.Copy()
-            if n.Certificate.Status.State == api.IssuanceStatePending {
-                node = n
-            }
+        event := <-updates // we want to skip the first node, which is the test CA
+        n := event.(api.EventCreateNode).Node.Copy()
+        if n.Certificate.Status.State == api.IssuanceStatePending {
+            node = n
         }
     }
 
2 changes: 1 addition & 1 deletion ca/config.go
@@ -627,7 +627,7 @@ func calculateRandomExpiry(validFrom, validUntil time.Time) time.Duration {
         randomExpiry = rand.Intn(maxValidity-minValidity) + minValidity
     }
 
-    expiry := validFrom.Add(time.Duration(randomExpiry) * time.Minute).Sub(time.Now())
+    expiry := time.Until(validFrom.Add(time.Duration(randomExpiry) * time.Minute))
     if expiry < 0 {
         return 0
     }
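
Note (illustrative, not part of the commit): the S1024 rewrite applied in ca/config.go above, and again in manager/orchestrator/taskinit/init.go below, relies on time.Until(t) being shorthand, since Go 1.8, for t.Sub(time.Now()). A minimal standalone sketch with an arbitrary deadline value:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        deadline := time.Now().Add(90 * time.Minute)

        // Old spelling flagged by gosimple (S1024):
        d1 := deadline.Sub(time.Now())

        // Preferred spelling; semantically identical:
        d2 := time.Until(deadline)

        // Both are ~90 minutes; they differ only by the nanoseconds
        // elapsed between the two calls.
        fmt.Println(d1, d2)
    }
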
34 changes: 16 additions & 18 deletions ca/config_test.go
@@ -787,24 +787,22 @@ func TestRenewTLSConfigUpdatesRootNonUnknownAuthError(t *testing.T) {
     go func() {
         updates, cancel := state.Watch(tc.MemoryStore.WatchQueue(), api.EventCreateNode{})
         defer cancel()
-        select {
-        case event := <-updates: // we want to skip the first node, which is the test CA
-            n := event.(api.EventCreateNode).Node
-            if n.Certificate.Status.State == api.IssuanceStatePending {
-                signErr <- tc.MemoryStore.Update(func(tx store.Tx) error {
-                    node := store.GetNode(tx, n.ID)
-                    certChain, err := rootCA.ParseValidateAndSignCSR(node.Certificate.CSR, node.Certificate.CN, ca.WorkerRole, tc.Organization)
-                    if err != nil {
-                        return err
-                    }
-                    node.Certificate.Certificate = cautils.ReDateCert(t, certChain, cert, key, time.Now().Add(-5*time.Hour), time.Now().Add(-4*time.Hour))
-                    node.Certificate.Status = api.IssuanceStatus{
-                        State: api.IssuanceStateIssued,
-                    }
-                    return store.UpdateNode(tx, node)
-                })
-                return
-            }
+        event := <-updates // we want to skip the first node, which is the test CA
+        n := event.(api.EventCreateNode).Node
+        if n.Certificate.Status.State == api.IssuanceStatePending {
+            signErr <- tc.MemoryStore.Update(func(tx store.Tx) error {
+                node := store.GetNode(tx, n.ID)
+                certChain, err := rootCA.ParseValidateAndSignCSR(node.Certificate.CSR, node.Certificate.CN, ca.WorkerRole, tc.Organization)
+                if err != nil {
+                    return err
+                }
+                node.Certificate.Certificate = cautils.ReDateCert(t, certChain, cert, key, time.Now().Add(-5*time.Hour), time.Now().Add(-4*time.Hour))
+                node.Certificate.Status = api.IssuanceStatus{
+                    State: api.IssuanceStateIssued,
+                }
+                return store.UpdateNode(tx, node)
+            })
+            return
         }
     }()
 
4 changes: 1 addition & 3 deletions ca/external_test.go
@@ -113,9 +113,7 @@ func TestExternalCASignRequestTimesOut(t *testing.T) {
     mux := http.NewServeMux()
     mux.HandleFunc("/", func(http.ResponseWriter, *http.Request) {
         // hang forever
-        select {
-        case <-allDone:
-        }
+        <-allDone
     })
 
     server := httptest.NewServer(mux)
5 changes: 1 addition & 4 deletions cmd/swarm-bench/collector.go
@@ -23,10 +23,7 @@ type Collector struct {
 func (c *Collector) Listen(port int) error {
     var err error
     c.ln, err = net.Listen("tcp", ":"+strconv.Itoa(port))
-    if err != nil {
-        return err
-    }
-    return nil
+    return err
 }
 
 // Collect blocks until `count` tasks phoned home.
18 changes: 3 additions & 15 deletions cmd/swarmctl/node/common.go
@@ -48,11 +48,7 @@ func changeNodeAvailability(cmd *cobra.Command, args []string, availability api.
         Spec: spec,
     })
 
-    if err != nil {
-        return err
-    }
-
-    return nil
+    return err
 }
 
 func changeNodeRole(cmd *cobra.Command, args []string, role api.NodeRole) error {
@@ -86,11 +82,7 @@ func changeNodeRole(cmd *cobra.Command, args []string, role api.NodeRole) error
         Spec: spec,
     })
 
-    if err != nil {
-        return err
-    }
-
-    return nil
+    return err
 }
 
 func getNode(ctx context.Context, c api.ControlClient, input string) (*api.Node, error) {
@@ -169,9 +161,5 @@ func updateNode(cmd *cobra.Command, args []string) error {
         Spec: spec,
     })
 
-    if err != nil {
-        return err
-    }
-
-    return nil
+    return err
 }
2 changes: 1 addition & 1 deletion ioutils/ioutils_test.go
@@ -25,7 +25,7 @@ func TestAtomicWriteToFile(t *testing.T) {
         t.Fatalf("Error reading from file: %v", err)
     }
 
-    if bytes.Compare(actual, expected) != 0 {
+    if !bytes.Equal(actual, expected) {
         t.Fatalf("Data mismatch, expected %q, got %q", expected, actual)
     }
 }
3 changes: 1 addition & 2 deletions manager/allocator/cnmallocator/networkallocator.go
@@ -815,8 +815,7 @@ func (na *cnmNetworkAllocator) resolveDriver(n *api.Network) (*networkDriver, er
 
     d, drvcap := na.drvRegistry.Driver(dName)
     if d == nil {
-        var err error
-        err = na.loadDriver(dName)
+        err := na.loadDriver(dName)
         if err != nil {
             return nil, err
         }
4 changes: 2 additions & 2 deletions manager/constraint/constraint.go
@@ -56,15 +56,15 @@ func Parse(env []string) ([]Constraint, error) {
         part0 := strings.TrimSpace(parts[0])
         // validate key
         matched := alphaNumeric.MatchString(part0)
-        if matched == false {
+        if !matched {
             return nil, fmt.Errorf("key '%s' is invalid", part0)
         }
 
         part1 := strings.TrimSpace(parts[1])
 
         // validate Value
         matched = valuePattern.MatchString(part1)
-        if matched == false {
+        if !matched {
             return nil, fmt.Errorf("value '%s' is invalid", part1)
         }
         // TODO(dongluochen): revisit requirements to see if globing or regex are useful
12 changes: 4 additions & 8 deletions manager/dispatcher/dispatcher.go
@@ -1090,14 +1090,10 @@ func (d *Dispatcher) moveTasksToOrphaned(nodeID string) error {
             task.Status.State = api.TaskStateOrphaned
         }
 
-        if err := batch.Update(func(tx store.Tx) error {
-            err := store.UpdateTask(tx, task)
-            if err != nil {
-                return err
-            }
-
-            return nil
-        }); err != nil {
+        err := batch.Update(func(tx store.Tx) error {
+            return store.UpdateTask(tx, task)
+        })
+        if err != nil {
             return err
         }
 
1 change: 0 additions & 1 deletion manager/dispatcher/dispatcher_test.go
@@ -2087,7 +2087,6 @@ func (m *mockPluginGetter) GetAllManagedPluginsByCap(capability string) []plugin
     return nil
 }
 func (m *mockPluginGetter) Handle(capability string, callback func(string, *plugins.Client)) {
-    return
 }
 
 // MockPlugin mocks a v2 docker plugin
4 changes: 1 addition & 3 deletions manager/manager.go
@@ -1002,9 +1002,7 @@ func (m *Manager) becomeLeader(ctx context.Context) {
             cluster = store.GetCluster(tx, clusterID)
         })
         if cluster.DefaultAddressPool != nil {
-            for _, address := range cluster.DefaultAddressPool {
-                m.config.NetworkConfig.DefaultAddrPool = append(m.config.NetworkConfig.DefaultAddrPool, address)
-            }
+            m.config.NetworkConfig.DefaultAddrPool = append(m.config.NetworkConfig.DefaultAddrPool, cluster.DefaultAddressPool...)
             m.config.NetworkConfig.SubnetSize = cluster.SubnetSize
         }
     }
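
Note (illustrative, not part of the commit): the manager.go hunk above replaces an element-by-element copy loop with the variadic form of append, which is what S1011 suggests. A small self-contained sketch; the slice names and addresses are invented:

    package main

    import "fmt"

    func main() {
        pool := []string{"10.0.0.0/8"}
        extra := []string{"172.16.0.0/12", "192.168.0.0/16"}

        // Loop form flagged by S1011:
        for _, addr := range extra {
            pool = append(pool, addr)
        }

        // Equivalent single call using the ... spread:
        pool2 := []string{"10.0.0.0/8"}
        pool2 = append(pool2, extra...)

        fmt.Println(pool)
        fmt.Println(pool2)
    }
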
3 changes: 0 additions & 3 deletions manager/metrics/collector.go
@@ -188,7 +188,6 @@ func (c *Collector) handleNodeEvent(event events.Event) {
     if newNode != nil {
         nodesMetric.WithValues(strings.ToLower(newNode.Status.State.String())).Inc(1)
     }
-    return
 }
 
 func (c *Collector) handleTaskEvent(event events.Event) {
@@ -218,8 +217,6 @@ func (c *Collector) handleTaskEvent(event events.Event) {
             strings.ToLower(newTask.Status.State.String()),
         ).Inc(1)
     }
-
-    return
 }
 
 func (c *Collector) handleServiceEvent(event events.Event) {
60 changes: 29 additions & 31 deletions manager/orchestrator/replicated/update_test.go
@@ -51,38 +51,36 @@ func testUpdaterRollback(t *testing.T, rollbackFailureAction api.UpdateConfig_Fa
     go func() {
         failedLast := false
         for {
-            select {
-            case e := <-watchUpdate:
-                task := e.(api.EventUpdateTask).Task
-                if task.DesiredState == task.Status.State {
-                    continue
-                }
-                if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateFailed && task.Status.State != api.TaskStateRunning {
-                    err := s.Update(func(tx store.Tx) error {
-                        task = store.GetTask(tx, task.ID)
-                        // Never fail two image2 tasks in a row, so there's a mix of
-                        // failed and successful tasks for the rollback.
-                        if task.Spec.GetContainer().Image == "image1" && atomic.LoadUint32(&failImage1) == 1 {
-                            task.Status.State = api.TaskStateFailed
-                            failedLast = true
-                        } else if task.Spec.GetContainer().Image == "image2" && atomic.LoadUint32(&failImage2) == 1 && !failedLast {
-                            task.Status.State = api.TaskStateFailed
-                            failedLast = true
-                        } else {
-                            task.Status.State = task.DesiredState
-                            failedLast = false
-                        }
-                        return store.UpdateTask(tx, task)
-                    })
-                    assert.NoError(t, err)
-                } else if task.DesiredState > api.TaskStateRunning {
-                    err := s.Update(func(tx store.Tx) error {
-                        task = store.GetTask(tx, task.ID)
+            e := <-watchUpdate
+            task := e.(api.EventUpdateTask).Task
+            if task.DesiredState == task.Status.State {
+                continue
+            }
+            if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateFailed && task.Status.State != api.TaskStateRunning {
+                err := s.Update(func(tx store.Tx) error {
+                    task = store.GetTask(tx, task.ID)
+                    // Never fail two image2 tasks in a row, so there's a mix of
+                    // failed and successful tasks for the rollback.
+                    if task.Spec.GetContainer().Image == "image1" && atomic.LoadUint32(&failImage1) == 1 {
+                        task.Status.State = api.TaskStateFailed
+                        failedLast = true
+                    } else if task.Spec.GetContainer().Image == "image2" && atomic.LoadUint32(&failImage2) == 1 && !failedLast {
+                        task.Status.State = api.TaskStateFailed
+                        failedLast = true
+                    } else {
                         task.Status.State = task.DesiredState
-                        return store.UpdateTask(tx, task)
-                    })
-                    assert.NoError(t, err)
-                }
+                        failedLast = false
+                    }
+                    return store.UpdateTask(tx, task)
+                })
+                assert.NoError(t, err)
+            } else if task.DesiredState > api.TaskStateRunning {
+                err := s.Update(func(tx store.Tx) error {
+                    task = store.GetTask(tx, task.ID)
+                    task.Status.State = task.DesiredState
+                    return store.UpdateTask(tx, task)
+                })
+                assert.NoError(t, err)
+            }
         }
     }()
2 changes: 1 addition & 1 deletion manager/orchestrator/taskinit/init.go
@@ -80,7 +80,7 @@ func CheckTasks(ctx context.Context, s *store.MemoryStore, readTx store.ReadTx,
         }
         if err == nil {
             restartTime := timestamp.Add(restartDelay)
-            calculatedRestartDelay := restartTime.Sub(time.Now())
+            calculatedRestartDelay := time.Until(restartTime)
             if calculatedRestartDelay < restartDelay {
                 restartDelay = calculatedRestartDelay
             }
10 changes: 2 additions & 8 deletions manager/state/raft/raft.go
@@ -1182,11 +1182,8 @@ func (n *Node) CanRemoveMember(id uint64) bool {
     }
 
     nquorum := (len(members)-1)/2 + 1
-    if nreachable < nquorum {
-        return false
-    }
 
-    return true
+    return nreachable >= nquorum
 }
 
 func (n *Node) removeMember(ctx context.Context, id uint64) error {
@@ -1591,10 +1588,7 @@ func (n *Node) ProposeValue(ctx context.Context, storeAction []api.StoreAction,
     defer cancel()
     _, err := n.processInternalRaftRequest(ctx, &api.InternalRaftRequest{Action: storeAction}, cb)
 
-    if err != nil {
-        return err
-    }
-    return nil
+    return err
 }
 
 // GetVersion returns the sequence information for the current raft round.
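
Note (illustrative, not part of the commit): the CanRemoveMember change in raft.go above is an instance of S1008 — when an if/else exists only to return boolean constants, the condition itself can be returned. A hypothetical sketch; the quorum formula here is simplified and is not swarmkit's exact one:

    package main

    import "fmt"

    // Before (S1008): branching just to return true or false.
    func hasQuorumVerbose(reachable, total int) bool {
        quorum := total/2 + 1
        if reachable < quorum {
            return false
        }
        return true
    }

    // After: return the comparison directly.
    func hasQuorum(reachable, total int) bool {
        return reachable >= total/2+1
    }

    func main() {
        fmt.Println(hasQuorumVerbose(2, 3), hasQuorum(2, 3)) // true true
        fmt.Println(hasQuorumVerbose(1, 3), hasQuorum(1, 3)) // false false
    }
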
5 changes: 1 addition & 4 deletions node/node.go
@@ -1204,19 +1204,16 @@ func (s *persistentRemotes) Observe(peer api.Peer, weight int) {
     s.c.Broadcast()
     if err := s.save(); err != nil {
         logrus.Errorf("error writing cluster state file: %v", err)
-        return
     }
-    return
 }
+
 func (s *persistentRemotes) Remove(peers ...api.Peer) {
     s.Lock()
     defer s.Unlock()
     s.Remotes.Remove(peers...)
     if err := s.save(); err != nil {
         logrus.Errorf("error writing cluster state file: %v", err)
-        return
     }
-    return
 }
 
 func (s *persistentRemotes) save() error {
3 changes: 1 addition & 2 deletions watch/sinks_test.go
@@ -39,8 +39,7 @@ func TestTimeoutDropErrSinkGen(t *testing.T) {
     <-ch2.Done()
 
     // Make sure that closing a sink closes the channel
-    var errClose error
-    errClose = sink.Close()
+    errClose := sink.Close()
     <-ch.Done()
     require.NoError(errClose)
 
