fix test/e2e/apps staticcheck #92750

Merged
merged 1 commit on Jul 3, 2020
1 change: 0 additions & 1 deletion hack/.staticcheck_failures
@@ -9,7 +9,6 @@ pkg/volume/azure_dd
pkg/volume/gcepd
pkg/volume/rbd
pkg/volume/testing
- test/e2e/apps
test/e2e/autoscaling
test/e2e_node
test/integration/examples
2 changes: 1 addition & 1 deletion test/e2e/apps/cronjob.go
@@ -453,7 +453,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
// Ignore Jobs pending deletion, since deletion of Jobs is now asynchronous.
aliveJobs := filterNotDeletedJobs(jobs)
if len(aliveJobs) > 1 {
- return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
+ return false, fmt.Errorf("more than one job is running %+v", jobs.Items)
} else if len(aliveJobs) == 0 {
framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
return false, nil
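Note: the edit above (and the matching ones in daemon_set.go, rc.go, and replica_set.go further down) is the standard staticcheck error-string cleanup, most likely its ST1005 check: error strings start lowercase because callers usually wrap them and print them mid-sentence. A minimal sketch of the idea; `findJob` and its message are illustrative, not taken from the e2e code:

```go
package main

import "fmt"

// findJob is a hypothetical helper used only to show the rule: keep error
// strings lowercase so a wrapped error reads as one sentence.
func findJob(name string) error {
	// Flagged:  return fmt.Errorf("More than one job is running: %s", name)
	// Accepted:
	return fmt.Errorf("more than one job is running: %s", name)
}

func main() {
	if err := findJob("demo"); err != nil {
		// Prints: checking jobs: more than one job is running: demo
		fmt.Println(fmt.Errorf("checking jobs: %w", err))
	}
}
```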
6 changes: 3 additions & 3 deletions test/e2e/apps/daemon_set.go
@@ -606,7 +606,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
if err != nil {
return nil, err
} else if len(newLabels) != len(labels) {
- return nil, fmt.Errorf("Could not set daemon set test labels as expected")
+ return nil, fmt.Errorf("could not set daemon set test labels as expected")
}

return newNode, nil
@@ -698,11 +698,11 @@ func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func()
func checkDaemonStatus(f *framework.Framework, dsName string) error {
ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(context.TODO(), dsName, metav1.GetOptions{})
if err != nil {
- return fmt.Errorf("Could not get daemon set from v1")
+ return fmt.Errorf("could not get daemon set from v1")
}
desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
if desired != scheduled && desired != ready {
- return fmt.Errorf("Error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
+ return fmt.Errorf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
}
return nil
}
6 changes: 4 additions & 2 deletions test/e2e/apps/deployment.go
@@ -378,7 +378,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
numPodCreation := 1
for {
select {
- case event, _ := <-w.ResultChan():
+ case event := <-w.ResultChan():
if event.Type != watch.Added {
continue
}
@@ -455,6 +455,7 @@ func testRolloverDeployment(f *framework.Framework) {
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
err = waitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
+ framework.ExpectNoError(err)
// Check if it's updated to revision 1 correctly
framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
err = checkDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
@@ -626,6 +627,7 @@ func testIterativeDeployments(f *framework.Framework) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Paused = false
})
+ framework.ExpectNoError(err)
}

framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
@@ -799,7 +801,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Scale the deployment to 30 replicas.
newReplicas = int32(30)
framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
- deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
+ _, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Replicas = &newReplicas
})
framework.ExpectNoError(err)
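Note: the deployment.go hunks fix two kinds of staticcheck findings: a needless blank identifier on a channel receive (`case event, _ :=` becomes `case event :=`), and results or errors that were assigned but never checked, now either blanked (`_, err =`) or asserted with `framework.ExpectNoError`. A small self-contained sketch of both moves, assuming only the standard library; `fetch` and the channel are placeholders, not the e2e helpers:

```go
package main

import "fmt"

// fetch stands in for an update helper that returns an object and an error.
func fetch() (string, error) { return "revision-2", nil }

func main() {
	events := make(chan string, 1)
	events <- "added"

	select {
	// Flagged: `case event, _ := <-events:` — the second receive value is unused.
	case event := <-events: // accepted
		fmt.Println("event:", event)
	}

	// Flagged: `obj, err := fetch()` when obj is never read again (dead store).
	// Accepted: blank the unused result and check the error explicitly
	// (the e2e tests assert it with framework.ExpectNoError(err)).
	_, err := fetch()
	if err != nil {
		fmt.Println("update failed:", err)
	}
}
```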
2 changes: 2 additions & 0 deletions test/e2e/apps/disruption.go
@@ -396,9 +396,11 @@ func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, nam
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
old := getPDBStatusOrDie(dc, ns, name)
patchBytes, err := f(old)
+ framework.ExpectNoError(err)
if updated, err = cs.PolicyV1beta1().PodDisruptionBudgets(ns).Patch(context.TODO(), old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...); err != nil {
return err
}
+ framework.ExpectNoError(err)
return nil
})

2 changes: 1 addition & 1 deletion test/e2e/apps/network_partition.go
@@ -123,7 +123,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
e2eskipper.SkipUnlessProviderIs("gke", "aws")
- if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
+ if strings.Contains(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
}
})
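Note: `strings.Index(s, sub) >= 0` is equivalent to `strings.Contains(s, sub)`, and staticcheck (most likely its S1003 simplification) prefers the latter because it states the intent directly. A minimal sketch with an illustrative value instead of the real `framework.TestContext.CloudConfig.NodeInstanceGroup`:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	nodeInstanceGroup := "group-a,group-b" // illustrative value

	// Flagged:  if strings.Index(nodeInstanceGroup, ",") >= 0 { ... }
	// Accepted, equivalent and clearer:
	if strings.Contains(nodeInstanceGroup, ",") {
		fmt.Println("cluster is configured with more than one instance group")
	}
}
```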
5 changes: 3 additions & 2 deletions test/e2e/apps/rc.go
@@ -251,6 +251,7 @@ var _ = SIGDescribe("ReplicationController", func() {
}
return true, nil
})
+ framework.ExpectNoError(err, "Failed to find updated ready replica count")
framework.ExpectEqual(eventFound, true, "Failed to find updated ready replica count")

ginkgo.By("fetching ReplicationController status")
@@ -445,9 +446,9 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
if err != nil {
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if getErr == nil {
- err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+ err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else {
- err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
+ err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
}
}
framework.ExpectNoError(err)
4 changes: 2 additions & 2 deletions test/e2e/apps/replica_set.go
@@ -148,9 +148,9 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
if err != nil {
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if getErr == nil {
- err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+ err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else {
- err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
+ err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
}
}
framework.ExpectNoError(err)
11 changes: 5 additions & 6 deletions test/e2e/apps/statefulset.go
@@ -843,7 +843,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
- ss = waitForStatus(c, ss)
+ waitForStatus(c, ss)

ginkgo.By("getting scale subresource")
scale, err := c.AppsV1().StatefulSets(ns).GetScale(context.TODO(), ssName, metav1.GetOptions{})
@@ -1151,7 +1151,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
e2estatefulset.SortStatefulPods(pods)
err = breakPodHTTPProbe(ss, &pods.Items[1])
framework.ExpectNoError(err)
- ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name)
+ ss, _ = waitForPodNotReady(c, ss, pods.Items[1].Name)
newImage := NewWebserverImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image

@@ -1172,7 +1172,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
e2estatefulset.SortStatefulPods(pods)
err = restorePodHTTPProbe(ss, &pods.Items[1])
framework.ExpectNoError(err)
- ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
+ ss, _ = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
ss, pods = waitForRollingUpdate(c, ss)
framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
@@ -1195,9 +1195,8 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
ginkgo.By("Rolling back to a previous revision")
err = breakPodHTTPProbe(ss, &pods.Items[1])
framework.ExpectNoError(err)
- ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name)
+ ss, _ = waitForPodNotReady(c, ss, pods.Items[1].Name)
priorRevision := currentRevision
- currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
@@ -1211,7 +1210,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
pods = e2estatefulset.GetPodList(c, ss)
e2estatefulset.SortStatefulPods(pods)
restorePodHTTPProbe(ss, &pods.Items[1])
- ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
+ ss, _ = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
ss, pods = waitForRollingUpdate(c, ss)
framework.ExpectEqual(ss.Status.CurrentRevision, priorRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
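Note: the statefulset.go edits all address the same kind of report: a result such as the `pods` value from `waitForPodNotReady`, or the `currentRevision, updateRevision` pair, is assigned and then never read before being overwritten, which staticcheck treats as a dead store. The fix keeps only the results that are actually used and blanks or drops the rest. A compact sketch of the rule with made-up names rather than the e2e helpers:

```go
package main

import "fmt"

// statefulSet and waitNotReady are stand-ins for the objects and helpers
// (such as waitForPodNotReady) used in the test; the names are illustrative only.
type statefulSet struct{ revision string }

func waitNotReady(s statefulSet) (statefulSet, []string) {
	return statefulSet{revision: "rev-2"}, []string{"ss-0", "ss-1"}
}

func main() {
	s := statefulSet{revision: "rev-1"}

	// Flagged: `s, pods := waitNotReady(s)` when pods is never read again (dead store).
	// Accepted: keep only the value that is used afterwards.
	s, _ = waitNotReady(s)

	fmt.Println("current revision:", s.revision)
}
```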