Fix staticcheck failures of test/e2e/storage #85906

Merged · 1 commit · Dec 8, 2019
Changes from all commits
1 change: 0 additions & 1 deletion hack/.staticcheck_failures
@@ -67,7 +67,6 @@ test/e2e/autoscaling
 test/e2e/instrumentation/logging/stackdriver
 test/e2e/instrumentation/monitoring
 test/e2e/manifest
-test/e2e/storage
 test/e2e/storage/drivers
 test/e2e/storage/testsuites
 test/e2e/storage/utils
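Note: hack/.staticcheck_failures is the repository's allowlist of packages still permitted to fail staticcheck; deleting test/e2e/storage from it is what turns the cleanups below into an enforced invariant, since the verifier (hack/verify-staticcheck.sh) will now reject any new violation in that package.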
8 changes: 5 additions & 3 deletions test/e2e/storage/csi_mock_volume.go
@@ -592,19 +592,21 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 })

 func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error {
-    var err error
+    reg, err := regexp.Compile(`max.+volume.+count`)
+    if err != nil {
+        return err
+    }
     waitErr := wait.PollImmediate(10*time.Second, csiPodUnschedulableTimeout, func() (bool, error) {
         pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
         conditions := pod.Status.Conditions
         for _, condition := range conditions {
-            matched, _ := regexp.MatchString("max.+volume.+count", condition.Message)
+            matched := reg.MatchString(condition.Message)
             if condition.Reason == v1.PodReasonUnschedulable && matched {
                 return true, nil
             }
-
         }
         return false, nil
     })
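Note: the old code compiled the pattern implicitly on every poll iteration via regexp.MatchString and threw away its error. Hoisting a single regexp.Compile out of the loop surfaces a malformed pattern immediately and leaves nothing to ignore inside the loop, since a compiled *regexp.Regexp matches without returning an error. A minimal standalone sketch of the same idiom (the messages are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Compile once, up front: a malformed pattern surfaces here as an
	// error instead of being silently discarded on every iteration.
	reg, err := regexp.Compile(`max.+volume.+count`)
	if err != nil {
		fmt.Println("bad pattern:", err)
		return
	}

	messages := []string{
		"0/3 nodes are available: 3 node(s) exceed max volume count",
		"0/3 nodes are available: 3 Insufficient memory",
	}
	for _, msg := range messages {
		// (*regexp.Regexp).MatchString returns only a bool, so there is
		// no error value left to ignore inside the loop.
		fmt.Printf("%q matched: %v\n", msg, reg.MatchString(msg))
	}
}
```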
1 change: 1 addition & 0 deletions test/e2e/storage/empty_dir_wrapper.go
@@ -406,6 +406,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
     }()

     pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
+    framework.ExpectNoError(err, "error creating pods")

     ginkgo.By("Ensuring each pod is running")

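Note: e2epod.PodsCreated returns the created pods plus an error that the old code assigned and then never read, the pattern staticcheck reports as an unused value; the added framework.ExpectNoError fails the test right where pod creation went wrong instead of at a later, more confusing assertion.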
3 changes: 2 additions & 1 deletion test/e2e/storage/flexvolume_mounted_volume_resize.go
@@ -139,7 +139,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
         VolumeMode: pvc.Spec.VolumeMode,
     })

-    pv, err = e2epv.CreatePV(c, pv)
+    _, err = e2epv.CreatePV(c, pv)
     framework.ExpectNoError(err, "Error creating pv %v", err)

     ginkgo.By("Waiting for PVC to be in bound phase")
@@ -173,6 +173,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {

     ginkgo.By("Getting a pod from deployment")
     podList, err := e2edeploy.GetPodsForDeployment(c, deployment)
+    framework.ExpectNoError(err, "While getting pods from deployment")
     gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
     pod := podList.Items[0]

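Note: pv was reassigned here but never read afterwards, which is presumably staticcheck's "value never used" diagnostic (SA4006); writing the result to the blank identifier keeps the error check while making the discard explicit. The identical fix recurs in flexvolume_online_resize.go and subpath.go below. A toy reproduction, with create standing in for e2epv.CreatePV:

```go
package main

import "fmt"

// create stands in for any call that returns an object plus an error.
func create(name string) (string, error) {
	return "pv-" + name, nil
}

func main() {
	// Before: `pv, err := create("demo")` with pv never read again
	// trips staticcheck's unused-value check.
	// After: discard the unused result explicitly but keep the error.
	_, err := create("demo")
	if err != nil {
		fmt.Println("error creating pv:", err)
		return
	}
	fmt.Println("created")
}
```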
2 changes: 1 addition & 1 deletion test/e2e/storage/flexvolume_online_resize.go
@@ -137,7 +137,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
         VolumeMode: pvc.Spec.VolumeMode,
     })

-    pv, err = e2epv.CreatePV(c, pv)
+    _, err = e2epv.CreatePV(c, pv)
     framework.ExpectNoError(err, "Error creating pv %v", err)

     ginkgo.By("Waiting for PVC to be in bound phase")
2 changes: 1 addition & 1 deletion test/e2e/storage/mounted_volume_resize.go
@@ -145,6 +145,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {

     ginkgo.By("Getting a pod from deployment")
     podList, err := e2edeploy.GetPodsForDeployment(c, deployment)
+    framework.ExpectNoError(err, "While getting pods from deployment")
     gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
     pod := podList.Items[0]

@@ -177,7 +178,6 @@ func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *apps
         case v1.PodFailed, v1.PodSucceeded:
             return false, conditions.ErrPodCompleted
         }
-        return false, nil
     }
     return false, err
 })
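Note: the deleted return false, nil sat inside the for loop over the deployment's pods, so the loop body could never reach a second pod, the situation staticcheck describes as "the surrounding loop is unconditionally terminated" (SA4004); with the line gone, false is returned only after every pod has been inspected. A small sketch of the bug, using invented phase strings:

```go
package main

import "fmt"

// anyRunning reports whether any phase in the slice is "Running".
func anyRunning(phases []string) bool {
	for _, p := range phases {
		if p == "Running" {
			return true
		}
		// Bug: an unconditional `return false` here would make the loop
		// examine only phases[0], which is exactly what SA4004 flags.
	}
	// Fix: decide only after the whole slice has been scanned.
	return false
}

func main() {
	fmt.Println(anyRunning([]string{"Pending", "Running"})) // true, not false
}
```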
8 changes: 6 additions & 2 deletions test/e2e/storage/pd.go
@@ -489,13 +489,17 @@ func detachPD(nodeName types.NodeName, pdName string) error {
         return err

     } else if framework.TestContext.Provider == "aws" {
-        client := ec2.New(session.New())
+        awsSession, err := session.NewSession()
+        if err != nil {
+            return fmt.Errorf("error creating session: %v", err)
+        }
+        client := ec2.New(awsSession)
         tokens := strings.Split(pdName, "/")
         awsVolumeID := tokens[len(tokens)-1]
         request := ec2.DetachVolumeInput{
             VolumeId: aws.String(awsVolumeID),
         }
-        _, err := client.DetachVolume(&request)
+        _, err = client.DetachVolume(&request)
         if err != nil {
             return fmt.Errorf("error detaching EBS volume: %v", err)
         }
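Note: session.New is deprecated in the AWS SDK for Go v1 because it has no way to report setup failures such as a broken shared config file; session.NewSession returns that error so the caller can bail out before issuing any API calls. A self-contained sketch of the safer pattern (the region value is illustrative):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func newEC2Client() (*ec2.EC2, error) {
	// NewSession surfaces configuration errors that session.New could
	// only report later, on the first real API request.
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
	if err != nil {
		return nil, fmt.Errorf("error creating session: %v", err)
	}
	return ec2.New(sess), nil
}

func main() {
	client, err := newEC2Client()
	if err != nil {
		fmt.Println(err)
		return
	}
	_ = client // ready for DetachVolume, DescribeVolumes, etc.
}
```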
4 changes: 2 additions & 2 deletions test/e2e/storage/subpath.go
@@ -37,13 +37,13 @@ var _ = utils.SIGDescribe("Subpath", func() {
     ginkgo.BeforeEach(func() {
         ginkgo.By("Setting up data")
         secret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "my-secret"}, Data: map[string][]byte{"secret-key": []byte("secret-value")}}
-        secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
+        _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
         if err != nil && !apierrors.IsAlreadyExists(err) {
             framework.ExpectNoError(err, "while creating secret")
         }

         configmap := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap"}, Data: map[string]string{"configmap-key": "configmap-value"}}
-        configmap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
+        _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
         if err != nil && !apierrors.IsAlreadyExists(err) {
             framework.ExpectNoError(err, "while creating configmap")
         }
10 changes: 5 additions & 5 deletions test/e2e/storage/volume_metrics.go
@@ -126,7 +126,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
         framework.ExpectNoError(err)

         err = e2epod.WaitForPodRunningInNamespace(c, pod)
-        framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod %s", pod.Name)
+        framework.ExpectNoError(err, "Error starting pod %s", pod.Name)

         framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
         framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
@@ -212,7 +212,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
         framework.ExpectNoError(err)

         err = e2epod.WaitForPodRunningInNamespace(c, pod)
-        framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
+        framework.ExpectNoError(err, "Error starting pod ", pod.Name)

         pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
         framework.ExpectNoError(err)
@@ -269,7 +269,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
         framework.ExpectNoError(err)

         err = e2epod.WaitForPodRunningInNamespace(c, pod)
-        framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
+        framework.ExpectNoError(err, "Error starting pod ", pod.Name)

         pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
         framework.ExpectNoError(err)
@@ -300,7 +300,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
         framework.ExpectNoError(err)

         err = e2epod.WaitForPodRunningInNamespace(c, pod)
-        framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
+        framework.ExpectNoError(err, "Error starting pod ", pod.Name)

         pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
         framework.ExpectNoError(err)
@@ -337,7 +337,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
         pod, err = c.CoreV1().Pods(ns).Create(pod)
         framework.ExpectNoError(err)
         err = e2epod.WaitForPodRunningInNamespace(c, pod)
-        framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
+        framework.ExpectNoError(err, "Error starting pod ", pod.Name)
         pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
         framework.ExpectNoError(err)
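Note: all five hunks in this file fix the same copy-paste bug: WaitForPodRunningInNamespace was invoked twice, once with the result stored in err (which then sat unused, tripping staticcheck) and again inside ExpectNoError, so each test performed the potentially slow wait twice. The fix asserts on the err already captured. In miniature:

```go
package main

import "fmt"

// slowWait stands in for a polling helper such as
// e2epod.WaitForPodRunningInNamespace.
func slowWait() error {
	fmt.Println("polling...") // imagine seconds of work here
	return nil
}

func main() {
	// Before: err := slowWait() followed by assert(slowWait()) ran the
	// wait twice and left err unused. After: wait once, assert on the
	// stored result.
	if err := slowWait(); err != nil {
		fmt.Println("error starting pod:", err)
	}
}
```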
17 changes: 11 additions & 6 deletions test/e2e/storage/volume_provisioning.go
@@ -69,14 +69,20 @@ func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool)
     volumeID := tokens[len(tokens)-1]

     zone := framework.TestContext.CloudConfig.Zone
+
+    awsSession, err := session.NewSession()
+    if err != nil {
+        return fmt.Errorf("error creating session: %v", err)
+    }
+
     if len(zone) > 0 {
         region := zone[:len(zone)-1]
         cfg := aws.Config{Region: &region}
         framework.Logf("using region %s", region)
-        client = ec2.New(session.New(), &cfg)
+        client = ec2.New(awsSession, &cfg)
     } else {
         framework.Logf("no region configured")
-        client = ec2.New(session.New())
+        client = ec2.New(awsSession)
     }

     request := &ec2.DescribeVolumesInput{
@@ -414,14 +420,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
     var suffix string = "unmananged"

     ginkgo.By("Discovering an unmanaged zone")
-    allZones := sets.NewString()     // all zones in the project
-    managedZones := sets.NewString() // subset of allZones
+    allZones := sets.NewString() // all zones in the project

     gceCloud, err := gce.GetGCECloud()
     framework.ExpectNoError(err)

     // Get all k8s managed zones (same as zones with nodes in them for test)
-    managedZones, err = gceCloud.GetAllZonesFromCloudProvider()
+    managedZones, err := gceCloud.GetAllZonesFromCloudProvider()
     framework.ExpectNoError(err)

     // Get a list of all zones in the project
@@ -864,7 +869,7 @@ func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr
         sc.Annotations[storageutil.IsDefaultStorageClassAnnotation] = defaultStr
     }

-    sc, err = c.StorageV1().StorageClasses().Update(sc)
+    _, err = c.StorageV1().StorageClasses().Update(sc)
     framework.ExpectNoError(err)

     expectedDefault := false
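Note: this file combines the patterns above: one error-checked session.NewSession replaces two bare session.New calls and is reused by both branches, the redundant managedZones pre-declaration gives way to := at the call that actually populates it, and the unused StorageClass returned by Update is discarded with the blank identifier.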