Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

fix staticcheck in test/e2e/network/ #85893

Merged
merged 1 commit into from Dec 7, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
1 change: 0 additions & 1 deletion hack/.staticcheck_failures
Expand Up @@ -69,7 +69,6 @@ test/e2e/autoscaling
test/e2e/instrumentation/logging/stackdriver
test/e2e/instrumentation/monitoring
test/e2e/manifest
test/e2e/network
test/e2e/storage
test/e2e/storage/drivers
test/e2e/storage/testsuites
Expand Down
2 changes: 1 addition & 1 deletion test/e2e/network/dns.go
Expand Up @@ -212,7 +212,7 @@ var _ = SIGDescribe("DNS", func() {
// All the names we need to be able to resolve.
// for headless service.
namesToResolve := []string{
fmt.Sprintf("%s", headlessService.Name),
headlessService.Name,
fmt.Sprintf("%s.%s", headlessService.Name, f.Namespace.Name),
fmt.Sprintf("%s.%s.svc", headlessService.Name, f.Namespace.Name),
fmt.Sprintf("_http._tcp.%s.%s.svc", headlessService.Name, f.Namespace.Name),
Expand Down
14 changes: 1 addition & 13 deletions test/e2e/network/dns_common.go
Expand Up @@ -81,10 +81,6 @@ func (t *dnsTestCommon) init() {
}
}

// checkDNSRecord polls the cluster's default DNS service ("kube-dns")
// for name until predicate accepts the returned records, failing the
// test if that does not happen within timeout. It is a convenience
// wrapper around checkDNSRecordFrom with the default target.
func (t *dnsTestCommon) checkDNSRecord(name string, predicate func([]string) bool, timeout time.Duration) {
	const defaultTarget = "kube-dns"
	t.checkDNSRecordFrom(name, predicate, defaultTarget, timeout)
}

func (t *dnsTestCommon) checkDNSRecordFrom(name string, predicate func([]string) bool, target string, timeout time.Duration) {
var actual []string

Expand Down Expand Up @@ -118,7 +114,6 @@ func (t *dnsTestCommon) runDig(dnsName, target string) []string {
case "cluster-dns":
case "cluster-dns-ipv6":
cmd = append(cmd, "AAAA")
break
default:
panic(fmt.Errorf("invalid target: " + target))
}
Expand Down Expand Up @@ -269,6 +264,7 @@ func (t *dnsTestCommon) deleteCoreDNSPods() {
options := metav1.ListOptions{LabelSelector: label.String()}

pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(options)
framework.ExpectNoError(err, "failed to list pods of kube-system with label %q", label.String())
podClient := t.c.CoreV1().Pods(metav1.NamespaceSystem)

for _, pod := range pods.Items {
Expand Down Expand Up @@ -614,14 +610,6 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}

// reverseArray reverses arr in place and returns the same slice so the
// call can be chained. Note the caller's backing array is mutated.
func reverseArray(arr []string) []string {
	for left, right := 0, len(arr)-1; left < right; left, right = left+1, right-1 {
		arr[left], arr[right] = arr[right], arr[left]
	}
	return arr
}

func generateDNSUtilsPod() *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Expand Down
2 changes: 1 addition & 1 deletion test/e2e/network/ingress.go
Expand Up @@ -181,7 +181,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
return true, nil
})
if pollErr != nil {
framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, instanceGroupAnnotation))
framework.ExpectNoError(fmt.Errorf("timed out waiting for ingress %s to get %s annotation", name, instanceGroupAnnotation))
}

// Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc.
Expand Down
2 changes: 2 additions & 0 deletions test/e2e/network/kube_proxy.go
Expand Up @@ -172,6 +172,8 @@ var _ = SIGDescribe("Network", func() {
}

jsonBytes, err := json.Marshal(options)
framework.ExpectNoError(err, "could not marshal")

cmd := fmt.Sprintf(
`curl -X POST http://localhost:%v/run/nat-closewait-client -d `+
`'%v' 2>/dev/null`,
Expand Down
2 changes: 1 addition & 1 deletion test/e2e/network/network_tiers.go
Expand Up @@ -134,7 +134,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
framework.ExpectEqual(svcTier, cloud.NetworkTierStandard)

// Wait until the ingress IP changes and verifies the LB.
ingressIP = waitAndVerifyLBWithTier(jig, ingressIP, createTimeout, lagTimeout)
waitAndVerifyLBWithTier(jig, ingressIP, createTimeout, lagTimeout)
})
})

Expand Down
4 changes: 2 additions & 2 deletions test/e2e/network/no_snat.go
Expand Up @@ -24,7 +24,7 @@ import (
"strings"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
Expand Down Expand Up @@ -117,7 +117,7 @@ func getIP(iptype v1.NodeAddressType, node *v1.Node) (string, error) {

func getSchedulable(nodes []v1.Node) (*v1.Node, error) {
for _, node := range nodes {
if node.Spec.Unschedulable == false {
if !node.Spec.Unschedulable {
return &node, nil
}
}
Expand Down
14 changes: 7 additions & 7 deletions test/e2e/network/scale/ingress.go
Expand Up @@ -23,7 +23,7 @@ import (
"time"

appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
Expand Down Expand Up @@ -118,7 +118,7 @@ func (f *IngressScaleFramework) PrepareScaleTest() error {
Cloud: f.CloudConfig,
}
if err := f.GCEController.Init(); err != nil {
return fmt.Errorf("Failed to initialize GCE controller: %v", err)
return fmt.Errorf("failed to initialize GCE controller: %v", err)
}

f.ScaleTestSvcs = []*v1.Service{}
Expand All @@ -135,22 +135,22 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error {
for _, ing := range f.ScaleTestIngs {
if ing != nil {
if err := f.Clientset.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil); err != nil {
errs = append(errs, fmt.Errorf("Error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
}
}
}
f.Logger.Infof("Cleaning up services...")
for _, svc := range f.ScaleTestSvcs {
if svc != nil {
if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil); err != nil {
errs = append(errs, fmt.Errorf("Error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err))
errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err))
}
}
}
if f.ScaleTestDeploy != nil {
f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name)
if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(f.ScaleTestDeploy.Name, nil); err != nil {
errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
errs = append(errs, fmt.Errorf("error while delting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
}
}

Expand All @@ -170,15 +170,15 @@ func (f *IngressScaleFramework) RunScaleTest() []error {
f.Logger.Infof("Creating deployment %s...", testDeploy.Name)
testDeploy, err := f.Jig.Client.AppsV1().Deployments(f.Namespace).Create(testDeploy)
if err != nil {
errs = append(errs, fmt.Errorf("Failed to create deployment %s: %v", testDeploy.Name, err))
errs = append(errs, fmt.Errorf("failed to create deployment %s: %v", testDeploy.Name, err))
return errs
}
f.ScaleTestDeploy = testDeploy

if f.EnableTLS {
f.Logger.Infof("Ensuring TLS secret %s...", scaleTestSecretName)
if err := f.Jig.PrepareTLSSecret(f.Namespace, scaleTestSecretName, scaleTestHostname); err != nil {
errs = append(errs, fmt.Errorf("Failed to prepare TLS secret %s: %v", scaleTestSecretName, err))
errs = append(errs, fmt.Errorf("failed to prepare TLS secret %s: %v", scaleTestSecretName, err))
return errs
}
}
Expand Down
14 changes: 7 additions & 7 deletions test/e2e/network/service.go
Expand Up @@ -933,24 +933,24 @@ var _ = SIGDescribe("Services", func() {
// Change the services back to ClusterIP.

ginkgo.By("changing TCP service back to type=ClusterIP")
tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
_, err = tcpJig.UpdateService(func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.Ports[0].NodePort = 0
})
framework.ExpectNoError(err)
// Wait for the load balancer to be destroyed asynchronously
tcpService, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
_, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
framework.ExpectNoError(err)

ginkgo.By("changing UDP service back to type=ClusterIP")
udpService, err = udpJig.UpdateService(func(s *v1.Service) {
_, err = udpJig.UpdateService(func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.Ports[0].NodePort = 0
})
framework.ExpectNoError(err)
if loadBalancerSupportsUDP {
// Wait for the load balancer to be destroyed asynchronously
udpService, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout)
_, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout)
framework.ExpectNoError(err)
}

Expand Down Expand Up @@ -1381,7 +1381,7 @@ var _ = SIGDescribe("Services", func() {
service = t.BuildServiceSpec()
service.Spec.Type = v1.ServiceTypeNodePort
service.Spec.Ports[0].NodePort = nodePort
service, err = t.CreateService(service)
_, err = t.CreateService(service)
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
})

Expand Down Expand Up @@ -2516,12 +2516,12 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
}
if isTransitionTest {
svc, err = jig.UpdateService(func(svc *v1.Service) {
_, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
})
framework.ExpectNoError(err)
gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, false)).To(gomega.BeTrue())
svc, err = jig.UpdateService(func(svc *v1.Service) {
_, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
})
framework.ExpectNoError(err)
Expand Down
9 changes: 4 additions & 5 deletions test/e2e/network/service_latency.go
Expand Up @@ -258,11 +258,10 @@ func (eq *endpointQueries) join() {
delete(eq.requests, got.Name)
req.endpoints = got
close(req.result)
} else {
// We've already recorded a result, but
// haven't gotten the request yet. Only
// keep the first result.
}
// We've already recorded a result, but
// haven't gotten the request yet. Only
// keep the first result.
} else {
// We haven't gotten the corresponding request
// yet, save this result.
Expand Down Expand Up @@ -352,7 +351,7 @@ func singleServiceLatency(f *framework.Framework, name string, q *endpointQuerie
framework.Logf("Created: %v", gotSvc.Name)

if e := q.request(gotSvc.Name); e == nil {
return 0, fmt.Errorf("Never got a result for endpoint %v", gotSvc.Name)
return 0, fmt.Errorf("never got a result for endpoint %v", gotSvc.Name)
}
stopTime := time.Now()
d := stopTime.Sub(startTime)
Expand Down