Fix static check for test/e2e/framework #81715

Merged: 1 commit, merged on Aug 23, 2019.
hack/.staticcheck_failures (11 changes: 0 additions & 11 deletions)

@@ -131,17 +131,6 @@ test/e2e/apps
 test/e2e/auth
 test/e2e/autoscaling
 test/e2e/common
-test/e2e/framework
-test/e2e/framework/ingress
-test/e2e/framework/kubelet
-test/e2e/framework/node
-test/e2e/framework/pod
-test/e2e/framework/podlogs
-test/e2e/framework/providers/aws
-test/e2e/framework/providers/gce
-test/e2e/framework/psp
-test/e2e/framework/service
-test/e2e/framework/volume
 test/e2e/instrumentation/logging/stackdriver
 test/e2e/instrumentation/monitoring
 test/e2e/lifecycle
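Note: hack/.staticcheck_failures is an allowlist; packages named in it are tolerated by the staticcheck gate (hack/verify-staticcheck.sh, if I read the hack/ scripts right), so removing these eleven entries is what forces every fix below. Any future staticcheck finding in test/e2e/framework will now fail verification.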
test/e2e/framework/ingress/ingress_utils.go (6 changes: 0 additions & 6 deletions)

@@ -95,12 +95,6 @@ const (
 	// IngressReqTimeout is the timeout on a single http request.
 	IngressReqTimeout = 10 * time.Second
 
-	// healthz port used to verify glbc restarted correctly on the master.
-	glbcHealthzPort = 8086
-
-	// General cloud resource poll timeout (eg: create static ip, firewall etc)
-	cloudResourcePollTimeout = 5 * time.Minute
-
 	// NEGAnnotation is NEG annotation.
 	NEGAnnotation = "cloud.google.com/neg"
 
test/e2e/framework/kubelet/stats.go (2 changes: 0 additions & 2 deletions)

@@ -304,14 +304,12 @@ func GetOneTimeResourceUsageOnNode(
 	// Process container infos that are relevant to us.
 	containers := containerNames()
 	usageMap := make(ResourceUsagePerContainer, len(containers))
-	observedContainers := []string{}
 	for _, pod := range summary.Pods {
 		for _, container := range pod.Containers {
 			isInteresting := false
 			for _, interestingContainerName := range containers {
 				if container.Name == interestingContainerName {
 					isInteresting = true
-					observedContainers = append(observedContainers, container.Name)
 					break
 				}
 			}
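The deleted observedContainers slice was only ever appended to and never read, which staticcheck reports as SA4010 ("this result of append is never used"). A minimal sketch of the pattern, with hypothetical names:

```go
package main

import "fmt"

func main() {
	interesting := []string{"kubelet", "kube-proxy"}
	observed := []string{} // only ever appended to, never read

	for _, name := range []string{"kubelet", "etcd"} {
		for _, want := range interesting {
			if name == want {
				// staticcheck SA4010: this result of append is never used
				observed = append(observed, name)
				break
			}
		}
	}
	fmt.Println("scan complete") // observed is never printed or returned
}
```

Deleting the dead bookkeeping, as this commit does, is the cleanest fix.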
test/e2e/framework/networking_utils.go (16 changes: 0 additions & 16 deletions)

@@ -621,18 +621,6 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
 	}
 }
 
-func (config *NetworkingTestConfig) cleanup() {
-	nsClient := config.getNamespacesClient()
-	nsList, err := nsClient.List(metav1.ListOptions{})
-	if err == nil {
-		for _, ns := range nsList.Items {
-			if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace {
-				nsClient.Delete(ns.Name, nil)
-			}
-		}
-	}
-}
-
 // shuffleNodes copies nodes from the specified slice into a copy in random
 // order. It returns a new slice.
 func shuffleNodes(nodes []v1.Node) []v1.Node {
@@ -713,10 +701,6 @@ func (config *NetworkingTestConfig) getServiceClient() coreclientset.ServiceInterface {
 	return config.f.ClientSet.CoreV1().Services(config.Namespace)
 }
 
-func (config *NetworkingTestConfig) getNamespacesClient() coreclientset.NamespaceInterface {
-	return config.f.ClientSet.CoreV1().Namespaces()
-}
-
 // CheckReachabilityFromPod checks reachability from the specified pod.
 func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, namespace, pod, target string) {
 	cmd := fmt.Sprintf("wget -T 5 -qO- %q", target)
test/e2e/framework/node/resource.go (5 changes: 3 additions & 2 deletions)

@@ -96,9 +96,10 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType,
 			if !hasNodeControllerTaints {
 				msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
 					conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
+			} else {
+				msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
+					conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
 			}
-			msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
-				conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
 			if !silent {
 				e2elog.Logf(msg)
 			}
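In the old code msg was assigned inside the if and then unconditionally overwritten, so the first value could never be observed; staticcheck reports such dead stores as SA4006, and here the diagnostic also exposed a logic bug: the taint message always won. A distilled sketch of the before state (hypothetical names):

```go
package main

import "fmt"

func describe(tainted bool) string {
	var msg string
	if !tainted {
		msg = "condition mismatch" // SA4006: this value of msg is never used...
	}
	msg = "node is tainted" // ...because it is unconditionally overwritten here
	return msg
}

func main() {
	fmt.Println(describe(false)) // prints "node is tainted" even when untainted
}
```

Moving the second assignment into an else branch, as the hunk above does, fixes both the diagnostic and the behavior.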
test/e2e/framework/pod/resource.go (14 changes: 0 additions & 14 deletions)

@@ -58,20 +58,6 @@ func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
 	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
 }
 
-// TODO: Move to its own subpkg.
-// expectNoErrorWithRetries checks if an error occurs with the given retry count.
-func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
-	var err error
-	for i := 0; i < maxRetries; i++ {
-		err = fn()
-		if err == nil {
-			return
-		}
-		e2elog.Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err)
-	}
-	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
-}
-
 func isElementOf(podUID types.UID, pods *v1.PodList) bool {
 	for _, pod := range pods.Items {
 		if pod.UID == podUID {
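expectNoErrorWithRetries had no callers left, which staticcheck's unused check reports as U1000; the same diagnostic plausibly accounts for the deletions in ingress_utils.go, networking_utils.go, and suites.go in this PR. A minimal reproduction (hypothetical package and names):

```go
package podutil

import "fmt"

// Report is the package's only entry point.
func Report() string { return helper() }

// helper is reachable from Report, so staticcheck accepts it.
func helper() string { return "ok" }

// retryHelper has no callers anywhere in the package; staticcheck reports
// "func retryHelper is unused (U1000)". Unlike unused imports or unused
// local variables, unused functions are not compile errors in Go, so only
// a linter catches them.
func retryHelper(fn func() error, maxRetries int) error {
	var err error
	for i := 0; i < maxRetries; i++ {
		if err = fn(); err == nil {
			return nil
		}
		fmt.Printf("attempt %d of %d failed: %v\n", i+1, maxRetries, err)
	}
	return err
}
```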
test/e2e/framework/podlogs/podlogs.go (6 changes: 3 additions & 3 deletions)

@@ -251,10 +251,10 @@ func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Writer) error {
 				)
 			} else if cst.State.Running != nil {
 				fmt.Fprintf(buffer, "RUNNING")
-			} else if cst.State.Waiting != nil {
+			} else if cst.State.Terminated != nil {
 				fmt.Fprintf(buffer, "TERMINATED: %s - %s",
-					cst.State.Waiting.Reason,
-					cst.State.Waiting.Message,
+					cst.State.Terminated.Reason,
+					cst.State.Terminated.Message,
 				)
 			}
 			fmt.Fprintf(buffer, "\n")
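This hunk fixes a genuine copy-paste bug rather than plain lint noise: the branch tested State.Waiting yet printed a TERMINATED label using Waiting's Reason and Message, so terminated containers were never reported correctly. A dependency-free sketch of the corrected shape (simplified types, hypothetical names):

```go
package main

import "fmt"

type detail struct{ Reason, Message string }

type containerState struct {
	Running, Waiting, Terminated *detail
}

func report(s containerState) string {
	switch {
	case s.Running != nil:
		return "RUNNING"
	case s.Terminated != nil: // the old code tested Waiting here
		return fmt.Sprintf("TERMINATED: %s - %s", s.Terminated.Reason, s.Terminated.Message)
	}
	return "UNKNOWN"
}

func main() {
	done := containerState{Terminated: &detail{Reason: "Completed", Message: "exit 0"}}
	fmt.Println(report(done)) // TERMINATED: Completed - exit 0
}
```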
test/e2e/framework/providers/aws/aws.go (18 changes: 15 additions & 3 deletions)

@@ -50,13 +50,21 @@ type Provider struct {
 
 // ResizeGroup resizes an instance group
 func (p *Provider) ResizeGroup(group string, size int32) error {
-	client := autoscaling.New(session.New())
+	awsSession, err := session.NewSession()
+	if err != nil {
+		return err
+	}
+	client := autoscaling.New(awsSession)
 	return awscloud.ResizeInstanceGroup(client, group, int(size))
 }
 
 // GroupSize returns the size of an instance group
 func (p *Provider) GroupSize(group string) (int, error) {
-	client := autoscaling.New(session.New())
+	awsSession, err := session.NewSession()
+	if err != nil {
+		return -1, err
+	}
+	client := autoscaling.New(awsSession)
 	instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
 	if err != nil {
 		return -1, fmt.Errorf("error describing instance group: %v", err)
@@ -151,5 +159,9 @@ func newAWSClient(zone string) *ec2.EC2 {
 		region := zone[:len(zone)-1]
 		cfg = &aws.Config{Region: aws.String(region)}
 	}
-	return ec2.New(session.New(), cfg)
+	session, err := session.NewSession()
+	if err != nil {
+		e2elog.Logf("Warning: failed to create aws session")
+	}
+	return ec2.New(session, cfg)
 }
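session.New is deprecated in aws-sdk-go precisely because it cannot report configuration errors, and staticcheck flags uses of deprecated identifiers as SA1019; session.NewSession returns the error instead, which the first two hunks propagate to their callers. A minimal sketch of the replacement pattern (the region value is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Deprecated (SA1019): session.New(cfg) swallows configuration errors.
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
	if err != nil {
		log.Fatalf("creating AWS session: %v", err)
	}
	svc := ec2.New(sess) // ready for DescribeInstances etc.
	fmt.Println("EC2 client created:", svc != nil)
}
```

Note that newAWSClient above only logs the failure and still hands the session to ec2.New, while the two Provider methods take the stricter route of returning the error.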
test/e2e/framework/providers/gce/ingress.go (3 changes: 0 additions & 3 deletions)

@@ -70,11 +70,8 @@ type backendType string
 // IngressController manages implementation details of Ingress on GCE/GKE.
 type IngressController struct {
 	Ns           string
-	rcPath       string
 	UID          string
 	staticIPName string
-	rc           *v1.ReplicationController
-	svc          *v1.Service
 	Client       clientset.Interface
 	Cloud        framework.CloudConfig
 }
test/e2e/framework/providers/gce/recreate_node.go (1 change: 1 addition & 0 deletions)

@@ -58,6 +58,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
 		e2elog.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes))
 
 		ps, err = testutils.NewPodStore(f.ClientSet, systemNamespace, labels.Everything(), fields.Everything())
+		framework.ExpectNoError(err)
 		allPods := ps.List()
 		originalPods := e2epod.FilterNonRestartablePods(allPods)
 		originalPodNames = make([]string, len(originalPods))
test/e2e/framework/psp/psp.go (2 changes: 1 addition & 1 deletion)

@@ -118,7 +118,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
 	}
 
 	psp := privilegedPSP(podSecurityPolicyPrivileged)
-	psp, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(psp)
+	_, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(psp)
 	if !apierrs.IsAlreadyExists(err) {
 		ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
 	}
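The object returned by Create was assigned back to psp but never read afterwards, another SA4006 finding; assigning the result to the blank identifier documents that only the error matters. The service/jig.go and volume/fixtures.go hunks below fix the same pattern. A distilled example (hypothetical names):

```go
package main

import (
	"errors"
	"fmt"
)

type policy struct{ name string }

func create(p policy) (policy, error) {
	if p.name == "" {
		return policy{}, errors.New("name required")
	}
	return p, nil
}

func main() {
	p := policy{name: "privileged"}
	// Before: p, err := create(p). staticcheck reports SA4006 because the
	// returned policy is never read afterwards; only the error is checked.
	_, err := create(p)
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("created", p.name)
}
```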
test/e2e/framework/service/jig.go (8 changes: 4 additions & 4 deletions)

@@ -229,7 +229,7 @@ func (j *TestJig) CreateOnlyLocalNodePortService(namespace, serviceName string,
 func (j *TestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName string, timeout time.Duration, createPod bool,
 	tweak func(svc *v1.Service)) *v1.Service {
 	ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer and ExternalTrafficPolicy=Local")
-	svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
+	j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
 		svc.Spec.Type = v1.ServiceTypeLoadBalancer
 		// We need to turn affinity off for our LB distribution tests
 		svc.Spec.SessionAffinity = v1.ServiceAffinityNone
@@ -244,7 +244,7 @@ func (j *TestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName string,
 		j.RunOrFail(namespace, nil)
 	}
 	ginkgo.By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
-	svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
+	svc := j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
 	j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
 	return svc
 }
@@ -253,7 +253,7 @@ func (j *TestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName string,
 // for it to acquire an ingress IP.
 func (j *TestJig) CreateLoadBalancerService(namespace, serviceName string, timeout time.Duration, tweak func(svc *v1.Service)) *v1.Service {
 	ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer")
-	svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
+	j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
 		svc.Spec.Type = v1.ServiceTypeLoadBalancer
 		// We need to turn affinity off for our LB distribution tests
 		svc.Spec.SessionAffinity = v1.ServiceAffinityNone
@@ -263,7 +263,7 @@ func (j *TestJig) CreateLoadBalancerService(namespace, serviceName string, timeout
 	})
 
 	ginkgo.By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
-	svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
+	svc := j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
 	j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
 	return svc
 }
test/e2e/framework/suites.go (4 changes: 0 additions & 4 deletions)

@@ -31,10 +31,6 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
 
-var (
-	cloudConfig = &TestContext.CloudConfig
-)
-
 // SetupSuite is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step.
 // There are certain operations we only want to run once per overall test invocation
 // (such as deleting old namespaces, or verifying that all system pods are running.
test/e2e/framework/util.go (58 changes: 10 additions & 48 deletions)

@@ -37,7 +37,6 @@ import (
 	"strings"
 	"sync"
 	"syscall"
-	"text/tabwriter"
 	"time"
 
 	"golang.org/x/net/websocket"
@@ -120,9 +119,6 @@ const (
 	// failures caused by leaked resources from a previous test run.
 	NamespaceCleanupTimeout = 15 * time.Minute
 
-	// Some pods can take much longer to get ready due to volume attach/detach latency.
-	slowPodStartTimeout = 15 * time.Minute
-
 	// ServiceStartTimeout is how long to wait for a service endpoint to be resolvable.
 	ServiceStartTimeout = 3 * time.Minute
 
@@ -149,10 +145,6 @@ const (
 	// PodReadyBeforeTimeout is how long pods have to be "ready" when a test begins.
 	PodReadyBeforeTimeout = 5 * time.Minute
 
-	// How long pods have to become scheduled onto nodes
-	podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)
-
-	podRespondingTimeout = 15 * time.Minute
 	// ClaimProvisionTimeout is how long claims have to become dynamically provisioned.
 	ClaimProvisionTimeout = 5 * time.Minute
 
@@ -214,13 +206,6 @@ const (
 	// For parsing Kubectl version for version-skewed testing.
 	gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
 
-	// Slice of regexps for names of pods that have to be running to consider a Node "healthy"
-	requiredPerNodePods = []*regexp.Regexp{
-		regexp.MustCompile(".*kube-proxy.*"),
-		regexp.MustCompile(".*fluentd-elasticsearch.*"),
-		regexp.MustCompile(".*node-problem-detector.*"),
-	}
-
 	// ServeHostnameImage is a serve hostname image name.
 	ServeHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost)
 )
@@ -438,7 +423,7 @@ func getDefaultClusterIPFamily(c clientset.Interface) string {
 // ProviderIs returns true if the provider is included is the providers. Otherwise false.
 func ProviderIs(providers ...string) bool {
 	for _, provider := range providers {
-		if strings.ToLower(provider) == strings.ToLower(TestContext.Provider) {
+		if strings.EqualFold(provider, TestContext.Provider) {
 			return true
 		}
 	}
@@ -448,7 +433,7 @@ func ProviderIs(providers ...string) bool {
 // MasterOSDistroIs returns true if the master OS distro is included in the supportedMasterOsDistros. Otherwise false.
 func MasterOSDistroIs(supportedMasterOsDistros ...string) bool {
 	for _, distro := range supportedMasterOsDistros {
-		if strings.ToLower(distro) == strings.ToLower(TestContext.MasterOSDistro) {
+		if strings.EqualFold(distro, TestContext.MasterOSDistro) {
 			return true
 		}
 	}
@@ -458,7 +443,7 @@ func MasterOSDistroIs(supportedMasterOsDistros ...string) bool {
 // NodeOSDistroIs returns true if the node OS distro is included in the supportedNodeOsDistros. Otherwise false.
 func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
 	for _, distro := range supportedNodeOsDistros {
-		if strings.ToLower(distro) == strings.ToLower(TestContext.NodeOSDistro) {
+		if strings.EqualFold(distro, TestContext.NodeOSDistro) {
 			return true
 		}
 	}
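strings.ToLower(a) == strings.ToLower(b) allocates two lowered copies on every comparison; staticcheck suggests the allocation-free strings.EqualFold instead (SA6005). EqualFold performs Unicode case folding, which agrees with the ToLower comparison for the ASCII provider and distro names used here. A sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func providerIs(current string, providers ...string) bool {
	for _, p := range providers {
		// Before: strings.ToLower(p) == strings.ToLower(current)
		// staticcheck SA6005: inefficient string comparison
		if strings.EqualFold(p, current) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(providerIs("GCE", "gce", "aws"))   // true
	fmt.Println(providerIs("azure", "gce", "aws")) // false
}
```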
@@ -522,32 +507,6 @@ func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) {
 // ProvidersWithSSH are those providers where each node is accessible with SSH
 var ProvidersWithSSH = []string{"gce", "gke", "aws", "local"}
 
-type podCondition func(pod *v1.Pod) (bool, error)
-
-// errorBadPodsStates create error message of basic info of bad pods for debugging.
-func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
-	errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
-	// Print bad pods info only if there are fewer than 10 bad pods
-	if len(badPods) > 10 {
-		return errStr + "There are too many bad pods. Please check log for details."
-	}
-
-	buf := bytes.NewBuffer(nil)
-	w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
-	fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
-	for _, badPod := range badPods {
-		grace := ""
-		if badPod.DeletionGracePeriodSeconds != nil {
-			grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
-		}
-		podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v",
-			badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
-		fmt.Fprintln(w, podInfo)
-	}
-	w.Flush()
-	return errStr + buf.String()
-}
-
 // WaitForDaemonSets for all daemonsets in the given namespace to be ready
 // (defined as all but 'allowedNotReadyNodes' pods associated with that
 // daemonset are ready).
@@ -1557,14 +1516,14 @@ func (b KubectlBuilder) ExecOrDie() string {
 
 func isTimeout(err error) bool {
 	switch err := err.(type) {
-	case net.Error:
-		if err.Timeout() {
-			return true
-		}
 	case *url.Error:
 		if err, ok := err.Err.(net.Error); ok && err.Timeout() {
 			return true
 		}
+	case net.Error:
+		if err.Timeout() {
+			return true
+		}
 	}
 	return false
 }
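The isTimeout reordering fixes an unreachable case: type-switch cases are tried top to bottom, and *url.Error has Timeout and Temporary methods, so it satisfies the net.Error interface. With case net.Error listed first, the *url.Error case could never match, which staticcheck reports as SA4020 ("unreachable case clause in a type switch"). Putting the concrete type before the interface restores the intended unwrapping:

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"net/url"
)

func isTimeout(err error) bool {
	switch err := err.(type) {
	case *url.Error: // concrete type must come first...
		if nerr, ok := err.Err.(net.Error); ok && nerr.Timeout() {
			return true
		}
	case net.Error: // ...or this interface case would shadow it (SA4020)
		return err.Timeout()
	}
	return false
}

func main() {
	wrapped := &url.Error{Op: "Get", URL: "http://example.com", Err: &net.DNSError{IsTimeout: true}}
	fmt.Println(isTimeout(wrapped))            // true
	fmt.Println(isTimeout(errors.New("boom"))) // false
}
```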
@@ -2472,6 +2431,9 @@ func RestartKubelet(host string) error {
 		sudoPresent = true
 	}
 	sshResult, err = e2essh.SSH("systemctl --version", host, TestContext.Provider)
+	if err != nil {
+		return fmt.Errorf("Failed to execute command 'systemctl' on host %s with error %v", host, err)
+	}
 	if !strings.Contains(sshResult.Stderr, "command not found") {
 		cmd = "systemctl restart kubelet"
 	} else {
test/e2e/framework/volume/fixtures.go (2 changes: 1 addition & 1 deletion)

@@ -185,7 +185,7 @@ func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, ip string) {
 			},
 		},
 	}
-	endpoints, err := cs.CoreV1().Endpoints(namespace).Create(endpoints)
+	_, err := cs.CoreV1().Endpoints(namespace).Create(endpoints)
 	framework.ExpectNoError(err, "failed to create endpoints for Gluster server")
 
 	return config, pod, ip