Refactor: replace framework.Failf with e2elog.Failf #79162

Merged 1 commit on Jun 21, 2019
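This PR is a mechanical refactor: every call site that aborted a test through framework.Failf now calls Failf from the e2e log helper package directly, with the same format string and arguments. A minimal sketch of the pattern, assuming a call site in a file that does not yet import the package (the helper function below is hypothetical; only the import path and the Failf call come from this diff):

package apps

import (
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// verifyReplicaCount is a hypothetical call site showing the swap: the format
// string and arguments are unchanged, only the package providing Failf differs.
func verifyReplicaCount(got, want int) {
	if got != want {
		// before: framework.Failf("expected %d replicas, got %d", want, got)
		e2elog.Failf("expected %d replicas, got %d", want, got)
	}
}

Files that already import e2elog only need their call sites updated; in the diff below, job.go is the one file that also gains the aliased import.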
4 changes: 2 additions & 2 deletions test/e2e/apps/daemon_restart.go
@@ -274,7 +274,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
}
if len(newKeys.List()) != len(existingKeys.List()) ||
!newKeys.IsSuperset(existingKeys) {
- framework.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker)
+ e2elog.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker)
}
})

@@ -312,7 +312,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
if postRestarts != preRestarts {
framework.DumpNodeDebugInfo(f.ClientSet, badNodes, e2elog.Logf)
- framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
+ e2elog.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
}
})
})
4 changes: 2 additions & 2 deletions test/e2e/apps/daemon_set.go
@@ -418,7 +418,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
case newDS.Spec.Template.Spec.Containers[0].Image:
newPods = append(newPods, &pod)
default:
- framework.Failf("unexpected pod found, image = %s", image)
+ e2elog.Failf("unexpected pod found, image = %s", image)
}
}
schedulableNodes = framework.GetReadySchedulableNodesOrDie(c)
@@ -655,7 +655,7 @@ func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool {
nodeInfo.SetNode(&node)
fit, _, err := daemon.Predicates(newPod, nodeInfo)
if err != nil {
- framework.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err)
+ e2elog.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err)
return false
}
return fit
8 changes: 4 additions & 4 deletions test/e2e/apps/deployment.go
@@ -222,7 +222,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) {
}
return false, nil
}); err != nil {
- framework.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods)
+ e2elog.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods)
}
}

@@ -382,14 +382,14 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
}
numPodCreation--
if numPodCreation < 0 {
- framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
+ e2elog.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
}
pod, ok := event.Object.(*v1.Pod)
if !ok {
- framework.Failf("Expect event Object to be a pod")
+ e2elog.Failf("Expect event Object to be a pod")
}
if pod.Spec.Containers[0].Name != RedisImageName {
- framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod)
+ e2elog.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod)
}
case <-stopCh:
return
3 changes: 2 additions & 1 deletion test/e2e/apps/job.go
@@ -26,6 +26,7 @@ import (
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/test/e2e/framework"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

"github.com/onsi/ginkgo"
@@ -206,7 +207,7 @@ var _ = SIGDescribe("Job", func() {
// updates we need to allow more than backoff+1
// TODO revert this back to above when https://github.com/kubernetes/kubernetes/issues/64787 gets fixed
if len(pods.Items) < backoff+1 {
- framework.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items)
+ e2elog.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items)
}
for _, pod := range pods.Items {
gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodFailed))
30 changes: 15 additions & 15 deletions test/e2e/apps/network_partition.go
@@ -70,7 +70,7 @@ func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
}
}
if !expected {
- framework.Failf("Failed to observe node ready status change to %v", isReady)
+ e2elog.Failf("Failed to observe node ready status change to %v", isReady)
}
}

@@ -120,7 +120,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
framework.SkipUnlessProviderIs("gke", "aws")
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
- framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
+ e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
}
})

@@ -155,12 +155,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
return true
})
if len(nodes.Items) <= 0 {
- framework.Failf("No eligible node were found: %d", len(nodes.Items))
+ e2elog.Failf("No eligible node were found: %d", len(nodes.Items))
}
node := nodes.Items[0]
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
- framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
+ e2elog.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
}

ginkgo.By("Set up watch on node status")
@@ -216,7 +216,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers")
expectNodeReadiness(true, newNode)
if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
- framework.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err)
+ e2elog.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err)
}
}()

@@ -227,7 +227,7 @@
ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition")
expectNodeReadiness(false, newNode)
if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
- framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
+ e2elog.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
}
})
})
@@ -276,7 +276,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
- framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
+ e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}

// sleep a bit, to allow Watch in NodeController to catch up.
@@ -343,7 +343,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
- framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
+ e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
})
})
@@ -416,7 +416,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
- framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
+ e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}

ginkgo.By("waiting for pods to be running again")
@@ -464,7 +464,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
- framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
+ e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
})
})
@@ -498,12 +498,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
return true
})
if len(nodes.Items) <= 0 {
- framework.Failf("No eligible node were found: %d", len(nodes.Items))
+ e2elog.Failf("No eligible node were found: %d", len(nodes.Items))
}
node := nodes.Items[0]
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
if err := e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil {
- framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
+ e2elog.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
}
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
framework.ExpectNoError(err)
@@ -609,7 +609,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
return framework.NodeHasTaint(c, node.Name, nodepkg.UnreachableTaintTemplate)
}))
if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
- framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
+ e2elog.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
}

sleepTime := maxTolerationTime + 20*time.Second
@@ -629,7 +629,7 @@
if pod.DeletionTimestamp == nil {
seenRunning = append(seenRunning, namespacedName)
if shouldBeTerminating {
- framework.Failf("Pod %v should have been deleted but was seen running", namespacedName)
+ e2elog.Failf("Pod %v should have been deleted but was seen running", namespacedName)
}
}
}
@@ -643,7 +643,7 @@
}
}
if !running {
- framework.Failf("Pod %v was evicted even though it shouldn't", neverEvictedPod)
+ e2elog.Failf("Pod %v was evicted even though it shouldn't", neverEvictedPod)
}
}
})
2 changes: 1 addition & 1 deletion test/e2e/apps/rc.go
@@ -167,7 +167,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
if err != nil {
- framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
+ e2elog.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
}
}

2 changes: 1 addition & 1 deletion test/e2e/apps/replica_set.go
@@ -169,7 +169,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
if err != nil {
- framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
+ e2elog.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
}
}

14 changes: 7 additions & 7 deletions test/e2e/apps/statefulset.go
@@ -741,7 +741,7 @@ var _ = SIGDescribe("StatefulSet", func() {

ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name)
if err := f.WaitForPodRunning(podName); err != nil {
- framework.Failf("Pod %v did not start running: %v", podName, err)
+ e2elog.Failf("Pod %v did not start running: %v", podName, err)
}

var initialStatefulPodUID types.UID
@@ -767,7 +767,7 @@
return false, nil
})
if err != nil {
- framework.Failf("Pod %v expected to be re-created at least once", statefulPodName)
+ e2elog.Failf("Pod %v expected to be re-created at least once", statefulPodName)
}

ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name)
@@ -803,7 +803,7 @@
ginkgo.By("getting scale subresource")
scale, err := c.AppsV1().StatefulSets(ns).GetScale(ssName, metav1.GetOptions{})
if err != nil {
- framework.Failf("Failed to get scale subresource: %v", err)
+ e2elog.Failf("Failed to get scale subresource: %v", err)
}
gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1)))
gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1)))
@@ -812,14 +812,14 @@
scale.Spec.Replicas = 2
scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(ssName, scale)
if err != nil {
- framework.Failf("Failed to put scale subresource: %v", err)
+ e2elog.Failf("Failed to put scale subresource: %v", err)
}
gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2)))

ginkgo.By("verifying the statefulset Spec.Replicas was modified")
ss, err = c.AppsV1().StatefulSets(ns).Get(ssName, metav1.GetOptions{})
if err != nil {
- framework.Failf("Failed to get statefulset resource: %v", err)
+ e2elog.Failf("Failed to get statefulset resource: %v", err)
}
gomega.Expect(*(ss.Spec.Replicas)).To(gomega.Equal(int32(2)))
})
@@ -880,7 +880,7 @@ func kubectlExecWithRetries(args ...string) (out string) {
}
e2elog.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out)
}
- framework.Failf("Failed to execute \"%v\" with retries: %v", args, err)
+ e2elog.Failf("Failed to execute \"%v\" with retries: %v", args, err)
return
}

@@ -917,7 +917,7 @@ func (c *clusterAppTester) run() {

ginkgo.By("Reading value under foo from member with index 2")
if err := pollReadWithTimeout(c.statefulPod, 2, "foo", "bar"); err != nil {
- framework.Failf("%v", err)
+ e2elog.Failf("%v", err)
}
}

8 changes: 4 additions & 4 deletions test/e2e/auth/service_accounts.go
@@ -408,7 +408,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}

if hasServiceAccountTokenVolume != tc.ExpectTokenVolume {
- framework.Failf("%s: expected volume=%v, got %v (%#v)", tc.PodName, tc.ExpectTokenVolume, hasServiceAccountTokenVolume, createdPod)
+ e2elog.Failf("%s: expected volume=%v, got %v (%#v)", tc.PodName, tc.ExpectTokenVolume, hasServiceAccountTokenVolume, createdPod)
} else {
e2elog.Logf("pod %s service account token volume mount: %v", tc.PodName, hasServiceAccountTokenVolume)
}
@@ -427,7 +427,7 @@
"ca.crt": string(cfg.TLSClientConfig.CAData),
},
}); err != nil && !apierrors.IsAlreadyExists(err) {
- framework.Failf("Unexpected err creating kube-ca-crt: %v", err)
+ e2elog.Failf("Unexpected err creating kube-ca-crt: %v", err)
}

tenMin := int64(10 * 60)
@@ -493,7 +493,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {

e2elog.Logf("created pod")
if !e2epod.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) {
- framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name)
+ e2elog.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name)
}

e2elog.Logf("pod is ready")
@@ -516,7 +516,7 @@
}
return true, nil
}); err != nil {
- framework.Failf("Unexpected error: %v\n%s", err, logs)
+ e2elog.Failf("Unexpected error: %v\n%s", err, logs)
}
})
})
14 changes: 7 additions & 7 deletions test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -210,7 +210,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
- framework.Failf("TEST_GPU_TYPE not defined")
+ e2elog.Failf("TEST_GPU_TYPE not defined")
return
}

@@ -237,7 +237,7 @@
ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
- framework.Failf("TEST_GPU_TYPE not defined")
+ e2elog.Failf("TEST_GPU_TYPE not defined")
return
}

@@ -267,7 +267,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
- framework.Failf("TEST_GPU_TYPE not defined")
+ e2elog.Failf("TEST_GPU_TYPE not defined")
return
}

@@ -296,7 +296,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
- framework.Failf("TEST_GPU_TYPE not defined")
+ e2elog.Failf("TEST_GPU_TYPE not defined")
return
}

@@ -498,7 +498,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
defer func() {
errs := framework.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)
if len(errs) > 0 {
- framework.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+ e2elog.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pv, pvc = nil, nil
if diskName != "" {
@@ -1300,7 +1300,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
}
}
- framework.Failf("Failed to reserve memory within timeout")
+ e2elog.Failf("Failed to reserve memory within timeout")
return nil
}

@@ -1871,7 +1871,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
}
}
if finalErr != nil {
- framework.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
+ e2elog.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
}
}
