Use log functions of core framework on test/e2e/scheduling #81982

Merged
merged 1 commit on Sep 11, 2019
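Every edit in this PR follows the same mechanical pattern: calls made through the e2elog alias of k8s.io/kubernetes/test/e2e/framework/log are switched to the equivalent Logf/Failf helpers exported by the core framework package, after which the extra import and its //test/e2e/framework/log Bazel dependency can be dropped. A minimal sketch of the pattern (the file and the logResult helper below are hypothetical; framework.Logf and framework.Failf are the helpers the diff actually switches to):

package scheduling

import (
	"k8s.io/kubernetes/test/e2e/framework"
	// e2elog "k8s.io/kubernetes/test/e2e/framework/log"  // no longer imported
)

// logResult is a hypothetical helper illustrating the migration.
func logResult(err error) {
	if err != nil {
		// was: e2elog.Failf("unexpected error: %v", err)
		framework.Failf("unexpected error: %v", err)
		return
	}
	// was: e2elog.Logf("verification passed")
	framework.Logf("verification passed")
}

The logged messages are unchanged; only the import path of the log helpers changes, which also removes one BUILD dependency per package.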
1 change: 0 additions & 1 deletion test/e2e/scheduling/BUILD
@@ -46,7 +46,6 @@ go_library(
"//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/kubelet:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
25 changes: 12 additions & 13 deletions test/e2e/scheduling/limit_range.go
@@ -33,7 +33,6 @@ import (
"k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"

"github.com/onsi/ginkgo"
@@ -86,10 +85,10 @@ var _ = SIGDescribe("LimitRange", func() {
if err == nil {
select {
case listCompleted <- true:
e2elog.Logf("observed the limitRanges list")
framework.Logf("observed the limitRanges list")
return limitRanges, err
default:
e2elog.Logf("channel blocked")
framework.Logf("channel blocked")
}
}
return limitRanges, err
@@ -112,13 +111,13 @@ var _ = SIGDescribe("LimitRange", func() {
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
e2elog.Failf("Failed to observe limitRange creation : %v", event)
framework.Failf("Failed to observe limitRange creation : %v", event)
}
case <-time.After(e2eservice.RespondingTimeout):
e2elog.Failf("Timeout while waiting for LimitRange creation")
framework.Failf("Timeout while waiting for LimitRange creation")
}
case <-time.After(e2eservice.RespondingTimeout):
e2elog.Failf("Timeout while waiting for LimitRange list complete")
framework.Failf("Timeout while waiting for LimitRange list complete")
}

ginkgo.By("Fetching the LimitRange to ensure it has proper values")
@@ -141,7 +140,7 @@ var _ = SIGDescribe("LimitRange", func() {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
e2elog.Logf("Pod %+v does not have the expected requirements", pod)
framework.Logf("Pod %+v does not have the expected requirements", pod)
framework.ExpectNoError(err)
}
}
@@ -162,7 +161,7 @@ var _ = SIGDescribe("LimitRange", func() {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
e2elog.Logf("Pod %+v does not have the expected requirements", pod)
framework.Logf("Pod %+v does not have the expected requirements", pod)
framework.ExpectNoError(err)
}
}
@@ -212,18 +211,18 @@ var _ = SIGDescribe("LimitRange", func() {
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)

if err != nil {
e2elog.Logf("Unable to retrieve LimitRanges: %v", err)
framework.Logf("Unable to retrieve LimitRanges: %v", err)
return false, nil
}

if len(limitRanges.Items) == 0 {
e2elog.Logf("limitRange is already deleted")
framework.Logf("limitRange is already deleted")
return true, nil
}

if len(limitRanges.Items) > 0 {
if limitRanges.Items[0].ObjectMeta.DeletionTimestamp == nil {
e2elog.Logf("deletion has not yet been observed")
framework.Logf("deletion has not yet been observed")
return false, nil
}
return true, nil
@@ -244,12 +243,12 @@ var _ = SIGDescribe("LimitRange", func() {
})

func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
e2elog.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
err := equalResourceList(expected.Requests, actual.Requests)
if err != nil {
return err
}
e2elog.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
framework.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
err = equalResourceList(expected.Limits, actual.Limits)
return err
}
35 changes: 17 additions & 18 deletions test/e2e/scheduling/nvidia-gpus.go
@@ -29,7 +29,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/gpu"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
@@ -87,25 +86,25 @@ func logOSImages(f *framework.Framework) {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
for _, node := range nodeList.Items {
e2elog.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
framework.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
}
}

func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool {
e2elog.Logf("Getting list of Nodes from API server")
framework.Logf("Getting list of Nodes from API server")
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
for _, node := range nodeList.Items {
if node.Spec.Unschedulable {
continue
}
e2elog.Logf("gpuResourceName %s", gpuResourceName)
framework.Logf("gpuResourceName %s", gpuResourceName)
if val, ok := node.Status.Capacity[gpuResourceName]; !ok || val.Value() == 0 {
e2elog.Logf("Nvidia GPUs not available on Node: %q", node.Name)
framework.Logf("Nvidia GPUs not available on Node: %q", node.Name)
return false
}
}
e2elog.Logf("Nvidia GPUs exist on all schedulable nodes")
framework.Logf("Nvidia GPUs exist on all schedulable nodes")
return true
}

@@ -133,34 +132,34 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
}
gpuResourceName = gpu.NVIDIAGPUResourceName

e2elog.Logf("Using %v", dsYamlURL)
framework.Logf("Using %v", dsYamlURL)
// Creates the DaemonSet that installs Nvidia Drivers.
ds, err := framework.DsFromManifest(dsYamlURL)
framework.ExpectNoError(err)
ds.Namespace = f.Namespace.Name
_, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)
framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")
e2elog.Logf("Successfully created daemonset to install Nvidia drivers.")
framework.Logf("Successfully created daemonset to install Nvidia drivers.")

pods, err := e2epod.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset")

devicepluginPods, err := e2epod.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet"))
if err == nil {
e2elog.Logf("Adding deviceplugin addon pod.")
framework.Logf("Adding deviceplugin addon pod.")
pods.Items = append(pods.Items, devicepluginPods.Items...)
}

var rsgather *framework.ContainerResourceGatherer
if setupResourceGatherer {
e2elog.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
go rsgather.StartGatheringData()
}

// Wait for Nvidia GPUs to be available on nodes
e2elog.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
framework.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
gomega.Eventually(func() bool {
return areGPUsAvailableOnAllSchedulableNodes(f)
}, driverInstallTimeout, time.Second).Should(gomega.BeTrue())
@@ -182,19 +181,19 @@ func getGPUsPerPod() int64 {
func testNvidiaGPUs(f *framework.Framework) {
rsgather := SetupNVIDIAGPUNode(f, true)
gpuPodNum := getGPUsAvailable(f) / getGPUsPerPod()
e2elog.Logf("Creating %d pods and have the pods run a CUDA app", gpuPodNum)
framework.Logf("Creating %d pods and have the pods run a CUDA app", gpuPodNum)
podList := []*v1.Pod{}
for i := int64(0); i < gpuPodNum; i++ {
podList = append(podList, f.PodClient().Create(makeCudaAdditionDevicePluginTestPod()))
}
e2elog.Logf("Wait for all test pods to succeed")
framework.Logf("Wait for all test pods to succeed")
// Wait for all pods to succeed
for _, pod := range podList {
f.PodClient().WaitForSuccess(pod.Name, 5*time.Minute)
logContainers(f, pod)
}

e2elog.Logf("Stopping ResourceUsageGather")
framework.Logf("Stopping ResourceUsageGather")
constraints := make(map[string]framework.ResourceConstraint)
// For now, just gets summary. Can pass valid constraints in the future.
summary, err := rsgather.StopAndSummarize([]int{50, 90, 100}, constraints)
@@ -206,7 +205,7 @@ func logContainers(f *framework.Framework, pod *v1.Pod) {
for _, container := range pod.Spec.Containers {
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, container.Name)
framework.ExpectNoError(err, "Should be able to get container logs for container: %s", container.Name)
e2elog.Logf("Got container logs for %s:\n%v", container.Name, logs)
framework.Logf("Got container logs for %s:\n%v", container.Name, logs)
}
}

@@ -273,7 +272,7 @@ func StartJob(f *framework.Framework, completions int32) {
ns := f.Namespace.Name
_, err := jobutil.CreateJob(f.ClientSet, ns, testJob)
framework.ExpectNoError(err)
e2elog.Logf("Created job %v", testJob)
framework.Logf("Created job %v", testJob)
}

// VerifyJobNCompletions verifies that the job has completions number of successful pods
Expand All @@ -283,7 +282,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
framework.ExpectNoError(err)
createdPods := pods.Items
createdPodNames := podNames(createdPods)
e2elog.Logf("Got the following pods for job cuda-add: %v", createdPodNames)
framework.Logf("Got the following pods for job cuda-add: %v", createdPodNames)

successes := int32(0)
for _, podName := range createdPodNames {
@@ -296,7 +295,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
}
}
if successes != completions {
e2elog.Failf("Only got %v completions. Expected %v completions.", successes, completions)
framework.Failf("Only got %v completions. Expected %v completions.", successes, completions)
}
}

23 changes: 11 additions & 12 deletions test/e2e/scheduling/predicates.go
@@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
@@ -95,11 +94,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// NOTE: Here doesn't get nodeList for supporting a master nodes which can host workload pods.
masterNodes, _, err = e2enode.GetMasterAndWorkerNodes(cs)
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
framework.Logf("Unexpected error occurred: %v", err)
}
nodeList, err = e2enode.GetReadySchedulableNodesOrDie(cs)
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
framework.Logf("Unexpected error occurred: %v", err)
}

// TODO: write a wrapper for ExpectNoErrorWithOffset()
@@ -109,7 +108,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNoError(err)

for _, node := range nodeList.Items {
e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
e2ekubelet.PrintAllKubeletPods(cs, node.Name)
}

@@ -124,7 +123,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
totalPodCapacity = 0

for _, node := range nodeList.Items {
e2elog.Logf("Node: %v", node)
framework.Logf("Node: %v", node)
podCapacity, found := node.Status.Capacity[v1.ResourcePods]
framework.ExpectEqual(found, true)
totalPodCapacity += podCapacity.Value()
@@ -144,7 +143,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
*initPausePod(f, pausePodConfig{
Name: "",
Labels: map[string]string{"name": ""},
- }), true, e2elog.Logf))
+ }), true, framework.Logf))
}
podName := "additional-pod"
WaitForSchedulerAfterAction(f, createPausePodAction(f, pausePodConfig{
@@ -179,7 +178,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
for _, pod := range pods.Items {
_, found := nodeToAllocatableMap[pod.Spec.NodeName]
if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
e2elog.Logf("Pod %v requesting local ephemeral resource =%v on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName)
framework.Logf("Pod %v requesting local ephemeral resource =%v on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName)
nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedStorageEphemeralStorage(pod)
}
}
@@ -189,9 +188,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {

ephemeralStoragePerPod = nodeMaxAllocatable / maxNumberOfPods

e2elog.Logf("Using pod capacity: %v", ephemeralStoragePerPod)
framework.Logf("Using pod capacity: %v", ephemeralStoragePerPod)
for name, leftAllocatable := range nodeToAllocatableMap {
e2elog.Logf("Node: %v has local ephemeral resource allocatable: %v", name, leftAllocatable)
framework.Logf("Node: %v has local ephemeral resource allocatable: %v", name, leftAllocatable)
podsNeededForSaturation += (int)(leftAllocatable / ephemeralStoragePerPod)
}

Expand All @@ -214,7 +213,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
v1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStoragePerPod, "DecimalSI"),
},
},
- }), true, e2elog.Logf))
+ }), true, framework.Logf))
}
podName := "additional-pod"
conf := pausePodConfig{
@@ -284,7 +283,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
for _, pod := range pods.Items {
_, found := nodeToAllocatableMap[pod.Spec.NodeName]
if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
e2elog.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
}
}
@@ -294,7 +293,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
fillerPods := []*v1.Pod{}
for nodeName, cpu := range nodeToAllocatableMap {
requestedCPU := cpu * 7 / 10
e2elog.Logf("Creating a pod which consumes cpu=%vm on Node %v", requestedCPU, nodeName)
framework.Logf("Creating a pod which consumes cpu=%vm on Node %v", requestedCPU, nodeName)
fillerPods = append(fillerPods, createPausePod(f, pausePodConfig{
Name: "filler-pod-" + string(uuid.NewUUID()),
Resources: &v1.ResourceRequirements{