Add fixups to original PR (squash)

damemi committed Sep 4, 2019
1 parent fb59c24 commit 294faa3bbef2151e56afdff87c4474a3d6a9b93b
Showing with 115 additions and 57 deletions.
  1. +115 −57 test/e2e/scheduling/preemption.go
@@ -24,7 +24,7 @@ import (
"k8s.io/client-go/tools/cache"

appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -102,45 +102,65 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
var podRes v1.ResourceList
// Create one pod per node that uses a lot of the node's resources.
ginkgo.By("Create pods that use 60% of node resources.")
pods := make([]*corev1.Pod, len(nodeList.Items))
pods := make([]*v1.Pod, 0)
allPods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
framework.ExpectNoError(err)
for i, node := range nodeList.Items {
currentCpuUsage, currentMemUsage := getCurrentPodUsageOnTheNode(node.Name, allPods.Items, podRequestedResource)
framework.Logf("Current cpu and memory usage %v, %v", currentCpuUsage, currentMemUsage)
currentNode, err := cs.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
cpuAllocatable, found := currentNode.Status.Allocatable["cpu"]
Expect(found).To(Equal(true))
currentCPUUsage, currentMemUsage := getCurrentPodUsageOnTheNode(node.Name, allPods.Items, podRequestedResource)
framework.Logf("Current cpu and memory usage %v, %v", currentCPUUsage, currentMemUsage)
cpuAllocatable, found := node.Status.Allocatable["cpu"]
framework.ExpectEqual(found, true)
milliCPU := cpuAllocatable.MilliValue()
milliCPU = milliCPU * 40 / 100
memAllocatable, found := currentNode.Status.Allocatable["memory"]
Expect(found).To(Equal(true))
milliCPU = int64(float64(milliCPU-currentCPUUsage) * float64(0.6))
memAllocatable, found := node.Status.Allocatable["memory"]
framework.ExpectEqual(found, true)
memory := memAllocatable.Value()
memory = memory * 60 / 100
podRes = corev1.ResourceList{}
podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[corev1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
memory = int64(float64(memory-currentMemUsage) * float64(0.6))
// If a node is already heavily utilized, let's not create a pod there.
if milliCPU <= 0 || memory <= 0 {
framework.Logf("Node is heavily utilized, let's not create a pod here")
continue
}
podRes = v1.ResourceList{}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)

// make the first pod low priority and the rest medium priority.
priorityName := mediumPriorityClassName
if i == 0 {
priorityName = lowPriorityClassName
}
pods[i] = createPausePod(f, pausePodConfig{
pods = append(pods, createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, priorityName),
PriorityClassName: priorityName,
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
NodeName: node.Name,
}))
if len(pods) < 2 {
framework.Skipf("We need at least two pods to be created but" +
"all nodes are already heavily utilized, so preemption tests cannot be run")
}
framework.Logf("Created pod: %v", pods[i].Name)
}
ginkgo.By("Wait for pods to be scheduled.")
lowerPriorityPodExists := false
if pods[0].Spec.PriorityClassName == lowPriorityClassName {
lowerPriorityPodExists = true
}
for _, pod := range pods {
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod))
}
if lowerPriorityPodExists {
// We want this pod to be preempted
podRes = pods[0].Spec.Containers[0].Resources.Requests
} else {
// All the pods are medium priority pods, so it doesn't matter which one gets preempted.
podRes = pods[1].Spec.Containers[0].Resources.Requests
}

ginkgo.By("Run a high priority pod that use 60% of a node resources.")
ginkgo.By("Run a high priority pod that has same requirements as that of lower priority pod")
// Create a high priority pod and make sure it is scheduled.
runPausePod(f, pausePodConfig{
Name: "preemptor-pod",
@@ -149,17 +169,25 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
Requests: podRes,
},
})
// Make sure that the lowest priority pod is deleted.
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
podDeleted := (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
gomega.Expect(podDeleted).To(gomega.BeTrue())
// Other pods (mid priority ones) should be present.
for i := 1; i < len(pods); i++ {
livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil())
podPreempted := false
if lowerPriorityPodExists {
// Make sure that the lowest priority pod is deleted.
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
podPreempted = (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
} else {
// This means one of the medium priority pods got preempted
for i := 0; i < len(pods); i++ {
midPriority, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
podPreempted = (err != nil && errors.IsNotFound(err)) ||
(err == nil && midPriority.DeletionTimestamp != nil)
if podPreempted {
// We have at least one pod that got preempted because of our pod
break
}
}
}
framework.ExpectEqual(podPreempted, true)
})

// This test verifies that when a critical pod is created and no node with
@@ -169,24 +197,28 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
var podRes v1.ResourceList
// Create one pod per node that uses a lot of the node's resources.
ginkgo.By("Create pods that use 60% of node resources.")
pods := make([]*corev1.Pod, len(nodeList.Items))
pods := make([]*v1.Pod, 0)
allPods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
framework.ExpectNoError(err)
for i, node := range nodeList.Items {
currentCpuUsage, currentMemUsage := getCurrentPodUsageOnTheNode(node.Name, allPods.Items, podRequestedResource)
framework.Logf("Current cpu usage and memory usage is %v, %v", currentCpuUsage, currentMemUsage)
currentNode, err := cs.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
cpuAllocatable, found := currentNode.Status.Allocatable["cpu"]
Expect(found).To(Equal(true))
currentCPUUsage, currentMemUsage := getCurrentPodUsageOnTheNode(node.Name, allPods.Items, podRequestedResource)
framework.Logf("Current cpu usage and memory usage is %v, %v", currentCPUUsage, currentMemUsage)
cpuAllocatable, found := node.Status.Allocatable["cpu"]
framework.ExpectEqual(found, true)
milliCPU := cpuAllocatable.MilliValue()
milliCPU = milliCPU * 40 / 100
memAllocatable, found := currentNode.Status.Allocatable["memory"]
Expect(found).To(Equal(true))
milliCPU = int64(float64(milliCPU-currentCPUUsage) * float64(0.6))
memAllocatable, found := node.Status.Allocatable["memory"]
framework.ExpectEqual(found, true)
memory := memAllocatable.Value()
memory = memory * 60 / 100
podRes = corev1.ResourceList{}
podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[corev1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
memory = int64(float64(memory-currentMemUsage) * float64(0.6))
podRes = v1.ResourceList{}
// If a node is already heavily utilized, let's not create a pod there.
if milliCPU <= 0 || memory <= 0 {
framework.Logf("Node is heavily utilized, let's not create a pod there")
continue
}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)

// make the first pod low priority and the rest medium priority.
priorityName := mediumPriorityClassName
@@ -199,13 +231,30 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
NodeName: node.Name,
})
framework.Logf("Created pod: %v", pods[i].Name)
if len(pods) < 2 {
framework.Skipf("We need atleast two pods to be created but" +
"all nodes are already heavily utilized, so preemption tests cannot be run")
}
}
ginkgo.By("Wait for pods to be scheduled.")
lowerPriorityPodExists := false
if pods[0].Spec.PriorityClassName == lowPriorityClassName {
lowerPriorityPodExists = true
}
for _, pod := range pods {
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod))
}
if lowerPriorityPodExists {
// We want this pod to be preempted
podRes = pods[0].Spec.Containers[0].Resources.Requests
} else {
// All the pods are medium priority pods, so it doesn't matter which one gets preempted.
podRes = pods[1].Spec.Containers[0].Resources.Requests
}
ginkgo.By("Run a critical pod that use same resources as that of a lower priority pod")

ginkgo.By("Run a critical pod that use 60% of a node resources.")
// Create a critical pod and make sure it is scheduled.
@@ -217,22 +266,31 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
Requests: podRes,
},
})
// Make sure that the lowest priority pod is deleted.
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})

defer func() {
// Clean-up the critical pod
err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}()
podDeleted := (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
gomega.Expect(podDeleted).To(gomega.BeTrue())
// Other pods (mid priority ones) should be present.
for i := 1; i < len(pods); i++ {
livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil())
podPreempted := false
if lowerPriorityPodExists {
// Make sure that the lowest priority pod is deleted.
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
podPreempted = (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
} else {
// This means one of the medium priority pods got preempted
for i := 0; i < len(pods); i++ {
midPriority, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
podPreempted = (err != nil && errors.IsNotFound(err)) ||
(err == nil && midPriority.DeletionTimestamp != nil)
if podPreempted {
// We have at least one pod that got preempted because of our pod
break
}
}
}
framework.ExpectEqual(podPreempted, true)
})
})
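
Aside from the diff itself, it may help to see the pass/fail criterion both updated tests now share: a pod counts as preempted if it is gone (NotFound) or already carries a deletion timestamp. Below is a minimal standalone sketch of that check; the helper name podWasPreempted and the package/import framing are illustrative additions, not part of the commit, while the context-free Get call matches the client-go signatures used elsewhere in this diff.

package scheduling

import (
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// podWasPreempted reports whether the named pod has been removed or marked
// for deletion, which is how the updated tests detect preemption.
func podWasPreempted(cs clientset.Interface, namespace, name string) bool {
	pod, err := cs.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		// A NotFound error means the pod was deleted; any other error is
		// treated as "not preempted" for the purposes of this sketch.
		return errors.IsNotFound(err)
	}
	return pod.DeletionTimestamp != nil
}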

@@ -525,18 +583,18 @@ func runPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet {
return rs
}

func getCurrentPodUsageOnTheNode(nodeName string, pods []corev1.Pod, resource *corev1.ResourceRequirements) (int64, int64) {
totalRequestedCpuResource := resource.Requests.Cpu().MilliValue()
func getCurrentPodUsageOnTheNode(nodeName string, pods []v1.Pod, resource *v1.ResourceRequirements) (int64, int64) {
totalRequestedCPUResource := resource.Requests.Cpu().MilliValue()
totalRequestedMemResource := resource.Requests.Memory().Value()
for _, pod := range pods {
if pod.Spec.NodeName == nodeName {
if v1qos.GetPodQOS(&pod) == corev1.PodQOSBestEffort {
if v1qos.GetPodQOS(&pod) == v1.PodQOSBestEffort {
continue
}
}
result := getNonZeroRequests(&pod)
totalRequestedCpuResource += result.MilliCPU
totalRequestedCPUResource += result.MilliCPU
totalRequestedMemResource += result.Memory
}
return totalRequestedCpuResource, totalRequestedMemResource
return totalRequestedCPUResource, totalRequestedMemResource
}
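
Similarly, the per-node sizing that getCurrentPodUsageOnTheNode feeds can be read as: take the node's allocatable, subtract the requests already placed on that node, and request 60% of the remainder, skipping nodes with nothing useful left. The sketch below is an illustrative restatement under that assumption; the function name and standalone packaging are not part of the commit.

package scheduling

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// sixtyPercentOfRemaining sizes a pause pod's requests as 60% of what is
// still allocatable on the node after subtracting current pod requests.
// It returns nil when the node is already too full, mirroring the
// "heavily utilized" skip branch in the tests.
func sixtyPercentOfRemaining(node v1.Node, currentCPUMilli, currentMemBytes int64) v1.ResourceList {
	cpuAllocatable := node.Status.Allocatable[v1.ResourceCPU]
	memAllocatable := node.Status.Allocatable[v1.ResourceMemory]
	milliCPU := int64(float64(cpuAllocatable.MilliValue()-currentCPUMilli) * 0.6)
	memory := int64(float64(memAllocatable.Value()-currentMemBytes) * 0.6)
	if milliCPU <= 0 || memory <= 0 {
		return nil
	}
	return v1.ResourceList{
		v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
	}
}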
