Skip to content

Commit

Permalink
Merge pull request #140 from openshift-cherrypick-robot/cherry-pick-138-to-release-4.4
Browse files Browse the repository at this point in the history

[release-4.4] fix guaranteed and cpu affinity tests
  • Loading branch information
openshift-merge-robot committed Mar 16, 2020
2 parents 7d4b382 + 81a67b5 commit 71610f8
Showing 1 changed file with 15 additions and 17 deletions.
32 changes: 15 additions & 17 deletions functests/performance/cpu_management.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ var _ = Describe("[performance] CPU Management", func() {
var balanceIsolated bool
var reservedCPU, isolatedCPU string
var listReservedCPU []int
var listIsolatedCPU []int
var reservedCPUSet cpuset.CPUSet

BeforeEach(func() {
workerRTNodes, err := nodes.GetByRole(testclient.Client, testutils.RoleWorkerRT)
Expand All @@ -55,12 +55,6 @@ var _ = Describe("[performance] CPU Management", func() {
balanceIsolated = *profile.Spec.CPU.BalanceIsolated
}

Expect(profile.Spec.CPU.Isolated).NotTo(BeNil())
isolatedCPU = string(*profile.Spec.CPU.Isolated)
isolatedCPUSet, err := cpuset.Parse(isolatedCPU)
Expect(err).ToNot(HaveOccurred())
listIsolatedCPU = isolatedCPUSet.ToSlice()

Expect(profile.Spec.CPU.Reserved).NotTo(BeNil())
reservedCPU = string(*profile.Spec.CPU.Reserved)
reservedCPUSet, err := cpuset.Parse(reservedCPU)
Expand Down Expand Up @@ -92,7 +86,11 @@ var _ = Describe("[performance] CPU Management", func() {

By("checking CPU affinity mask for kernel scheduler")
cmd = []string{"/bin/bash", "-c", "taskset -pc $(pgrep rcu_sched)"}
Expect(execCommandOnWorker(cmd, workerRTNode)).To(ContainSubstring(fmt.Sprintf("current affinity list: %s", reservedCPU)))
mask := strings.SplitAfter(execCommandOnWorker(cmd, workerRTNode), " ")
maskSet, err := cpuset.Parse(mask[len(mask)-1])
Expect(err).ToNot(HaveOccurred())

Expect(reservedCPUSet.IsSubsetOf(maskSet)).To(Equal(true))
})

})
Expand All @@ -114,24 +112,24 @@ var _ = Describe("[performance] CPU Management", func() {
testpod = getStressPod(workerRTNode.Name)
testpod.Namespace = testutils.NamespaceTesting

//list worker cpus
cmd := []string{"/bin/bash", "-c", "lscpu | grep On-line | awk '{print $4}'"}
cpus, err := cpuset.Parse(execCommandOnWorker(cmd, workerRTNode))
Expect(err).ToNot(HaveOccurred())
listCPU = cpus.ToSlice()

if guaranteed {
listCPU = listIsolatedCPU
listCPU = cpus.Difference(reservedCPUSet).ToSlice()
testpod.Spec.Containers[0].Resources.Limits = map[corev1.ResourceName]resource.Quantity{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
}
} else if balanceIsolated {
// when balanceIsolated is True - non-guaranteed pod can take ANY cpu
cmd := []string{"/bin/bash", "-c", "lscpu | grep On-line | awk '{print $4}'"}
cpus, err := cpuset.Parse(execCommandOnWorker(cmd, workerRTNode))
Expect(err).ToNot(HaveOccurred())
listCPU = cpus.ToSlice()
} else {
} else if !balanceIsolated {
// when balanceIsolated is False - non-guaranteed pod should run on reserved cpu
listCPU = listReservedCPU
}

err := testclient.Client.Create(context.TODO(), testpod)
err = testclient.Client.Create(context.TODO(), testpod)
Expect(err).ToNot(HaveOccurred())

err = pods.WaitForCondition(testclient.Client, testpod, corev1.PodReady, corev1.ConditionTrue, 60*time.Second)
Expand Down

0 comments on commit 71610f8

Please sign in to comment.