Skip to content

Commit

Permalink
e2e:wait: return updated pod object explicitly
Browse files Browse the repository at this point in the history
In some of the tests we're implicitly relying on the fact
that the pod data returned from the `pods.WaitFor*` functions is
the most updated one.

This is not the case in some of the functions, in which
we store the data in a different pointer to a pod object and
never return it.

On the other hand, the fact that those wait functions change the
passed object is a hidden implementation detail.

Now the functions accept only a key object in order to explicitly
indicate that the passed object is not mutated.

In addition, the functions return an updated copy of the object.

Signed-off-by: Talor Itzhak <titzhak@redhat.com>
  • Loading branch information
Tal-or committed Aug 6, 2023
1 parent 96b0671 commit f44889d
Show file tree
Hide file tree
Showing 8 changed files with 34 additions and 44 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -202,7 +202,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
err = testclient.Client.Create(context.TODO(), testpod)
Expect(err).ToNot(HaveOccurred())

err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
testpod, err = pods.WaitForCondition(client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
logEventsForPod(testpod)
Expect(err).ToNot(HaveOccurred())

Expand Down Expand Up @@ -284,7 +284,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
err = testclient.Client.Create(context.TODO(), testpod)
Expect(err).ToNot(HaveOccurred())

err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
testpod, err = pods.WaitForCondition(client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
logEventsForPod(testpod)
Expect(err).ToNot(HaveOccurred(), "failed to create guaranteed pod %v", testpod)

Expand Down Expand Up @@ -396,7 +396,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {

err = testclient.Client.Create(context.TODO(), testpod)
Expect(err).ToNot(HaveOccurred())
err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
testpod, err = pods.WaitForCondition(client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
logEventsForPod(testpod)
Expect(err).ToNot(HaveOccurred())

Expand Down Expand Up @@ -451,7 +451,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
err := testclient.Client.Create(context.TODO(), testpod)
Expect(err).ToNot(HaveOccurred())

err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
testpod, err = pods.WaitForCondition(client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
logEventsForPod(testpod)
Expect(err).ToNot(HaveOccurred())
})
Expand Down Expand Up @@ -542,7 +542,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
err := testclient.Client.Create(context.TODO(), testpod)
Expect(err).ToNot(HaveOccurred())

currentPod, err := pods.WaitForPredicate(testpod, 10*time.Minute, func(pod *corev1.Pod) (bool, error) {
currentPod, err := pods.WaitForPredicate(client.ObjectKeyFromObject(testpod), 10*time.Minute, func(pod *corev1.Pod) (bool, error) {
if pod.Status.Phase != corev1.PodPending {
return true, nil
}
Expand Down Expand Up @@ -717,7 +717,7 @@ func startHTtestPod(cpuCount int) *corev1.Pod {
By(fmt.Sprintf("Creating test pod with %d cpus", cpuCount))
err := testclient.Client.Create(context.TODO(), testpod)
Expect(err).ToNot(HaveOccurred())
err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
testpod, err = pods.WaitForCondition(client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
logEventsForPod(testpod)
Expect(err).ToNot(HaveOccurred(), "Start pod failed")
// Sanity check for QoS Class == Guaranteed
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"sigs.k8s.io/controller-runtime/pkg/client"

performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
"github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components/machineconfig"
testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils"
Expand Down Expand Up @@ -142,7 +144,7 @@ var _ = Describe("[performance]Hugepages", Ordered, func() {
}
err = testclient.Client.Create(context.TODO(), testpod)
Expect(err).ToNot(HaveOccurred())
err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
testpod, err = pods.WaitForCondition(client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
Expect(err).ToNot(HaveOccurred())

cmd2 := []string{"/bin/bash", "-c", "tmux new -d 'LD_PRELOAD=libhugetlbfs.so HUGETLB_MORECORE=yes top -b > /dev/null'"}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -238,7 +238,7 @@ var _ = Describe("[performance] Checking IRQBalance settings", Ordered, func() {
testlog.Infof("IRQ Default affinity on %q when test ends: {%s}", targetNode.Name, irqAffBegin)
}()

err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
testpod, err = pods.WaitForCondition(client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
logEventsForPod(testpod)
Expect(err).ToNot(HaveOccurred())

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,7 @@ var _ = Describe("[rfe_id: 43186][memorymanager] Memorymanager feature", func()
By("creating test pod")
err = testclient.Client.Create(context.TODO(), testPod)
Expect(err).ToNot(HaveOccurred(), "Failed to create test pod")
err = pods.WaitForCondition(testPod, corev1.PodConditionType(corev1.PodFailed), corev1.ConditionFalse, 2*time.Minute)
testPod, err = pods.WaitForCondition(client.ObjectKeyFromObject(testPod), corev1.PodConditionType(corev1.PodFailed), corev1.ConditionFalse, 2*time.Minute)
// Even though number of hugepage requests can be satisfied by 2 numa nodes together
// Number of cpus are only 2 which only requires 1 numa node , So minimum number of numa nodes needed to satisfy is only 1.
// According to Restricted TM policy: only allow allocations from the minimum number of NUMA nodes.
Expand Down Expand Up @@ -392,7 +392,7 @@ var _ = Describe("[rfe_id: 43186][memorymanager] Memorymanager feature", func()
By("creating test pod")
err = testclient.Client.Create(context.TODO(), testPod2)
Expect(err).ToNot(HaveOccurred(), "failed to create testpod2")
err = pods.WaitForCondition(testPod2, corev1.PodConditionType(corev1.PodFailed), corev1.ConditionTrue, 2*time.Minute)
testPod2, err = pods.WaitForCondition(client.ObjectKeyFromObject(testPod2), corev1.PodConditionType(corev1.PodFailed), corev1.ConditionTrue, 2*time.Minute)
Expect(err).To(HaveOccurred(), "testpod2 did not go in to failed condition")
err = checkPodEvent(testPod2, "FailedScheduling")
Expect(err).ToNot(HaveOccurred(), "failed to find expected event: failedScheduling")
Expand Down Expand Up @@ -520,7 +520,7 @@ var _ = Describe("[rfe_id: 43186][memorymanager] Memorymanager feature", func()
By("creating test pod")
err = testclient.Client.Create(context.TODO(), testPod)
Expect(err).ToNot(HaveOccurred(), "failed to create testpod")
err = pods.WaitForCondition(testPod, corev1.PodConditionType(corev1.PodFailed), corev1.ConditionFalse, 2*time.Minute)
testPod, err = pods.WaitForCondition(client.ObjectKeyFromObject(testPod), corev1.PodConditionType(corev1.PodFailed), corev1.ConditionFalse, 2*time.Minute)
err := checkPodEvent(testPod, "TopologyAffinityError")
Expect(err).ToNot(HaveOccurred(), "pod did not fail with TopologyAffinityError")
Expect(testPod.Status.QOSClass).To(Equal(corev1.PodQOSGuaranteed), "Test pod does not have QoS class of Guaranteed")
Expand Down Expand Up @@ -802,7 +802,7 @@ func initializePod(testPod *corev1.Pod) error {
if err != nil {
testlog.Errorf("Failed to create test pod %v", testPod)
}
err = pods.WaitForCondition(testPod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
testPod, err = pods.WaitForCondition(client.ObjectKeyFromObject(testPod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
if err != nil {
testlog.Errorf("%v failed to start", testPod)
}
Expand Down
5 changes: 3 additions & 2 deletions test/e2e/performanceprofile/functests/4_latency/latency.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
Expand Down Expand Up @@ -452,7 +453,7 @@ func createLatencyTestPod(testPod *corev1.Pod) {

By("Waiting two minutes to download the latencyTest image")
podKey := fmt.Sprintf("%s/%s", testPod.Namespace, testPod.Name)
currentPod, err := pods.WaitForPredicate(testPod, 2*time.Minute, func(pod *corev1.Pod) (bool, error) {
currentPod, err := pods.WaitForPredicate(client.ObjectKeyFromObject(testPod), 2*time.Minute, func(pod *corev1.Pod) (bool, error) {
if pod.Status.Phase == corev1.PodRunning {
return true, nil
}
Expand All @@ -474,7 +475,7 @@ func createLatencyTestPod(testPod *corev1.Pod) {

By("Waiting another two minutes to give enough time for the cluster to move the pod to Succeeded phase")
podTimeout := time.Duration(timeout + latencyTestDelay + 120)
err = pods.WaitForPhase(testPod, corev1.PodSucceeded, podTimeout*time.Second)
testPod, err = pods.WaitForPhase(client.ObjectKeyFromObject(testPod), corev1.PodSucceeded, podTimeout*time.Second)
if err != nil {
logEventsForPod(testPod)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -694,7 +694,7 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific
By("creating test pod")
err = testclient.Client.Create(context.TODO(), testpod)
Expect(err).ToNot(HaveOccurred())
err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
testpod, err = pods.WaitForCondition(client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
Expect(err).ToNot(HaveOccurred())
Expect(testpod.Status.QOSClass).To(Equal(corev1.PodQOSGuaranteed), "Test pod does not have QoS class of Guaranteed")

Expand Down Expand Up @@ -791,7 +791,7 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific
By("creating test pod")
err = testclient.Client.Create(context.TODO(), testpod)
Expect(err).ToNot(HaveOccurred())
err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
testpod, err = pods.WaitForCondition(client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
Expect(err).ToNot(HaveOccurred())
Expect(testpod.Status.QOSClass).To(Equal(corev1.PodQOSGuaranteed), "Test pod does not have QoS class of Guaranteed")

Expand Down
4 changes: 2 additions & 2 deletions test/e2e/performanceprofile/functests/9_reboot/devices.go
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ var _ = Describe("[disruptive][node][kubelet][devicemanager] Device management t

// things should be settled now so we can use again a short timeout
testlog.Infof("post reboot: running a fresh pod %s/%s resource=%q", wlPod.Namespace, wlPod.Name, sriovDeviceResourceName)
updatedPod, err := testpods.WaitForPredicate(wlPod, 1*time.Minute, func(pod *corev1.Pod) (bool, error) {
updatedPod, err := testpods.WaitForPredicate(client.ObjectKeyFromObject(wlPod), 1*time.Minute, func(pod *corev1.Pod) (bool, error) {
return isPodReady(*pod), nil
})
Expect(err).ToNot(HaveOccurred(), "error checking the workload pod post reboot")
Expand Down Expand Up @@ -230,7 +230,7 @@ var _ = Describe("[disruptive][node][kubelet][devicemanager] Device management t

// things should be settled now so we can use again a short timeout
testlog.Infof("post restart: running a fresh pod %s/%s resource=%q", wlPod2.Namespace, wlPod2.Name, sriovDeviceResourceName)
updatedPod2, err := testpods.WaitForPredicate(wlPod2, 1*time.Minute, func(pod *corev1.Pod) (bool, error) {
updatedPod2, err := testpods.WaitForPredicate(client.ObjectKeyFromObject(wlPod2), 1*time.Minute, func(pod *corev1.Pod) (bool, error) {
return isPodReady(*pod), nil
})
Expect(err).ToNot(HaveOccurred(), "error checking the workload pod post kubelet restart")
Expand Down
39 changes: 13 additions & 26 deletions test/e2e/performanceprofile/functests/utils/pods/pods.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,45 +51,36 @@ func GetTestPod() *corev1.Pod {

// WaitForDeletion waits until the pod will be removed from the cluster
func WaitForDeletion(pod *corev1.Pod, timeout time.Duration) error {
key := types.NamespacedName{
Name: pod.Name,
Namespace: pod.Namespace,
}
return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
pod := &corev1.Pod{}
if err := testclient.Client.Get(context.TODO(), key, pod); errors.IsNotFound(err) {
if err := testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(pod), pod); errors.IsNotFound(err) {
return true, nil
}
return false, nil
})
}

// WaitForCondition waits until the pod will have specified condition type with the expected status
func WaitForCondition(pod *corev1.Pod, conditionType corev1.PodConditionType, conditionStatus corev1.ConditionStatus, timeout time.Duration) error {
key := types.NamespacedName{
Name: pod.Name,
Namespace: pod.Namespace,
}
return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
updatedPod := &corev1.Pod{}
if err := testclient.Client.Get(context.TODO(), key, updatedPod); err != nil {
func WaitForCondition(podKey client.ObjectKey, conditionType corev1.PodConditionType, conditionStatus corev1.ConditionStatus, timeout time.Duration) (*corev1.Pod, error) {
updatedPod := &corev1.Pod{}
err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
if err := testclient.Client.Get(context.TODO(), podKey, updatedPod); err != nil {
return false, nil
}

for _, c := range updatedPod.Status.Conditions {
if c.Type == conditionType && c.Status == conditionStatus {
return true, nil
}
}
return false, nil
})
return updatedPod, err
}

// WaitForPredicate waits until the given predicate against the pod returns true or error.
func WaitForPredicate(pod *corev1.Pod, timeout time.Duration, pred func(pod *corev1.Pod) (bool, error)) (*corev1.Pod, error) {
func WaitForPredicate(podKey client.ObjectKey, timeout time.Duration, pred func(pod *corev1.Pod) (bool, error)) (*corev1.Pod, error) {
updatedPod := &corev1.Pod{}
err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
if err := testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(pod), updatedPod); err != nil {
if err := testclient.Client.Get(context.TODO(), podKey, updatedPod); err != nil {
return false, nil
}

Expand All @@ -103,23 +94,19 @@ func WaitForPredicate(pod *corev1.Pod, timeout time.Duration, pred func(pod *cor
}

// WaitForPhase waits until the pod will have specified phase
func WaitForPhase(pod *corev1.Pod, phase corev1.PodPhase, timeout time.Duration) error {
key := types.NamespacedName{
Name: pod.Name,
Namespace: pod.Namespace,
}
return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
updatedPod := &corev1.Pod{}
if err := testclient.Client.Get(context.TODO(), key, updatedPod); err != nil {
func WaitForPhase(podKey client.ObjectKey, phase corev1.PodPhase, timeout time.Duration) (*corev1.Pod, error) {
updatedPod := &corev1.Pod{}
err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
if err := testclient.Client.Get(context.TODO(), podKey, updatedPod); err != nil {
return false, nil
}

if updatedPod.Status.Phase == phase {
return true, nil
}

return false, nil
})
return updatedPod, err
}

// GetLogs returns logs of the specified pod
Expand Down

0 comments on commit f44889d

Please sign in to comment.