Skip to content

Commit

Permalink
test/e2e: use proper context
Browse files Browse the repository at this point in the history
Eliminate all context.TODO() from the e2e tests and use ginkgo context
instead. This ensures that calls involving context are properly
cancelled and return fast in case the tests get aborted.
  • Loading branch information
marquiz committed Apr 18, 2023
1 parent 8592f3e commit ad8bd05
Show file tree
Hide file tree
Showing 9 changed files with 213 additions and 214 deletions.
226 changes: 113 additions & 113 deletions test/e2e/node_feature_discovery_test.go

Large diffs are not rendered by default.

69 changes: 34 additions & 35 deletions test/e2e/topology_updater_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ import (
"fmt"
"time"

"github.com/onsi/ginkgo/v2"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

Expand Down Expand Up @@ -58,7 +57,7 @@ var _ = SIGDescribe("NFD topology updater", func() {

f := framework.NewDefaultFramework("node-topology-updater")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
JustBeforeEach(func() {
JustBeforeEach(func(ctx context.Context) {
var err error

if extClient == nil {
Expand All @@ -72,43 +71,43 @@ var _ = SIGDescribe("NFD topology updater", func() {
}

By("Creating the node resource topologies CRD")
Expect(testutils.CreateNodeResourceTopologies(extClient)).ToNot(BeNil())
Expect(testutils.CreateNodeResourceTopologies(ctx, extClient)).ToNot(BeNil())

By("Configuring RBAC")
Expect(testutils.ConfigureRBAC(f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred())
Expect(testutils.ConfigureRBAC(ctx, f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred())

By("Creating nfd-topology-updater daemonset")
topologyUpdaterDaemonSet, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), topologyUpdaterDaemonSet, metav1.CreateOptions{})
topologyUpdaterDaemonSet, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ctx, topologyUpdaterDaemonSet, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())

By("Waiting for daemonset pods to be ready")
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(ctx, f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())

label := labels.SelectorFromSet(map[string]string{"name": topologyUpdaterDaemonSet.Spec.Template.Labels["name"]})
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: label.String()})
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{LabelSelector: label.String()})
Expect(err).NotTo(HaveOccurred())
Expect(pods.Items).ToNot(BeEmpty())

topologyUpdaterNode, err = f.ClientSet.CoreV1().Nodes().Get(context.TODO(), pods.Items[0].Spec.NodeName, metav1.GetOptions{})
topologyUpdaterNode, err = f.ClientSet.CoreV1().Nodes().Get(ctx, pods.Items[0].Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())

kubeletConfig, err = kubelet.GetCurrentKubeletConfig(topologyUpdaterNode.Name, "", true)
Expect(err).NotTo(HaveOccurred())

workerNodes, err = testutils.GetWorkerNodes(f)
workerNodes, err = testutils.GetWorkerNodes(ctx, f)
Expect(err).NotTo(HaveOccurred())
})

ginkgo.AfterEach(func() {
AfterEach(func(ctx context.Context) {
framework.Logf("Node Feature Discovery topology updater CRD and RBAC removal")
err := testutils.DeconfigureRBAC(f.ClientSet, f.Namespace.Name)
err := testutils.DeconfigureRBAC(ctx, f.ClientSet, f.Namespace.Name)
if err != nil {
framework.Failf("AfterEach: Failed to delete RBAC resources: %v", err)
}
})

Context("with topology-updater daemonset running", func() {
ginkgo.BeforeEach(func() {
BeforeEach(func(ctx context.Context) {
cfg, err := testutils.GetConfig()
Expect(err).ToNot(HaveOccurred())

Expand All @@ -118,15 +117,15 @@ var _ = SIGDescribe("NFD topology updater", func() {
topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
})

It("should fill the node resource topologies CR with the data", func() {
nodeTopology := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
It("should fill the node resource topologies CR with the data", func(ctx context.Context) {
nodeTopology := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
isValid := testutils.IsValidNodeTopology(nodeTopology, kubeletConfig)
Expect(isValid).To(BeTrue(), "received invalid topology: %v", nodeTopology)
})

It("it should not account for any cpus if a container doesn't request exclusive cpus (best effort QOS)", func() {
It("it should not account for any cpus if a container doesn't request exclusive cpus (best effort QOS)", func(ctx context.Context) {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
initialNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming resources from the shared, non-exclusive CPU pool (best-effort QoS)")
sleeperPod := testpod.BestEffortSleeper()

Expand All @@ -140,7 +139,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
// the object, hence the resource version must NOT change, so we can only sleep
time.Sleep(cooldown)
By("checking the changes in the updated topology - expecting none")
finalNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
finalNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)

initialAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(initialNodeTopo)
finalAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(finalNodeTopo)
Expand All @@ -164,9 +163,9 @@ var _ = SIGDescribe("NFD topology updater", func() {
Expect(isGreaterEqual).To(BeTrue(), fmt.Sprintf("final allocatable resources not restored - cmp=%d initial=%v final=%v", cmp, initialAllocRes, finalAllocRes))
})

It("it should not account for any cpus if a container doesn't request exclusive cpus (guaranteed QOS, nonintegral cpu request)", func() {
It("it should not account for any cpus if a container doesn't request exclusive cpus (guaranteed QOS, nonintegral cpu request)", func(ctx context.Context) {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
initialNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming resources from the shared, non-exclusive CPU pool (guaranteed QoS, nonintegral request)")
sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
corev1.ResourceList{
Expand All @@ -185,7 +184,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
// the object, hence the resource version must NOT change, so we can only sleep
time.Sleep(cooldown)
By("checking the changes in the updated topology - expecting none")
finalNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
finalNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)

initialAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(initialNodeTopo)
finalAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(finalNodeTopo)
Expand All @@ -209,15 +208,15 @@ var _ = SIGDescribe("NFD topology updater", func() {
Expect(isGreaterEqual).To(BeTrue(), fmt.Sprintf("final allocatable resources not restored - cmp=%d initial=%v final=%v", cmp, initialAllocRes, finalAllocRes))
})

It("it should account for containers requesting exclusive cpus", func() {
It("it should account for containers requesting exclusive cpus", func(ctx context.Context) {
nodes, err := testutils.FilterNodesWithEnoughCores(workerNodes, "1000m")
Expect(err).NotTo(HaveOccurred())
if len(nodes) < 1 {
Skip("not enough allocatable cores for this test")
}

By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
initialNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming exclusive CPUs")
sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
corev1.ResourceList{
Expand All @@ -238,7 +237,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
By("checking the changes in the updated topology")
var finalNodeTopo *v1alpha2.NodeResourceTopology
Eventually(func() bool {
finalNodeTopo, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(context.TODO(), topologyUpdaterNode.Name, metav1.GetOptions{})
finalNodeTopo, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(ctx, topologyUpdaterNode.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("failed to get the node topology resource: %v", err)
return false
Expand All @@ -264,7 +263,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
})

When("sleep interval disabled", func() {
ginkgo.BeforeEach(func() {
BeforeEach(func(ctx context.Context) {
cfg, err := testutils.GetConfig()
Expect(err).ToNot(HaveOccurred())

Expand All @@ -273,7 +272,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(dockerImage()), testpod.SpecWithContainerExtraArgs("-sleep-interval=0s")}
topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
})
It("should still create CRs using a reactive updates", func() {
It("should still create CRs using a reactive updates", func(ctx context.Context) {
nodes, err := testutils.FilterNodesWithEnoughCores(workerNodes, "1000m")
Expect(err).NotTo(HaveOccurred())
if len(nodes) < 1 {
Expand All @@ -298,7 +297,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
defer testpod.DeleteAsync(f, podMap)

By("checking initial CR created")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
initialNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)

By("creating additional pod consuming exclusive CPUs")
sleeperPod2 := testpod.GuaranteedSleeper(testpod.WithLimits(
Expand All @@ -319,7 +318,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
By("checking the changes in the updated topology")
var finalNodeTopo *v1alpha2.NodeResourceTopology
Eventually(func() bool {
finalNodeTopo, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(context.TODO(), topologyUpdaterNode.Name, metav1.GetOptions{})
finalNodeTopo, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(ctx, topologyUpdaterNode.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("failed to get the node topology resource: %v", err)
return false
Expand Down Expand Up @@ -348,12 +347,12 @@ var _ = SIGDescribe("NFD topology updater", func() {
})

When("topology-updater configure to exclude memory", func() {
BeforeEach(func() {
BeforeEach(func(ctx context.Context) {
cm := testutils.NewConfigMap("nfd-topology-updater-conf", "nfd-topology-updater.conf", `
excludeList:
'*': [memory]
`)
cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{})
cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())

cfg, err := testutils.GetConfig()
Expand All @@ -369,10 +368,10 @@ excludeList:
topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
})

It("noderesourcetopology should not advertise the memory resource", func() {
It("noderesourcetopology should not advertise the memory resource", func(ctx context.Context) {
Eventually(func() bool {
memoryFound := false
nodeTopology := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
nodeTopology := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
for _, zone := range nodeTopology.Zones {
for _, res := range zone.Resources {
if res.Name == string(corev1.ResourceMemory) {
Expand All @@ -387,7 +386,7 @@ excludeList:
})
})
When("topology-updater configure to compute pod fingerprint", func() {
BeforeEach(func() {
BeforeEach(func(ctx context.Context) {
cfg, err := testutils.GetConfig()
Expect(err).ToNot(HaveOccurred())

Expand All @@ -400,10 +399,10 @@ excludeList:
}
topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
})
It("noderesourcetopology should advertise pod fingerprint in top-level attribute", func() {
It("noderesourcetopology should advertise pod fingerprint in top-level attribute", func(ctx context.Context) {
Eventually(func() bool {
// get node topology
nodeTopology := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
nodeTopology := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)

// look for attribute
podFingerprintAttribute, err := findAttribute(nodeTopology.Attributes, podfingerprint.Attribute)
Expand All @@ -412,7 +411,7 @@ excludeList:
return false
}
// get pods in node
pods, err := f.ClientSet.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{FieldSelector: "spec.nodeName=" + topologyUpdaterNode.Name})
pods, err := f.ClientSet.CoreV1().Pods("").List(ctx, metav1.ListOptions{FieldSelector: "spec.nodeName=" + topologyUpdaterNode.Name})
if err != nil {
framework.Logf("podFingerprint error while recovering %q node pods: %v", topologyUpdaterNode.Name, err)
return false
Expand Down
6 changes: 3 additions & 3 deletions test/e2e/utils/configmap.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,13 +36,13 @@ func NewConfigMap(name, key, data string) *corev1.ConfigMap {
}

// UpdateConfigMap is a helper for updating a ConfigMap object.
func UpdateConfigMap(c clientset.Interface, name, namespace, key, data string) error {
cm, err := c.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
func UpdateConfigMap(ctx context.Context, c clientset.Interface, name, namespace, key, data string) error {
cm, err := c.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("configmap %s is not found", name)
}
cm.Data[key] = data
_, err = c.CoreV1().ConfigMaps(namespace).Update(context.TODO(), cm, metav1.UpdateOptions{})
_, err = c.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("error while updating configmap with name %s", name)
}
Expand Down
26 changes: 13 additions & 13 deletions test/e2e/utils/crd.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ import (
var packagePath string

// CreateNfdCRDs creates the NodeFeatureRule CRD in the API server.
func CreateNfdCRDs(cli extclient.Interface) ([]*apiextensionsv1.CustomResourceDefinition, error) {
func CreateNfdCRDs(ctx context.Context, cli extclient.Interface) ([]*apiextensionsv1.CustomResourceDefinition, error) {
crds, err := crdsFromFile(filepath.Join(packagePath, "..", "..", "..", "deployment", "base", "nfd-crds", "nfd-api-crds.yaml"))
if err != nil {
return nil, err
Expand All @@ -49,13 +49,13 @@ func CreateNfdCRDs(cli extclient.Interface) ([]*apiextensionsv1.CustomResourceDe
newCRDs := make([]*apiextensionsv1.CustomResourceDefinition, len(crds))
for i, crd := range crds {
// Delete existing CRD (if any) with this we also get rid of stale objects
err = cli.ApiextensionsV1().CustomResourceDefinitions().Delete(context.TODO(), crd.Name, metav1.DeleteOptions{})
err = cli.ApiextensionsV1().CustomResourceDefinitions().Delete(ctx, crd.Name, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
return nil, fmt.Errorf("failed to delete %q CRD: %w", crd.Name, err)
} else if err == nil {
// Wait for CRD deletion to complete before trying to re-create it
err = wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
_, err = cli.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{})
_, err = cli.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd.Name, metav1.GetOptions{})
if err == nil {
return false, nil
} else if errors.IsNotFound(err) {
Expand All @@ -68,7 +68,7 @@ func CreateNfdCRDs(cli extclient.Interface) ([]*apiextensionsv1.CustomResourceDe
}
}

newCRDs[i], err = cli.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{})
newCRDs[i], err = cli.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, crd, metav1.CreateOptions{})
if err != nil {
return nil, err
}
Expand All @@ -77,7 +77,7 @@ func CreateNfdCRDs(cli extclient.Interface) ([]*apiextensionsv1.CustomResourceDe
}

// CreateOrUpdateNodeFeaturesFromFile creates/updates a NodeFeature object from a given file located under test data directory.
func CreateOrUpdateNodeFeaturesFromFile(cli nfdclientset.Interface, filename, namespace, nodename string) ([]string, error) {
func CreateOrUpdateNodeFeaturesFromFile(ctx context.Context, cli nfdclientset.Interface, filename, namespace, nodename string) ([]string, error) {
objs, err := nodeFeaturesFromFile(filepath.Join(packagePath, "..", "data", filename))
if err != nil {
return nil, err
Expand All @@ -91,13 +91,13 @@ func CreateOrUpdateNodeFeaturesFromFile(cli nfdclientset.Interface, filename, na
}
obj.Labels[nfdv1alpha1.NodeFeatureObjNodeNameLabel] = nodename

if oldObj, err := cli.NfdV1alpha1().NodeFeatures(namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}); errors.IsNotFound(err) {
if _, err := cli.NfdV1alpha1().NodeFeatures(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}); err != nil {
if oldObj, err := cli.NfdV1alpha1().NodeFeatures(namespace).Get(ctx, obj.Name, metav1.GetOptions{}); errors.IsNotFound(err) {
if _, err := cli.NfdV1alpha1().NodeFeatures(namespace).Create(ctx, obj, metav1.CreateOptions{}); err != nil {
return names, fmt.Errorf("failed to create NodeFeature %w", err)
}
} else if err == nil {
obj.SetResourceVersion(oldObj.GetResourceVersion())
if _, err = cli.NfdV1alpha1().NodeFeatures(namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}); err != nil {
if _, err = cli.NfdV1alpha1().NodeFeatures(namespace).Update(ctx, obj, metav1.UpdateOptions{}); err != nil {
return names, fmt.Errorf("failed to update NodeFeature object: %w", err)
}
} else {
Expand All @@ -109,35 +109,35 @@ func CreateOrUpdateNodeFeaturesFromFile(cli nfdclientset.Interface, filename, na
}

// CreateNodeFeatureRuleFromFile creates a NodeFeatureRule object from a given file located under test data directory.
func CreateNodeFeatureRulesFromFile(cli nfdclientset.Interface, filename string) error {
func CreateNodeFeatureRulesFromFile(ctx context.Context, cli nfdclientset.Interface, filename string) error {
objs, err := nodeFeatureRulesFromFile(filepath.Join(packagePath, "..", "data", filename))
if err != nil {
return err
}

for _, obj := range objs {
if _, err = cli.NfdV1alpha1().NodeFeatureRules().Create(context.TODO(), obj, metav1.CreateOptions{}); err != nil {
if _, err = cli.NfdV1alpha1().NodeFeatureRules().Create(ctx, obj, metav1.CreateOptions{}); err != nil {
return err
}
}
return nil
}

// UpdateNodeFeatureRulesFromFile updates existing NodeFeatureRule object from a given file located under test data directory.
func UpdateNodeFeatureRulesFromFile(cli nfdclientset.Interface, filename string) error {
func UpdateNodeFeatureRulesFromFile(ctx context.Context, cli nfdclientset.Interface, filename string) error {
objs, err := nodeFeatureRulesFromFile(filepath.Join(packagePath, "..", "data", filename))
if err != nil {
return err
}

for _, obj := range objs {
var nfr *nfdv1alpha1.NodeFeatureRule
if nfr, err = cli.NfdV1alpha1().NodeFeatureRules().Get(context.TODO(), obj.Name, metav1.GetOptions{}); err != nil {
if nfr, err = cli.NfdV1alpha1().NodeFeatureRules().Get(ctx, obj.Name, metav1.GetOptions{}); err != nil {
return fmt.Errorf("failed to get NodeFeatureRule %w", err)
}

obj.SetResourceVersion(nfr.GetResourceVersion())
if _, err = cli.NfdV1alpha1().NodeFeatureRules().Update(context.TODO(), obj, metav1.UpdateOptions{}); err != nil {
if _, err = cli.NfdV1alpha1().NodeFeatureRules().Update(ctx, obj, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("failed to update NodeFeatureRule %w", err)
}
}
Expand Down
Loading

0 comments on commit ad8bd05

Please sign in to comment.