From eb665b6442f089940fb9510909e8cfc474af8a9d Mon Sep 17 00:00:00 2001
From: Alessandro Olivero
Date: Thu, 1 Jul 2021 08:39:11 +0000
Subject: [PATCH] node cleanup test

This commit adds unit tests for the node cleanup process.
---
 Makefile                                  |   2 +-
 .../resourceoffer_controller_test.go      | 149 +++++++++++++++++-
 .../liqoNodeProvider/nodeProvider_test.go | 115 ++++++++++++++
 3 files changed, 264 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index 81a5f42227..b3301883ae 100644
--- a/Makefile
+++ b/Makefile
@@ -10,7 +10,7 @@ GOBIN=$(shell go env GOBIN)
 endif
 
 # Set the capsule version to use
-CAPSULE_VERSION = "v0.1.0-rc2"
+CAPSULE_VERSION = v0.1.0-rc2
 
 gen: generate fmt vet manifests rbacs docs
diff --git a/pkg/liqo-controller-manager/resourceoffer-controller/resourceoffer_controller_test.go b/pkg/liqo-controller-manager/resourceoffer-controller/resourceoffer_controller_test.go
index 784984f84d..da83cf28d8 100644
--- a/pkg/liqo-controller-manager/resourceoffer-controller/resourceoffer_controller_test.go
+++ b/pkg/liqo-controller-manager/resourceoffer-controller/resourceoffer_controller_test.go
@@ -17,12 +17,14 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	configv1alpha1 "github.com/liqotech/liqo/apis/config/v1alpha1"
 	discoveryv1alpha1 "github.com/liqotech/liqo/apis/discovery/v1alpha1"
 	sharingv1alpha1 "github.com/liqotech/liqo/apis/sharing/v1alpha1"
 	crdreplicator "github.com/liqotech/liqo/internal/crdReplicator"
 	"github.com/liqotech/liqo/pkg/clusterid"
+	"github.com/liqotech/liqo/pkg/consts"
 	"github.com/liqotech/liqo/pkg/discovery"
 	testUtils "github.com/liqotech/liqo/pkg/utils/testUtils"
 	"github.com/liqotech/liqo/pkg/vkMachinery/forge"
@@ -227,7 +229,7 @@ var _ = Describe("ResourceOffer Controller", func() {
 				return reflect.DeepEqual(deploymentList.Items[0], *vkDeploy)
 			}, timeout, interval).Should(BeTrue())
 
-			// check tht the deployment has the controller reference annotation
+			// check that the deployment has the controller reference annotation
 			Eventually(func() string {
 				vkDeploy, err := controller.getVirtualKubeletDeployment(ctx, resourceOffer)
 				if err != nil || vkDeploy == nil {
@@ -306,3 +308,148 @@ var _ = Describe("ResourceOffer Controller", func() {
 	})
 
 })
+
+var _ = Describe("ResourceOffer Operator util functions", func() {
+
+	Context("canDeleteVirtualKubeletDeployment", func() {
+
+		type canDeleteVirtualKubeletDeploymentTestcase struct {
+			resourceOffer *sharingv1alpha1.ResourceOffer
+			expected      OmegaMatcher
+		}
+
+		DescribeTable("canDeleteVirtualKubeletDeployment table",
+
+			func(c canDeleteVirtualKubeletDeploymentTestcase) {
+				Expect(canDeleteVirtualKubeletDeployment(c.resourceOffer)).To(c.expected)
+			},
+
+			Entry("refused ResourceOffer", canDeleteVirtualKubeletDeploymentTestcase{
+				resourceOffer: &sharingv1alpha1.ResourceOffer{
+					ObjectMeta: metav1.ObjectMeta{
+						Finalizers: []string{},
+					},
+					Status: sharingv1alpha1.ResourceOfferStatus{
+						Phase: sharingv1alpha1.ResourceOfferRefused,
+					},
+				},
+				expected: BeTrue(),
+			}),
+
+			Entry("accepted ResourceOffer", canDeleteVirtualKubeletDeploymentTestcase{
+				resourceOffer: &sharingv1alpha1.ResourceOffer{
+					ObjectMeta: metav1.ObjectMeta{
+						Finalizers: []string{},
+					},
+					Status: sharingv1alpha1.ResourceOfferStatus{
+						Phase: sharingv1alpha1.ResourceOfferAccepted,
+					},
+				},
+				expected: BeFalse(),
+			}),
+
+			Entry("accepted ResourceOffer with deletion timestamp", canDeleteVirtualKubeletDeploymentTestcase{
+				resourceOffer: &sharingv1alpha1.ResourceOffer{
+					ObjectMeta: metav1.ObjectMeta{
+						DeletionTimestamp: &metav1.Time{
+							Time: time.Now(),
+						},
+						Finalizers: []string{},
+					},
+					Status: sharingv1alpha1.ResourceOfferStatus{
+						Phase: sharingv1alpha1.ResourceOfferAccepted,
+					},
+				},
+				expected: BeTrue(),
+			}),
+
+			Entry("refused ResourceOffer with finalizer", canDeleteVirtualKubeletDeploymentTestcase{
+				resourceOffer: &sharingv1alpha1.ResourceOffer{
+					ObjectMeta: metav1.ObjectMeta{
+						Finalizers: []string{
+							consts.NodeFinalizer,
+						},
+					},
+					Status: sharingv1alpha1.ResourceOfferStatus{
+						Phase: sharingv1alpha1.ResourceOfferRefused,
+					},
+				},
+				expected: BeFalse(),
+			}),
+
+			Entry("accepted ResourceOffer with deletion timestamp and finalizer", canDeleteVirtualKubeletDeploymentTestcase{
+				resourceOffer: &sharingv1alpha1.ResourceOffer{
+					ObjectMeta: metav1.ObjectMeta{
+						DeletionTimestamp: &metav1.Time{
+							Time: time.Now(),
+						},
+						Finalizers: []string{
+							consts.NodeFinalizer,
+						},
+					},
+					Status: sharingv1alpha1.ResourceOfferStatus{
+						Phase: sharingv1alpha1.ResourceOfferAccepted,
+					},
+				},
+				expected: BeFalse(),
+			}),
+		)
+
+	})
+
+	Context("getRequestFromObject", func() {
+
+		type getRequestFromObjectTestcase struct {
+			resourceOffer     *sharingv1alpha1.ResourceOffer
+			expectedErr       OmegaMatcher
+			expectedErrString OmegaMatcher
+			expectedResult    OmegaMatcher
+		}
+
+		DescribeTable("getRequestFromObject table",
+
+			func(c getRequestFromObjectTestcase) {
+				res, err := getReconcileRequestFromObject(c.resourceOffer)
+				Expect(err).To(c.expectedErr)
+				if err != nil {
+					Expect(err.Error()).To(c.expectedErrString)
+				}
+				Expect(res).To(c.expectedResult)
+			},
+
+			Entry("Object with no annotation", getRequestFromObjectTestcase{
+				resourceOffer: &sharingv1alpha1.ResourceOffer{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:        "name",
+						Namespace:   "namespace",
+						Annotations: map[string]string{},
+					},
+				},
+				expectedErr:       HaveOccurred(),
+				expectedErrString: ContainSubstring("%v annotation not found in object %v/%v", resourceOfferAnnotation, "namespace", "name"),
+				expectedResult:    Equal(reconcile.Request{}),
+			}),
+
+			Entry("Object with annotation", getRequestFromObjectTestcase{
+				resourceOffer: &sharingv1alpha1.ResourceOffer{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "name",
+						Namespace: "namespace",
+						Annotations: map[string]string{
+							resourceOfferAnnotation: "name",
+						},
+					},
+				},
+				expectedErr: Not(HaveOccurred()),
+				expectedResult: Equal(reconcile.Request{
+					NamespacedName: types.NamespacedName{
+						Name:      "name",
+						Namespace: "namespace",
+					},
+				}),
+			}),
+		)
+
+	})
+
+})
diff --git a/pkg/virtualKubelet/liqoNodeProvider/nodeProvider_test.go b/pkg/virtualKubelet/liqoNodeProvider/nodeProvider_test.go
index ada17e83b1..f2c7e5374f 100644
--- a/pkg/virtualKubelet/liqoNodeProvider/nodeProvider_test.go
+++ b/pkg/virtualKubelet/liqoNodeProvider/nodeProvider_test.go
@@ -2,6 +2,7 @@ package liqonodeprovider
 
 import (
 	"context"
+	"fmt"
 	"path/filepath"
 	"testing"
 	"time"
@@ -13,9 +14,11 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/utils/pointer"
 
 	netv1alpha1 "github.com/liqotech/liqo/apis/net/v1alpha1"
 	sharingv1alpha1 "github.com/liqotech/liqo/apis/sharing/v1alpha1"
@@ -377,4 +380,116 @@ var _ = Describe("NodeProvider", func() {
 		Expect(ok).To(BeFalse())
 	})
 
Context("Node Cleanup", func() { + + It("Cordon Node", func() { + + err = nodeProvider.cordonNode(ctx) + Expect(err).ToNot(HaveOccurred()) + + client := kubernetes.NewForConfigOrDie(cluster.GetCfg()) + Eventually(func() bool { + node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + if err != nil { + return false + } + return node.Spec.Unschedulable + }, timeout, interval).Should(BeTrue()) + + }) + + It("Drain Node", func() { + + client := kubernetes.NewForConfigOrDie(cluster.GetCfg()) + + By("creating pods on our virtual node") + + nPods := 10 + for i := 0; i < nPods; i++ { + // put some pods to our node, some other in other nodes + var nodeName string + if i%2 == 0 { + nodeName = nodeProvider.nodeName + } else { + nodeName = "other-node" + } + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("pod-%v", i), + Namespace: v1.NamespaceDefault, + }, + Spec: v1.PodSpec{ + NodeName: nodeName, + Containers: []v1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + } + _, err = client.CoreV1().Pods(v1.NamespaceDefault).Create(ctx, pod, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + } + + By("Draining node") + + // set a deadline for the draining + drainCtx, cancel := context.WithDeadline(ctx, time.Now().Add(10*time.Second)) + defer cancel() + + // the drain function needs to be launched in a different goroutine since + // it is blocking until the pods deletion + completed := false + go func() { + err := nodeProvider.drainNode(drainCtx) + if err == nil { + completed = true + } + }() + + Eventually(func() bool { + podList, err := client.CoreV1().Pods(v1.NamespaceDefault).List(ctx, metav1.ListOptions{ + FieldSelector: fields.SelectorFromSet(fields.Set{ + "spec.nodeName": nodeProvider.nodeName, + }).String(), + }) + if err != nil { + return true + } + + // check if every pod has a deletion timestamp set, if it is, the eviction has been created + for i := range podList.Items { + if podList.Items[i].GetDeletionTimestamp().IsZero() { + return true + } + // delete the evicted pods to make the drain function to terminate, + // we have to do it manually since no API server is running + Expect(client.CoreV1().Pods(v1.NamespaceDefault).Delete(ctx, podList.Items[i].Name, metav1.DeleteOptions{ + GracePeriodSeconds: pointer.Int64Ptr(0), + })).ToNot(HaveOccurred()) + } + return false + }, timeout, interval).Should(BeFalse()) + + // the drain function has completed successfully + Eventually(func() bool { + return completed + }, timeout, interval).Should(BeTrue()) + + By("Checking that the pods on other nodes are still alive") + + podList, err := client.CoreV1().Pods(v1.NamespaceDefault).List(ctx, metav1.ListOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(len(podList.Items)).To(BeNumerically("==", nPods/2)) + for _, pod := range podList.Items { + Expect(pod.Spec.NodeName).ToNot(Equal(nodeProvider.nodeName)) + Expect(pod.GetDeletionTimestamp().IsZero()).To(BeTrue()) + } + + }) + + }) + })