Skip to content

Commit

Permalink
chore(scheduler): added new scheduler tests (#634)
Browse files Browse the repository at this point in the history
Signed-off-by: RealAnna <anna.reale@dynatrace.com>
  • Loading branch information
RealAnna committed Jan 18, 2023
1 parent acf81d6 commit 2e47b92
Showing 1 changed file with 143 additions and 81 deletions.
224 changes: 143 additions & 81 deletions scheduler/test/e2e/scheduler_test.go
Original file line number Diff line number Diff line change
@@ -1,9 +1,14 @@
package e2e

import (
"time"

testv1alpha2 "github.com/keptn/lifecycle-toolkit/scheduler/test/e2e/fake/v1alpha2"
common2 "github.com/keptn/lifecycle-toolkit/scheduler/test/e2e/fake/v1alpha2/common"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
types2 "github.com/onsi/gomega/types"
"github.com/pkg/errors"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
Expand All @@ -19,8 +24,10 @@ const AppAnnotation = "keptn.sh/app"
const K8sRecommendedWorkloadAnnotations = "app.kubernetes.io/name"
const K8sRecommendedVersionAnnotations = "app.kubernetes.io/version"
const K8sRecommendedAppAnnotations = "app.kubernetes.io/part-of"
const KeptnScheduler = "keptn-scheduler"

// clean example of E2E test/ integration test --
// Sentinel errors returned by podScheduled to distinguish "no scheduling
// condition at all" (test likely not running against a real kind cluster)
// from "scheduling condition present but still pending".
// NOTE(review): Go convention would name these ErrScheduling / ErrPending and
// keep error strings lowercase; names are kept as-is for existing callers,
// but the messages are fixed to be grammatical and lowercase.
var SchedulingError = errors.New("pod is neither scheduled nor existing; this test only works on a real installation — have you set up your kind env?")
var SchedulingInPending = errors.New("pod is pending")

var _ = Describe("[E2E] KeptnScheduler", Ordered, func() {
BeforeAll(func() {
Expand All @@ -36,118 +43,166 @@ var _ = Describe("[E2E] KeptnScheduler", Ordered, func() {
AppAnnotation: "myapp",
}

pause := imageutils.GetPauseImageName()
var (
workloadinstance *testv1alpha2.KeptnWorkloadInstance
pod *apiv1.Pod
)
pod := &apiv1.Pod{}

BeforeEach(func() {
DeferCleanup(func() {
err := k8sClient.Delete(ctx, pod)
logErrorIfPresent(err)
})

//create a test Pod
name := names.SimpleNameGenerator.GenerateName("my-testpod-")

pod = WithContainer(st.MakePod().
Namespace("default").
Name(name).
Req(map[apiv1.ResourceName]string{apiv1.ResourceMemory: "50"}).
ZeroTerminationGracePeriod().
Obj(), pause)
pod.Annotations = annotations

err := k8sClient.Create(ctx, pod)
Expect(ignoreAlreadyExists(err)).NotTo(HaveOccurred(), "could not add pod")
*pod = initPod(*pod, annotations, nil, KeptnScheduler)
})

Context("a new Pod", func() {

It(" should stay pending if no workload instance is available", func() {

newPod := &apiv1.Pod{}
Eventually(func() error {
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}, newPod)
return err
}).Should(Succeed())

Expect(newPod.Status.Phase).To(Equal(apiv1.PodPending))
Context("a new Pod ", func() {

It("should stay pending if no workload instance is available", func() {
checkPending(pod)
})

It(" should be scheduled when workload instance pre-evaluation checks are done", func() {

workloadinstance = initWorkloadInstance()

err := k8sClient.Create(ctx, workloadinstance)
Expect(ignoreAlreadyExists(err)).To(BeNil())

Eventually(func() error {
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: pod.Namespace, Name: "myapp-myworkload-1.0.0"}, workloadinstance)
return err
}).Should(Succeed())
workloadinstance.Status.PreDeploymentEvaluationStatus = "Succeeded"
err = k8sClient.Status().Update(ctx, workloadinstance)

Expect(err).To(BeNil())
Eventually(func() error {
return podRunning(pod.Namespace, pod.Name)
}).Should(Succeed())

err = k8sClient.Delete(ctx, workloadinstance)
Expect(err).NotTo(HaveOccurred(), "could not remove workloadinstance")

It("should be scheduled when workload instance pre-evaluation checks are done", func() {
checkWorkload("myapp-myworkload-1.0.0", *pod, "Succeeded")
})
})
})

Describe("If NOT annotated for keptn-scheduler", func() {
pause := imageutils.GetPauseImageName()
var (
pod *apiv1.Pod
)
Describe("If NOT annotated or labeled for keptn-scheduler", func() {
pod := &apiv1.Pod{}
BeforeEach(func() {
DeferCleanup(func() {
err := k8sClient.Delete(ctx, pod)
logErrorIfPresent(err)
})
*pod = initPod(*pod, nil, nil, "default")
})

//create a test Pod
name := names.SimpleNameGenerator.GenerateName("my-testpod-")

pod = WithContainer(st.MakePod().
Namespace("default").
Name(name).
Req(map[apiv1.ResourceName]string{apiv1.ResourceMemory: "50"}).
ZeroTerminationGracePeriod().
Obj(), pause)
Context("a new Pod ", func() {

err := k8sClient.Create(ctx, pod)
Expect(ignoreAlreadyExists(err)).NotTo(HaveOccurred(), "could not add pod")
It("should be immediately scheduled", func() {
assertScheduled(*pod).Should(Succeed())
})
})
})

Describe("If labeled for keptn-scheduler", func() {
labels1 := map[string]string{
K8sRecommendedWorkloadAnnotations: "myworkload",
K8sRecommendedVersionAnnotations: "1.0.1",
K8sRecommendedAppAnnotations: "mylabeledapp",
}
labels2 := map[string]string{
K8sRecommendedWorkloadAnnotations: "myworkload",
K8sRecommendedVersionAnnotations: "1.0.2",
K8sRecommendedAppAnnotations: "mylabeledapp",
}

Context("a new Pod", func() {
pod1 := &apiv1.Pod{}
pod2 := &apiv1.Pod{}

It(" should be immediately scheduled", func() {
BeforeEach(func() {
DeferCleanup(func() {
err := k8sClient.Delete(ctx, pod1)
logErrorIfPresent(err)
err = k8sClient.Delete(ctx, pod2)
logErrorIfPresent(err)
})
*pod1 = initPod(*pod1, nil, labels1, KeptnScheduler)
*pod2 = initPod(*pod2, nil, labels2, KeptnScheduler)
})

Eventually(func() error {
return podRunning(pod.Namespace, pod.Name)
}).Should(Succeed())
Context("a new Pod ", func() {
It("should stay pending if no workload instance is available", func() {
checkPending(pod1)
})
It("should be scheduled when workload instance pre-evaluation checks are done", func() {
checkWorkload("mylabeledapp-myworkload-1.0.1", *pod1, "Succeeded")
})

It("should NOT be scheduled when workload instance pre-evaluation checks fails", func() {
checkWorkload("mylabeledapp-myworkload-1.0.2", *pod2, "Failed")
})
})
})

})

func initWorkloadInstance() *testv1alpha2.KeptnWorkloadInstance {
// initPod builds a pause-container test Pod with a generated name in the
// "default" namespace, applies the given annotations and labels when provided,
// opts the Pod into the keptn scheduler when requested, and creates it in the
// cluster. The created Pod is returned so the caller can track and clean it up.
func initPod(pod apiv1.Pod, annotations map[string]string, labels map[string]string, scheduler string) apiv1.Pod {
	podName := names.SimpleNameGenerator.GenerateName("my-testpod-")
	pauseImage := imageutils.GetPauseImageName()

	built := WithContainer(st.MakePod().
		Namespace("default").
		Name(podName).
		Req(map[apiv1.ResourceName]string{apiv1.ResourceMemory: "5"}).
		ZeroTerminationGracePeriod().
		Obj(), pauseImage)
	pod = *built

	if annotations != nil {
		pod.Annotations = annotations
	}
	if labels != nil {
		pod.Labels = labels
	}
	// Only the keptn scheduler is set explicitly; any other value leaves the
	// cluster default scheduler in charge.
	if scheduler == KeptnScheduler {
		pod.Spec.SchedulerName = scheduler
	}

	err := k8sClient.Create(ctx, &pod)
	Expect(ignoreAlreadyExists(err)).NotTo(HaveOccurred(), "could not add pod")
	return pod
}

// checkPending waits until the given Pod can be read back from the cluster and
// then asserts that it is still in the Pending phase.
func checkPending(pod *apiv1.Pod) {
	fetched := &apiv1.Pod{}
	key := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}

	Eventually(func() error {
		return k8sClient.Get(ctx, key, fetched)
	}).Should(Succeed())

	Expect(fetched.Status.Phase).To(Equal(apiv1.PodPending))
}

// checkWorkload creates a KeptnWorkloadInstance with the given name, drives its
// pre-deployment evaluation to the given status, and verifies the scheduling
// outcome of the Pod: scheduled on success, held back on failure. The workload
// instance is deleted again before returning.
func checkWorkload(workloadname string, pod apiv1.Pod, status common2.KeptnState) {
	workloadinstance := initWorkloadInstance(workloadname)

	err := k8sClient.Create(ctx, workloadinstance)
	Expect(ignoreAlreadyExists(err)).To(BeNil())

	// Wait until the instance is readable back before patching its status.
	Eventually(func() error {
		return k8sClient.Get(ctx, types.NamespacedName{Namespace: pod.Namespace, Name: workloadname}, workloadinstance)
	}).Should(Succeed())

	workloadinstance.Status.PreDeploymentEvaluationStatus = status
	err = k8sClient.Status().Update(ctx, workloadinstance)
	Expect(err).To(BeNil())

	if status == "Failed" {
		// BUGFIX: Eventually(...).ShouldNot(Succeed()) passes on the very first
		// failing poll, so it never actually verified that the Pod stays
		// unscheduled. Consistently is Gomega's construct for asserting a
		// condition holds over a time window.
		Consistently(func() error {
			return podScheduled(pod.Namespace, pod.Name)
		}).WithTimeout(15 * time.Second).WithPolling(3 * time.Second).ShouldNot(Succeed())
	} else {
		assertScheduled(pod).Should(Succeed())
	}

	err = k8sClient.Delete(ctx, workloadinstance)
	Expect(err).NotTo(HaveOccurred(), "could not remove workloadinstance")
}

// assertScheduled returns an async assertion that polls the scheduling state
// of the given Pod every 3 seconds for up to one minute.
func assertScheduled(pod apiv1.Pod) types2.AsyncAssertion {
	poll := func() error { return podScheduled(pod.Namespace, pod.Name) }
	return Eventually(poll).WithTimeout(60 * time.Second).WithPolling(3 * time.Second)
}

func initWorkloadInstance(name string) *testv1alpha2.KeptnWorkloadInstance {

var fakeInstance = testv1alpha2.KeptnWorkloadInstance{
TypeMeta: metav1.TypeMeta{
Kind: "KeptnWorkloadInstance",
APIVersion: "lifecycle.keptn.sh/v1alpha2",
},
ObjectMeta: metav1.ObjectMeta{Name: "myapp-myworkload-1.0.0", Namespace: "default"},
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"},
Spec: testv1alpha2.KeptnWorkloadInstanceSpec{
KeptnWorkloadSpec: testv1alpha2.KeptnWorkloadSpec{
ResourceReference: testv1alpha2.ResourceReference{Name: "myfakeres"},
Expand All @@ -159,21 +214,28 @@ func initWorkloadInstance() *testv1alpha2.KeptnWorkloadInstance {
return &fakeInstance
}

func podRunning(namespace, name string) error {
func podScheduled(namespace, name string) error {
pod := apiv1.Pod{}
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, &pod)
if err != nil {
// This could be a connection error so we want to retry.
// This could be a connection error, we want to retry.
GinkgoLogr.Error(err, "Failed to get", "pod", klog.KRef(namespace, name))
return err
}

if pod.Status.Phase == apiv1.PodSucceeded || pod.Status.Phase == apiv1.PodFailed || pod.Status.Phase == apiv1.PodRunning {
return nil
}

for _, c := range pod.Status.Conditions {
if c.Type == "PodScheduled" {
return nil
if c.Type == apiv1.PodScheduled {
if c.Status == apiv1.ConditionTrue {
return nil
}
return SchedulingInPending
}
}
return err
return SchedulingError
}

func WithContainer(pod *apiv1.Pod, image string) *apiv1.Pod {
Expand Down

0 comments on commit 2e47b92

Please sign in to comment.