diff --git a/operator/config/rbac/role.yaml b/operator/config/rbac/role.yaml index 58a9d2b1b1..2d9246845e 100644 --- a/operator/config/rbac/role.yaml +++ b/operator/config/rbac/role.yaml @@ -14,14 +14,6 @@ rules: - statefulsets verbs: - get -- apiGroups: - - apps - resources: - - deployments - - replicasets - - statefulsets - verbs: - - get - list - watch - apiGroups: diff --git a/operator/controllers/common/errors.go b/operator/controllers/common/errors.go index a14a5ab0cd..f891c3d5aa 100644 --- a/operator/controllers/common/errors.go +++ b/operator/controllers/common/errors.go @@ -8,6 +8,7 @@ var ErrRetryCountExceeded = fmt.Errorf("retryCount for evaluation exceeded") var ErrNoValues = fmt.Errorf("no values") var ErrInvalidOperator = fmt.Errorf("invalid operator") var ErrCannotMarshalParams = fmt.Errorf("could not marshal parameters") +var ErrUnsupportedWorkloadInstanceResourceReference = fmt.Errorf("unsupported Resource Reference") var ErrCannotRetrieveInstancesMsg = "could not retrieve instances: %w" var ErrCannotFetchAppMsg = "could not retrieve KeptnApp: %w" diff --git a/operator/controllers/keptnworkloadinstance/controller.go b/operator/controllers/keptnworkloadinstance/controller.go index 9f7dbe773e..c44202220d 100644 --- a/operator/controllers/keptnworkloadinstance/controller.go +++ b/operator/controllers/keptnworkloadinstance/controller.go @@ -62,7 +62,7 @@ type KeptnWorkloadInstanceReconciler struct { //+kubebuilder:rbac:groups=lifecycle.keptn.sh,resources=keptntasks/finalizers,verbs=update //+kubebuilder:rbac:groups=core,resources=events,verbs=create;watch;patch //+kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch -//+kubebuilder:rbac:groups=apps,resources=replicasets;deployments;statefulsets,verbs=get;list;watch +//+kubebuilder:rbac:groups=apps,resources=replicasets;deployments;statefulsets;daemonsets,verbs=get;list;watch // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
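Since ErrUnsupportedWorkloadInstanceResourceReference is created with fmt.Errorf, it carries no dynamic context, so callers that need to treat an unsupported ResourceReference.Kind differently from a transient API failure would compare against it with errors.Is. The Go sketch below illustrates that pattern; the handleReconcileResult helper and its main driver are hypothetical and not part of this change, only the error variable and the controllercommon import path come from the diff.

package main

import (
	"errors"
	"fmt"

	controllercommon "github.com/keptn/lifecycle-toolkit/operator/controllers/common"
)

// handleReconcileResult is a hypothetical helper showing how the sentinel error
// can be told apart from transient API errors: an unsupported Kind is a
// permanent condition and should not be requeued, while anything else
// (e.g. a failed GET against the API server) usually is.
func handleReconcileResult(err error) (requeue bool) {
	if err == nil {
		return false
	}
	if errors.Is(err, controllercommon.ErrUnsupportedWorkloadInstanceResourceReference) {
		fmt.Println("unsupported resource reference, not requeuing")
		return false
	}
	return true
}

func main() {
	// errors.Is also matches the sentinel when it has been wrapped with %w.
	wrapped := fmt.Errorf("reconcile failed: %w", controllercommon.ErrUnsupportedWorkloadInstanceResourceReference)
	fmt.Println("requeue:", handleReconcileResult(wrapped)) // requeue: false
}

Note that the daemonsets entry added to the +kubebuilder:rbac marker above only reaches config/rbac/role.yaml once the manifests are regenerated; the role.yaml hunk in this diff only removes a duplicated apps rule, so it is worth double-checking that the regenerated role also grants get/list/watch on daemonsets.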
diff --git a/operator/controllers/keptnworkloadinstance/controller_test.go b/operator/controllers/keptnworkloadinstance/controller_test.go index 2adf2c37c2..161a2d3ecf 100644 --- a/operator/controllers/keptnworkloadinstance/controller_test.go +++ b/operator/controllers/keptnworkloadinstance/controller_test.go @@ -3,6 +3,7 @@ package keptnworkloadinstance import ( "context" klcv1alpha1 "github.com/keptn/lifecycle-toolkit/operator/api/v1alpha1" + "github.com/keptn/lifecycle-toolkit/operator/api/v1alpha1/common" "github.com/stretchr/testify/require" testrequire "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" @@ -13,45 +14,165 @@ import ( "testing" ) -func TestKeptnWorkloadInstanceReconciler_isReferencedWorkloadRunning(t *testing.T) { +func TestKeptnWorkloadInstanceReconciler_reconcileDeployment_FailedReplicaSet(t *testing.T) { rep := int32(1) replicasetFail := makeReplicaSet("myrep", "default", &rep, 0) - statefulsetFail := makeStatefulSet("mystat", "default", &rep, 0) + + fakeClient := fake.NewClientBuilder().WithObjects(replicasetFail).Build() + + err := klcv1alpha1.AddToScheme(fakeClient.Scheme()) + testrequire.Nil(t, err) + + workloadInstance := makeWorkloadInstanceWithRef(replicasetFail.ObjectMeta, "ReplicaSet") + + err = fakeClient.Create(context.TODO(), workloadInstance) + require.Nil(t, err) r := &KeptnWorkloadInstanceReconciler{ - Client: fake.NewClientBuilder().WithObjects(replicasetFail, statefulsetFail).Build(), + Client: fakeClient, } - isOwnerRunning, err := r.isReferencedWorkloadRunning(context.TODO(), klcv1alpha1.ResourceReference{UID: "myrep", Name: "myrep", Kind: "ReplicaSet"}, "default") + + keptnState, err := r.reconcileDeployment(context.TODO(), workloadInstance) testrequire.Nil(t, err) - if isOwnerRunning { - t.Errorf("Should fail!") + testrequire.Equal(t, common.StateProgressing, keptnState) +} + +func makeWorkloadInstanceWithRef(objectMeta metav1.ObjectMeta, refKind string) *klcv1alpha1.KeptnWorkloadInstance { + workloadInstance := &klcv1alpha1.KeptnWorkloadInstance{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-wli", + Namespace: "default", + }, + Spec: klcv1alpha1.KeptnWorkloadInstanceSpec{ + KeptnWorkloadSpec: klcv1alpha1.KeptnWorkloadSpec{ + ResourceReference: klcv1alpha1.ResourceReference{ + UID: objectMeta.UID, + Name: objectMeta.Name, + Kind: refKind, + }, + }, + }, } + return workloadInstance +} + +func TestKeptnWorkloadInstanceReconciler_reconcileDeployment_FailedStatefulSet(t *testing.T) { - isOwnerRunning, err = r.isReferencedWorkloadRunning(context.TODO(), klcv1alpha1.ResourceReference{UID: "mystat", Name: "mystat", Kind: "StatefulSet"}, "default") + rep := int32(1) + statefulsetFail := makeStatefulSet("mystat", "default", &rep, 0) + + fakeClient := fake.NewClientBuilder().WithObjects(statefulsetFail).Build() + + err := klcv1alpha1.AddToScheme(fakeClient.Scheme()) testrequire.Nil(t, err) - if isOwnerRunning { - t.Errorf("Should fail!") + + workloadInstance := makeWorkloadInstanceWithRef(statefulsetFail.ObjectMeta, "StatefulSet") + + err = fakeClient.Create(context.TODO(), workloadInstance) + require.Nil(t, err) + + r := &KeptnWorkloadInstanceReconciler{ + Client: fakeClient, } - replicasetPass := makeReplicaSet("myrep", "default", &rep, 1) - statefulsetPass := makeStatefulSet("mystat", "default", &rep, 1) + keptnState, err := r.reconcileDeployment(context.TODO(), workloadInstance) + testrequire.Nil(t, err) + testrequire.Equal(t, common.StateProgressing, keptnState) +} + +func 
TestKeptnWorkloadInstanceReconciler_reconcileDeployment_FailedDaemonSet(t *testing.T) { - r2 := &KeptnWorkloadInstanceReconciler{ - Client: fake.NewClientBuilder().WithObjects(replicasetPass, statefulsetPass).Build(), + daemonSetFail := makeDaemonSet("mystat", "default", 1, 0) + + fakeClient := fake.NewClientBuilder().WithObjects(daemonSetFail).Build() + + err := klcv1alpha1.AddToScheme(fakeClient.Scheme()) + testrequire.Nil(t, err) + + workloadInstance := makeWorkloadInstanceWithRef(daemonSetFail.ObjectMeta, "DaemonSet") + + err = fakeClient.Create(context.TODO(), workloadInstance) + require.Nil(t, err) + + r := &KeptnWorkloadInstanceReconciler{ + Client: fakeClient, + } + + keptnState, err := r.reconcileDeployment(context.TODO(), workloadInstance) + testrequire.Nil(t, err) + testrequire.Equal(t, common.StateProgressing, keptnState) +} + +func TestKeptnWorkloadInstanceReconciler_reconcileDeployment_ReadyReplicaSet(t *testing.T) { + + rep := int32(1) + replicaSet := makeReplicaSet("myrep", "default", &rep, 1) + + fakeClient := fake.NewClientBuilder().WithObjects(replicaSet).Build() + + err := klcv1alpha1.AddToScheme(fakeClient.Scheme()) + testrequire.Nil(t, err) + + workloadInstance := makeWorkloadInstanceWithRef(replicaSet.ObjectMeta, "ReplicaSet") + + err = fakeClient.Create(context.TODO(), workloadInstance) + require.Nil(t, err) + + r := &KeptnWorkloadInstanceReconciler{ + Client: fakeClient, } - isOwnerRunning, err = r2.isReferencedWorkloadRunning(context.TODO(), klcv1alpha1.ResourceReference{UID: "myrep", Name: "myrep", Kind: "ReplicaSet"}, "default") + + keptnState, err := r.reconcileDeployment(context.TODO(), workloadInstance) + testrequire.Nil(t, err) + testrequire.Equal(t, common.StateSucceeded, keptnState) +} + +func TestKeptnWorkloadInstanceReconciler_reconcileDeployment_ReadyStatefulSet(t *testing.T) { + + rep := int32(1) + statefulSet := makeStatefulSet("mystat", "default", &rep, 1) + + fakeClient := fake.NewClientBuilder().WithObjects(statefulSet).Build() + + err := klcv1alpha1.AddToScheme(fakeClient.Scheme()) testrequire.Nil(t, err) - if !isOwnerRunning { - t.Errorf("Should find a replica owner!") + + workloadInstance := makeWorkloadInstanceWithRef(statefulSet.ObjectMeta, "StatefulSet") + + err = fakeClient.Create(context.TODO(), workloadInstance) + require.Nil(t, err) + + r := &KeptnWorkloadInstanceReconciler{ + Client: fakeClient, } - isOwnerRunning, err = r2.isReferencedWorkloadRunning(context.TODO(), klcv1alpha1.ResourceReference{UID: "mystat", Name: "mystat", Kind: "StatefulSet"}, "default") + keptnState, err := r.reconcileDeployment(context.TODO(), workloadInstance) testrequire.Nil(t, err) - if !isOwnerRunning { - t.Errorf("Should find a stateful set owner!") + testrequire.Equal(t, common.StateSucceeded, keptnState) +} + +func TestKeptnWorkloadInstanceReconciler_reconcileDeployment_ReadyDaemonSet(t *testing.T) { + + daemonSet := makeDaemonSet("mystat", "default", 1, 1) + + fakeClient := fake.NewClientBuilder().WithObjects(daemonSet).Build() + + err := klcv1alpha1.AddToScheme(fakeClient.Scheme()) + testrequire.Nil(t, err) + + workloadInstance := makeWorkloadInstanceWithRef(daemonSet.ObjectMeta, "DaemonSet") + + err = fakeClient.Create(context.TODO(), workloadInstance) + require.Nil(t, err) + + r := &KeptnWorkloadInstanceReconciler{ + Client: fakeClient, } + keptnState, err := r.reconcileDeployment(context.TODO(), workloadInstance) + testrequire.Nil(t, err) + testrequire.Equal(t, common.StateSucceeded, keptnState) } func TestKeptnWorkloadInstanceReconciler_IsPodRunning(t 
*testing.T) { @@ -135,6 +256,26 @@ func makeStatefulSet(name string, namespace string, wanted *int32, available int } +func makeDaemonSet(name string, namespace string, wanted int32, available int32) *appsv1.DaemonSet { + + return &appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "DaemonSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: types.UID(name), + }, + Spec: appsv1.DaemonSetSpec{}, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: wanted, + NumberReady: available, + }, + } + +} + func Test_getLatestAppVersion(t *testing.T) { type args struct { apps *klcv1alpha1.KeptnAppVersionList diff --git a/operator/controllers/keptnworkloadinstance/reconcile_deploymentstate.go b/operator/controllers/keptnworkloadinstance/reconcile_deploymentstate.go index fcba34fddf..2f411c0f99 100644 --- a/operator/controllers/keptnworkloadinstance/reconcile_deploymentstate.go +++ b/operator/controllers/keptnworkloadinstance/reconcile_deploymentstate.go @@ -2,6 +2,8 @@ package keptnworkloadinstance import ( "context" + controllercommon "github.com/keptn/lifecycle-toolkit/operator/controllers/common" + klcv1alpha1 "github.com/keptn/lifecycle-toolkit/operator/api/v1alpha1" "github.com/keptn/lifecycle-toolkit/operator/api/v1alpha1/common" appsv1 "k8s.io/api/apps/v1" @@ -11,60 +13,54 @@ import ( ) func (r *KeptnWorkloadInstanceReconciler) reconcileDeployment(ctx context.Context, workloadInstance *klcv1alpha1.KeptnWorkloadInstance) (common.KeptnState, error) { - if workloadInstance.Spec.ResourceReference.Kind == "Pod" { - isPodRunning, err := r.isPodRunning(ctx, workloadInstance.Spec.ResourceReference, workloadInstance.Namespace) - if err != nil { - return common.StateUnknown, err - } - if isPodRunning { - workloadInstance.Status.DeploymentStatus = common.StateSucceeded - } else { - workloadInstance.Status.DeploymentStatus = common.StateProgressing - } + var isRunning bool + var err error + + switch workloadInstance.Spec.ResourceReference.Kind { + case "Pod": + isRunning, err = r.isPodRunning(ctx, workloadInstance.Spec.ResourceReference, workloadInstance.Namespace) + case "ReplicaSet": + isRunning, err = r.isReplicaSetRunning(ctx, workloadInstance.Spec.ResourceReference, workloadInstance.Namespace) + case "StatefulSet": + isRunning, err = r.isStatefulSetRunning(ctx, workloadInstance.Spec.ResourceReference, workloadInstance.Namespace) + case "DaemonSet": + isRunning, err = r.isDaemonSetRunning(ctx, workloadInstance.Spec.ResourceReference, workloadInstance.Namespace) + default: + isRunning, err = false, controllercommon.ErrUnsupportedWorkloadInstanceResourceReference + } + + if err != nil { + return common.StateUnknown, err + } + if isRunning { + workloadInstance.Status.DeploymentStatus = common.StateSucceeded } else { - isReplicaRunning, err := r.isReferencedWorkloadRunning(ctx, workloadInstance.Spec.ResourceReference, workloadInstance.Namespace) - if err != nil { - return common.StateUnknown, err - } - if isReplicaRunning { - workloadInstance.Status.DeploymentStatus = common.StateSucceeded - } else { - workloadInstance.Status.DeploymentStatus = common.StateProgressing - } + workloadInstance.Status.DeploymentStatus = common.StateProgressing } - err := r.Client.Status().Update(ctx, workloadInstance) + err = r.Client.Status().Update(ctx, workloadInstance) if err != nil { return common.StateUnknown, err } return workloadInstance.Status.DeploymentStatus, nil } -func (r *KeptnWorkloadInstanceReconciler) isReferencedWorkloadRunning(ctx context.Context, resource 
klcv1alpha1.ResourceReference, namespace string) (bool, error) { - - var replicas *int32 - var desiredReplicas int32 - switch resource.Kind { - case "ReplicaSet": - rep := appsv1.ReplicaSet{} - err := r.Client.Get(ctx, types.NamespacedName{Name: resource.Name, Namespace: namespace}, &rep) - if err != nil { - return false, err - } - replicas = rep.Spec.Replicas - desiredReplicas = rep.Status.AvailableReplicas - case "StatefulSet": - sts := appsv1.StatefulSet{} - err := r.Client.Get(ctx, types.NamespacedName{Name: resource.Name, Namespace: namespace}, &sts) - if err != nil { - return false, err - } - replicas = sts.Spec.Replicas - desiredReplicas = sts.Status.AvailableReplicas +func (r *KeptnWorkloadInstanceReconciler) isReplicaSetRunning(ctx context.Context, resource klcv1alpha1.ResourceReference, namespace string) (bool, error) { + rep := appsv1.ReplicaSet{} + err := r.Client.Get(ctx, types.NamespacedName{Name: resource.Name, Namespace: namespace}, &rep) + if err != nil { + return false, err } + return *rep.Spec.Replicas == rep.Status.AvailableReplicas, nil +} - return *replicas == desiredReplicas, nil - +func (r *KeptnWorkloadInstanceReconciler) isDaemonSetRunning(ctx context.Context, resource klcv1alpha1.ResourceReference, namespace string) (bool, error) { + daemonSet := &appsv1.DaemonSet{} + err := r.Client.Get(ctx, types.NamespacedName{Name: resource.Name, Namespace: namespace}, daemonSet) + if err != nil { + return false, err + } + return daemonSet.Status.DesiredNumberScheduled == daemonSet.Status.NumberReady, nil } func (r *KeptnWorkloadInstanceReconciler) isPodRunning(ctx context.Context, resource klcv1alpha1.ResourceReference, namespace string) (bool, error) { @@ -82,3 +78,12 @@ func (r *KeptnWorkloadInstanceReconciler) isPodRunning(ctx context.Context, reso } return false, nil } + +func (r *KeptnWorkloadInstanceReconciler) isStatefulSetRunning(ctx context.Context, resource klcv1alpha1.ResourceReference, namespace string) (bool, error) { + sts := appsv1.StatefulSet{} + err := r.Client.Get(ctx, types.NamespacedName{Name: resource.Name, Namespace: namespace}, &sts) + if err != nil { + return false, err + } + return *sts.Spec.Replicas == sts.Status.AvailableReplicas, nil +} diff --git a/operator/test/component/workloadinstancecontroller_test.go b/operator/test/component/workloadinstancecontroller_test.go index ec4ac39576..f64e400bef 100644 --- a/operator/test/component/workloadinstancecontroller_test.go +++ b/operator/test/component/workloadinstancecontroller_test.go @@ -3,16 +3,37 @@ package component import ( "context" klcv1alpha1 "github.com/keptn/lifecycle-toolkit/operator/api/v1alpha1" + "github.com/keptn/lifecycle-toolkit/operator/api/v1alpha1/common" keptncontroller "github.com/keptn/lifecycle-toolkit/operator/controllers/common" "github.com/keptn/lifecycle-toolkit/operator/controllers/keptnworkloadinstance" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" otelsdk "go.opentelemetry.io/otel/sdk/trace" sdktest "go.opentelemetry.io/otel/sdk/trace/tracetest" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) +func getPodTemplateSpec() corev1.PodTemplateSpec { + return corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "nginx", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + } +} + // clean example of component test (E2E test/ integration test can be achieved adding a real cluster) // App controller creates AppVersion when a new App CRD is added // span for creation and reconcile are correct @@ -42,12 +63,13 @@ var _ = Describe("KeptnWorkloadInstanceController", Ordered, func() { ////setup controllers here controllers := []keptncontroller.Controller{&keptnworkloadinstance.KeptnWorkloadInstanceReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - Recorder: k8sManager.GetEventRecorderFor("test-app-controller"), - Log: GinkgoLogr, - Meters: initKeptnMeters(), - Tracer: tracer.Tracer("test-app-tracer"), + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + Recorder: k8sManager.GetEventRecorderFor("test-app-controller"), + Log: GinkgoLogr, + Meters: initKeptnMeters(), + SpanHandler: &keptncontroller.SpanHandler{}, + Tracer: tracer.Tracer("test-app-tracer"), }} setupManager(controllers) // we can register multiple time the same controller // so that they have a different span/trace @@ -110,6 +132,164 @@ var _ = Describe("KeptnWorkloadInstanceController", Ordered, func() { g.Expect(wi.Status.CurrentPhase).To(BeEmpty()) }, "3s").Should(Succeed()) }) + + It("should detect that the referenced StatefulSet is progressing", func() { + By("Deploying a StatefulSet to reference") + repl := int32(1) + statefulSet := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-statefulset", + Namespace: namespace, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &repl, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + Template: getPodTemplateSpec(), + }, + } + + defer func() { + _ = k8sClient.Delete(ctx, statefulSet) + }() + + err := k8sClient.Create(ctx, statefulSet) + Expect(err).To(BeNil()) + + By("Setting the App PreDeploymentEvaluation Status to 'Succeeded'") + appVersion.Status.PreDeploymentEvaluationStatus = common.StateSucceeded + err = k8sClient.Status().Update(ctx, appVersion) + Expect(err).To(BeNil()) + + By("Bringing the StatefulSet into its ready state") + statefulSet.Status.AvailableReplicas = 1 + statefulSet.Status.ReadyReplicas = 1 + statefulSet.Status.Replicas = 1 + err = k8sClient.Status().Update(ctx, statefulSet) + Expect(err).To(BeNil()) + + By("Looking up the StatefulSet to retrieve its UID") + err = k8sClient.Get(ctx, types.NamespacedName{ + Namespace: namespace, + Name: statefulSet.Name, + }, statefulSet) + Expect(err).To(BeNil()) + + By("Creating a WorkloadInstance that references the StatefulSet") + wi = &klcv1alpha1.KeptnWorkloadInstance{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: klcv1alpha1.KeptnWorkloadInstanceSpec{ + KeptnWorkloadSpec: klcv1alpha1.KeptnWorkloadSpec{ + ResourceReference: klcv1alpha1.ResourceReference{ + UID: statefulSet.UID, + Kind: "StatefulSet", + Name: "my-statefulset", + }, + Version: "2.0", + AppName: appVersion.GetAppName(), + }, + WorkloadName: 
"test-app-wname", + TraceId: map[string]string{"traceparent": "00-0f89f15e562489e2e171eca1cf9ba958-d2fa6dbbcbf7e29a-01"}, + }, + } + + err = k8sClient.Create(context.TODO(), wi) + Expect(err).To(BeNil()) + + wiNameObj := types.NamespacedName{ + Namespace: wi.Namespace, + Name: wi.Name, + } + Eventually(func(g Gomega) { + wi := &klcv1alpha1.KeptnWorkloadInstance{} + err := k8sClient.Get(ctx, wiNameObj, wi) + g.Expect(err).To(BeNil()) + g.Expect(wi.Status.DeploymentStatus).To(Equal(common.StateSucceeded)) + }, "20s").Should(Succeed()) + }) + It("should detect that the referenced DaemonSet is progressing", func() { + By("Deploying a DaemonSet to reference") + daemonSet := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-daemonset", + Namespace: namespace, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + Template: getPodTemplateSpec(), + }, + } + + defer func() { + _ = k8sClient.Delete(ctx, daemonSet) + }() + + err := k8sClient.Create(ctx, daemonSet) + Expect(err).To(BeNil()) + + By("Setting the App PreDeploymentEvaluation Status to 'Succeeded'") + appVersion.Status.PreDeploymentEvaluationStatus = common.StateSucceeded + err = k8sClient.Status().Update(ctx, appVersion) + Expect(err).To(BeNil()) + + By("Bringing the DaemonSet into its ready state") + daemonSet.Status.DesiredNumberScheduled = 1 + daemonSet.Status.NumberReady = 1 + err = k8sClient.Status().Update(ctx, daemonSet) + Expect(err).To(BeNil()) + + By("Looking up the DaemonSet to retrieve its UID") + err = k8sClient.Get(ctx, types.NamespacedName{ + Namespace: namespace, + Name: daemonSet.Name, + }, daemonSet) + Expect(err).To(BeNil()) + + By("Creating a WorkloadInstance that references the DaemonSet") + wi = &klcv1alpha1.KeptnWorkloadInstance{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: klcv1alpha1.KeptnWorkloadInstanceSpec{ + KeptnWorkloadSpec: klcv1alpha1.KeptnWorkloadSpec{ + ResourceReference: klcv1alpha1.ResourceReference{ + UID: daemonSet.UID, + Kind: "DaemonSet", + Name: "my-daemonset", + }, + Version: "2.0", + AppName: appVersion.GetAppName(), + }, + WorkloadName: "test-app-wname", + TraceId: map[string]string{"traceparent": "00-0f89f15e562489e2e171eca1cf9ba958-d2fa6dbbcbf7e29a-01"}, + }, + } + + err = k8sClient.Create(context.TODO(), wi) + Expect(err).To(BeNil()) + + wiNameObj := types.NamespacedName{ + Namespace: wi.Namespace, + Name: wi.Name, + } + Eventually(func(g Gomega) { + wi := &klcv1alpha1.KeptnWorkloadInstance{} + err := k8sClient.Get(ctx, wiNameObj, wi) + g.Expect(err).To(BeNil()) + g.Expect(wi.Status.DeploymentStatus).To(Equal(common.StateSucceeded)) + }, "20s").Should(Succeed()) + }) AfterEach(func() { // Remember to clean up the cluster after each test k8sClient.Delete(ctx, appVersion) @@ -130,11 +310,12 @@ func createAppVersionInCluster(name string, namespace string, version string) *k Namespace: namespace, }, Spec: klcv1alpha1.KeptnAppVersionSpec{ + AppName: name, KeptnAppSpec: klcv1alpha1.KeptnAppSpec{ Version: version, Workloads: []klcv1alpha1.KeptnWorkloadRef{ { - Name: "wi-test-app-wname", + Name: "wname", Version: "2.0", }, }, diff --git a/test/integration/simple-daemonset-annotated/00-assert.yaml b/test/integration/simple-daemonset-annotated/00-assert.yaml new file mode 100644 index 0000000000..b65c3d0fb2 --- /dev/null +++ b/test/integration/simple-daemonset-annotated/00-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: daemonset 
+status: + phase: Running diff --git a/test/integration/simple-daemonset-annotated/00-install.yaml b/test/integration/simple-daemonset-annotated/00-install.yaml new file mode 100644 index 0000000000..68bd4f80d5 --- /dev/null +++ b/test/integration/simple-daemonset-annotated/00-install.yaml @@ -0,0 +1,37 @@ +apiVersion: lifecycle.keptn.sh/v1alpha1 +kind: KeptnTaskDefinition +metadata: + name: pre-deployment-hello +spec: + function: + inline: + code: | + console.log("Pre-Deployment Task has been executed"); + +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: daemonset + name: daemonset + annotations: + keptn.sh/workload: work + keptn.sh/version: "0.4" + keptn.sh/pre-deployment-tasks: pre-deployment-hello +spec: + selector: + matchLabels: + app: daemonset + template: + metadata: + labels: + app: daemonset + annotations: + rollme: what + spec: + containers: + - image: busybox + name: busybox + command: ['sh', '-c', 'echo The app is running! && sleep infinity'] + diff --git a/test/integration/simple-daemonset-annotated/00-teststep.yaml b/test/integration/simple-daemonset-annotated/00-teststep.yaml new file mode 100644 index 0000000000..a96f3a8d8f --- /dev/null +++ b/test/integration/simple-daemonset-annotated/00-teststep.yaml @@ -0,0 +1,4 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: kubectl annotate ns $NAMESPACE keptn.sh/lifecycle-toolkit='enabled' \ No newline at end of file diff --git a/test/integration/simple-daemonset-annotated/01-assert.yaml b/test/integration/simple-daemonset-annotated/01-assert.yaml new file mode 100644 index 0000000000..89d83bd352 --- /dev/null +++ b/test/integration/simple-daemonset-annotated/01-assert.yaml @@ -0,0 +1,22 @@ +apiVersion: lifecycle.keptn.sh/v1alpha1 +kind: KeptnWorkload +metadata: + name: work-work + +--- + +apiVersion: lifecycle.keptn.sh/v1alpha1 +kind: KeptnWorkloadInstance +metadata: + name: work-work-0.4 +status: + currentPhase: Completed + deploymentStatus: Succeeded + postDeploymentEvaluationStatus: Succeeded + postDeploymentStatus: Succeeded + preDeploymentEvaluationStatus: Succeeded + preDeploymentStatus: Succeeded + preDeploymentTaskStatus: + - status: Succeeded + taskDefinitionName: pre-deployment-hello + status: Succeeded \ No newline at end of file diff --git a/test/integration/simple-daemonset-annotated/02-assert.yaml b/test/integration/simple-daemonset-annotated/02-assert.yaml new file mode 100644 index 0000000000..0e584446e0 --- /dev/null +++ b/test/integration/simple-daemonset-annotated/02-assert.yaml @@ -0,0 +1,16 @@ + +apiVersion: lifecycle.keptn.sh/v1alpha1 +kind: KeptnWorkloadInstance +metadata: + name: work-work-0.5 +status: + currentPhase: Completed + deploymentStatus: Succeeded + postDeploymentEvaluationStatus: Succeeded + postDeploymentStatus: Succeeded + preDeploymentEvaluationStatus: Succeeded + preDeploymentStatus: Succeeded + preDeploymentTaskStatus: + - status: Succeeded + taskDefinitionName: pre-deployment-hello + status: Succeeded \ No newline at end of file diff --git a/test/integration/simple-daemonset-annotated/02-install.yaml b/test/integration/simple-daemonset-annotated/02-install.yaml new file mode 100644 index 0000000000..af75423a4d --- /dev/null +++ b/test/integration/simple-daemonset-annotated/02-install.yaml @@ -0,0 +1,26 @@ + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: daemonset + name: daemonset + annotations: + keptn.sh/workload: work + keptn.sh/version: "0.5" + keptn.sh/pre-deployment-tasks: pre-deployment-hello +spec: + selector: + 
matchLabels: + app: daemonset + template: + metadata: + labels: + app: daemonset + annotations: + rollme: eSoWV + spec: + containers: + - image: busybox + name: busybox + command: ['sh', '-c', 'echo The app is running! && sleep infinity']
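Beyond the kuttl scenario above, one unit-level path the new controller tests leave uncovered is the default branch of the Kind switch in reconcileDeployment, which returns ErrUnsupportedWorkloadInstanceResourceReference. A possible test for it is sketched below in Go, reusing the makeWorkloadInstanceWithRef helper introduced in controller_test.go; the test name and the "Job" kind are illustrative, and it assumes controller_test.go additionally imports the controllers/common package (aliased controllercommon) and a testify version that provides require.ErrorIs.

func TestKeptnWorkloadInstanceReconciler_reconcileDeployment_UnsupportedReferenceKind(t *testing.T) {
	fakeClient := fake.NewClientBuilder().Build()

	err := klcv1alpha1.AddToScheme(fakeClient.Scheme())
	testrequire.Nil(t, err)

	// "Job" is not handled by the Kind switch, so the reconciler should surface
	// the sentinel error and report StateUnknown without updating the status.
	workloadInstance := makeWorkloadInstanceWithRef(metav1.ObjectMeta{Name: "my-job", UID: "my-job"}, "Job")

	err = fakeClient.Create(context.TODO(), workloadInstance)
	require.Nil(t, err)

	r := &KeptnWorkloadInstanceReconciler{
		Client: fakeClient,
	}

	keptnState, err := r.reconcileDeployment(context.TODO(), workloadInstance)
	testrequire.ErrorIs(t, err, controllercommon.ErrUnsupportedWorkloadInstanceResourceReference)
	testrequire.Equal(t, common.StateUnknown, keptnState)
}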