diff --git a/pkg/apis/kudo/v1beta1/instance_types.go b/pkg/apis/kudo/v1beta1/instance_types.go
index 494378aad..5fd3ea040 100644
--- a/pkg/apis/kudo/v1beta1/instance_types.go
+++ b/pkg/apis/kudo/v1beta1/instance_types.go
@@ -31,6 +31,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+const instanceCleanupFinalizerName = "kudo.dev.instance.cleanup"
+
 // InstanceSpec defines the desired state of Instance.
 type InstanceSpec struct {
 	// OperatorVersion specifies a reference to a specific Operator object.
@@ -159,6 +161,9 @@ const (
 
 	// UpdatePlanName is the name of the update plan
 	UpdatePlanName = "update"
+
+	// CleanupPlanName is the name of the cleanup plan
+	CleanupPlanName = "cleanup"
 )
 
 // IsTerminal returns true if the status is terminal (either complete, or in a nonrecoverable error)
@@ -387,8 +392,34 @@ func selectPlan(possiblePlans []string, ov *OperatorVersion) *string {
 	return nil
 }
 
+func (i *Instance) PlanStatus(plan string) *PlanStatus {
+	for _, planStatus := range i.Status.PlanStatus {
+		if planStatus.Name == plan {
+			return &planStatus
+		}
+	}
+
+	return nil
+}
+
 // GetPlanToBeExecuted returns name of the plan that should be executed
 func (i *Instance) GetPlanToBeExecuted(ov *OperatorVersion) (*string, error) {
+	if i.IsDeleting() {
+		// check if we have a cleanup plan
+		plan := selectPlan([]string{CleanupPlanName}, ov)
+		if plan != nil {
+			if planStatus := i.PlanStatus(*plan); planStatus != nil {
+				if !planStatus.Status.IsRunning() {
+					if planStatus.Status.IsFinished() {
+						// we already finished the cleanup plan
+						return nil, nil
+					}
+					return plan, nil
+				}
+			}
+		}
+	}
+
 	if i.GetPlanInProgress() != nil { // we're already running some plan
 		return nil, nil
 	}
@@ -476,6 +507,71 @@ func parameterDifference(old, new map[string]string) map[string]string {
 	return diff
 }
 
+func contains(values []string, s string) bool {
+	for _, value := range values {
+		if value == s {
+			return true
+		}
+	}
+	return false
+}
+
+func remove(values []string, s string) (result []string) {
+	for _, value := range values {
+		if value == s {
+			continue
+		}
+		result = append(result, value)
+	}
+	return
+}
+
+// TryAddFinalizer adds the cleanup finalizer to an instance if the finalizer
+// hasn't been added yet, the instance has a cleanup plan and the cleanup plan
+// hasn't run yet. Returns true if the cleanup finalizer has been added.
+func (i *Instance) TryAddFinalizer() bool {
+	if !contains(i.ObjectMeta.Finalizers, instanceCleanupFinalizerName) {
+		if planStatus := i.PlanStatus(CleanupPlanName); planStatus != nil {
+			// avoid adding a finalizer again if a reconciliation is requested
+			// after it has just been removed but the instance isn't deleted yet
+			if planStatus.Status == ExecutionNeverRun {
+				i.ObjectMeta.Finalizers = append(i.ObjectMeta.Finalizers, instanceCleanupFinalizerName)
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// TryRemoveFinalizer removes the cleanup finalizer of an instance if it has
+// been added, the instance has a cleanup plan and the cleanup plan completed.
+// Returns true if the cleanup finalizer has been removed.
+func (i *Instance) TryRemoveFinalizer() bool {
+	if contains(i.ObjectMeta.Finalizers, instanceCleanupFinalizerName) {
+		if planStatus := i.PlanStatus(CleanupPlanName); planStatus != nil {
+			if planStatus.Status.IsTerminal() {
+				i.ObjectMeta.Finalizers = remove(i.ObjectMeta.Finalizers, instanceCleanupFinalizerName)
+				return true
+			}
+		} else {
+			// We have a finalizer but no cleanup plan. This could happen if the instance
+			// was updated to an operator version without one. Let's remove the finalizer.
+			i.ObjectMeta.Finalizers = remove(i.ObjectMeta.Finalizers, instanceCleanupFinalizerName)
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsDeleting returns true if the instance is being deleted.
+func (i *Instance) IsDeleting() bool {
+	// a delete request is indicated by a non-zero 'metadata.deletionTimestamp',
+	// see https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#finalizers
+	return !i.ObjectMeta.DeletionTimestamp.IsZero()
+}
+
 
 // +genclient
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/pkg/controller/instance/instance_controller.go b/pkg/controller/instance/instance_controller.go
index 7db2dcfd6..5f38be59f 100644
--- a/pkg/controller/instance/instance_controller.go
+++ b/pkg/controller/instance/instance_controller.go
@@ -105,6 +105,12 @@ func (r *Reconciler) SetupWithManager(
 //                 |
 //                 v
 // +-------------------------------+
+// | Start cleanup plan if object  |
+// | is being deleted              |
+// +-------------------------------+
+//                 |
+//                 v
+// +-------------------------------+
 // | Start new plan if required    |
 // | and none is running           |
 // +-------------------------------+
@@ -141,7 +147,19 @@ func (r *Reconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) {
 		return reconcile.Result{}, err // OV not found has to be retried because it can really have been created after Instance
 	}
 
-	// ---------- 2. First check if we should start execution of new plan ----------
+	// ---------- 2. Check if the object is being deleted, start cleanup plan ----------
+
+	if !instance.IsDeleting() {
+		if _, hasCleanupPlan := ov.Spec.Plans[kudov1beta1.CleanupPlanName]; hasCleanupPlan {
+			if instance.TryAddFinalizer() {
+				log.Printf("InstanceController: Adding finalizer on instance %s/%s", instance.Namespace, instance.Name)
+			}
+		}
+	} else {
+		log.Printf("InstanceController: Instance %s/%s is being deleted", instance.Namespace, instance.Name)
+	}
+
+	// ---------- 3. Check if we should start execution of new plan ----------
 
 	planToBeExecuted, err := instance.GetPlanToBeExecuted(ov)
 	if err != nil {
@@ -156,7 +174,7 @@ func (r *Reconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) {
 		r.Recorder.Event(instance, "Normal", "PlanStarted", fmt.Sprintf("Execution of plan %s started", kudo.StringValue(planToBeExecuted)))
 	}
 
-	// ---------- 3. If there's currently active plan, continue with the execution ----------
+	// ---------- 4. If there's currently an active plan, continue with the execution ----------
 
 	activePlanStatus := instance.GetPlanInProgress()
 	if activePlanStatus == nil { // we have no plan in progress
@@ -182,7 +200,7 @@ func (r *Reconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) {
 	log.Printf("InstanceController: Going to proceed in execution of active plan %s on instance %s/%s", activePlan.Name, instance.Namespace, instance.Name)
 	newStatus, err := workflow.Execute(activePlan, metadata, r.Client, &renderer.KustomizeEnhancer{Scheme: r.Scheme}, time.Now())
 
-	// ---------- 4. Update status of instance after the execution proceeded ----------
+	// ---------- 5. Update status of instance after the execution proceeded ----------
 	if newStatus != nil {
 		instance.UpdateInstanceStatus(newStatus)
 	}
@@ -206,7 +224,9 @@ func (r *Reconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) {
 
 func updateInstance(instance *kudov1beta1.Instance, oldInstance *kudov1beta1.Instance, client client.Client) error {
 	// update instance spec and metadata. this will not update Instance.Status field
-	if !reflect.DeepEqual(instance.Spec, oldInstance.Spec) || !reflect.DeepEqual(instance.ObjectMeta.Annotations, oldInstance.ObjectMeta.Annotations) {
+	if !reflect.DeepEqual(instance.Spec, oldInstance.Spec) ||
+		!reflect.DeepEqual(instance.ObjectMeta.Annotations, oldInstance.ObjectMeta.Annotations) ||
+		!reflect.DeepEqual(instance.ObjectMeta.Finalizers, oldInstance.ObjectMeta.Finalizers) {
 		instanceStatus := instance.Status.DeepCopy()
 		err := client.Update(context.TODO(), instance)
 		if err != nil {
@@ -222,6 +242,17 @@ func updateInstance(instance *kudov1beta1.Instance, oldInstance *kudov1beta1.Ins
 
 		log.Printf("InstanceController: Error when updating instance status. %v", err)
 		return err
 	}
+
+	// update instance metadata if finalizer is removed
+	// because Kubernetes might immediately delete the instance, this has to be the last instance update
+	if instance.TryRemoveFinalizer() {
+		log.Printf("InstanceController: Removing finalizer on instance %s/%s", instance.Namespace, instance.Name)
+		if err := client.Update(context.TODO(), instance); err != nil {
+			log.Printf("InstanceController: Error when removing instance finalizer. %v", err)
+			return err
+		}
+	}
+
 	return nil
 }
diff --git a/test/e2e/cli-install-uninstall/02-assert.yaml b/test/e2e/cli-install-uninstall/02-assert.yaml
new file mode 100644
index 000000000..f8c5e2d5d
--- /dev/null
+++ b/test/e2e/cli-install-uninstall/02-assert.yaml
@@ -0,0 +1,3 @@
+apiVersion: v1
+kind: Event
+message: 'Execution of plan cleanup finished with status COMPLETE'
diff --git a/test/e2e/cli-install-uninstall/02-errors.yaml b/test/e2e/cli-install-uninstall/02-errors.yaml
index 7b5331d6b..7c867a0c3 100644
--- a/test/e2e/cli-install-uninstall/02-errors.yaml
+++ b/test/e2e/cli-install-uninstall/02-errors.yaml
@@ -10,3 +10,8 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: nginx-deployment
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cleanup-deployment
diff --git a/test/e2e/cli-install-uninstall/first-operator/operator.yaml b/test/e2e/cli-install-uninstall/first-operator/operator.yaml
index f7c6cd592..ee0795884 100644
--- a/test/e2e/cli-install-uninstall/first-operator/operator.yaml
+++ b/test/e2e/cli-install-uninstall/first-operator/operator.yaml
@@ -12,6 +12,11 @@ tasks:
     spec:
       resources:
         - deployment.yaml
+  - name: cleanup
+    kind: Apply
+    spec:
+      resources:
+        - cleanup.yaml
 plans:
   deploy:
     strategy: serial
@@ -22,3 +27,12 @@ plans:
       - name: everything
         tasks:
           - app
+  cleanup:
+    strategy: serial
+    phases:
+      - name: main
+        strategy: parallel
+        steps:
+          - name: everything
+            tasks:
+              - cleanup
diff --git a/test/e2e/cli-install-uninstall/first-operator/templates/cleanup.yaml b/test/e2e/cli-install-uninstall/first-operator/templates/cleanup.yaml
new file mode 100644
index 000000000..8f310842d
--- /dev/null
+++ b/test/e2e/cli-install-uninstall/first-operator/templates/cleanup.yaml
@@ -0,0 +1,19 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cleanup-deployment
+spec:
+  selector:
+    matchLabels:
+      app: nginx
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:1.7.9
+        ports:
+        - containerPort: 80
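Not part of the diff above: the following is a minimal sketch, written as a Go unit test, of the finalizer lifecycle the new helpers are meant to enforce. It assumes that InstanceStatus.PlanStatus is a map keyed by plan name and that ExecutionComplete is one of the terminal execution statuses defined alongside ExecutionNeverRun in instance_types.go; both are assumptions about the surrounding package, not something this diff shows.

package v1beta1

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TestCleanupFinalizerLifecycle is a hypothetical test, not part of this
// change. It walks the cleanup finalizer through its expected lifecycle; the
// map layout of PlanStatus and the ExecutionComplete constant are assumptions.
func TestCleanupFinalizerLifecycle(t *testing.T) {
	i := &Instance{
		Status: InstanceStatus{
			PlanStatus: map[string]PlanStatus{
				CleanupPlanName: {Name: CleanupPlanName, Status: ExecutionNeverRun},
			},
		},
	}

	// The cleanup plan exists and has never run, so the finalizer is added.
	if !i.TryAddFinalizer() {
		t.Fatal("expected the cleanup finalizer to be added")
	}

	// Adding it a second time is a no-op because the finalizer is already present.
	if i.TryAddFinalizer() {
		t.Fatal("expected no duplicate finalizer")
	}

	// A delete request is signaled by a non-zero deletionTimestamp.
	now := metav1.Now()
	i.ObjectMeta.DeletionTimestamp = &now
	if !i.IsDeleting() {
		t.Fatal("expected the instance to report a deletion in progress")
	}

	// The finalizer must stay in place until the cleanup plan is terminal.
	if i.TryRemoveFinalizer() {
		t.Fatal("finalizer must not be removed before cleanup finishes")
	}

	// Once cleanup reaches a terminal state, the finalizer is removed and
	// Kubernetes can garbage-collect the instance.
	ps := i.Status.PlanStatus[CleanupPlanName]
	ps.Status = ExecutionComplete
	i.Status.PlanStatus[CleanupPlanName] = ps
	if !i.TryRemoveFinalizer() {
		t.Fatal("expected the finalizer to be removed after cleanup completed")
	}
}

The ordering in updateInstance is the design point worth noting: the finalizer is removed in a separate, final client.Update call, because once the last finalizer is gone Kubernetes may delete the instance immediately, and any later update of the object would fail.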