diff --git a/apis/controller/v1alpha1/devworkspaceoperatorconfig_types.go b/apis/controller/v1alpha1/devworkspaceoperatorconfig_types.go index 04bd20dc4..2fce54d5d 100644 --- a/apis/controller/v1alpha1/devworkspaceoperatorconfig_types.go +++ b/apis/controller/v1alpha1/devworkspaceoperatorconfig_types.go @@ -49,6 +49,36 @@ type OperatorConfiguration struct { EnableExperimentalFeatures *bool `json:"enableExperimentalFeatures,omitempty"` } +type CleanupCronJobConfig struct { + // Enable determines whether the cleanup cron job is enabled. + // Defaults to false if not specified. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty"` + // Image specifies the container image to use for the cleanup cron job. + // If not specified, a suitable default image for the Kubernetes/OpenShift environment will be used. + // +kubebuilder:validation:Optional + Image string `json:"image,omitempty"` + // RetainTime specifies the minimum time (in seconds) since a DevWorkspace was last started before it is considered stale and eligible for cleanup. + // For example, a value of 2592000 (30 days) would mean that any DevWorkspace that has not been started in the last 30 days will be deleted. + // Defaults to 2592000 seconds (30 days) if not specified. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default:=2592000 + // +kubebuilder:validation:Optional + RetainTime *int32 `json:"retainTime,omitempty"` + // DryRun determines whether the cleanup cron job should be run in dry-run mode. + // If set to true, the cron job will not delete any DevWorkspaces, but will log the DevWorkspaces that would have been deleted. + // Defaults to false if not specified. + // +kubebuilder:validation:Optional + DryRun *bool `json:"dryRun,omitempty"` + // CronJobScript specifies the name of a ConfigMap containing the script to be executed by the cleanup cron job. + // This ConfigMap must reside in the same namespace as the DevWorkspace Operator. 
+ // The script is responsible for identifying and deleting stale DevWorkspaces (based on the `retainTime`). + // The script must be idempotent. If not specified, defaults to `devworkspace-pruner`. + // +kubebuilder:default:=devworkspace-pruner + // +kubebuilder:validation:Optional + CronJobScript string `json:"cronJobScript,omitempty"` +} + type RoutingConfig struct { // DefaultRoutingClass specifies the routingClass to be used when a DevWorkspace // specifies an empty `.spec.routingClass`. Supported routingClasses can be defined @@ -161,6 +191,8 @@ type WorkspaceConfig struct { PodAnnotations map[string]string `json:"podAnnotations,omitempty"` // RuntimeClassName defines the spec.runtimeClassName for DevWorkspace pods created by the DevWorkspace Operator. RuntimeClassName *string `json:"runtimeClassName,omitempty"` + // CleanupCronJobConfig defines configuration options for a cron job that automatically cleans up stale DevWorkspaces. + CleanupCronJob *CleanupCronJobConfig `json:"cleanupCronJob,omitempty"` } type WebhookConfig struct { diff --git a/apis/controller/v1alpha1/zz_generated.deepcopy.go b/apis/controller/v1alpha1/zz_generated.deepcopy.go index 687b2aa2a..9381d38b4 100644 --- a/apis/controller/v1alpha1/zz_generated.deepcopy.go +++ b/apis/controller/v1alpha1/zz_generated.deepcopy.go @@ -46,6 +46,36 @@ func (in Attributes) DeepCopy() Attributes { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CleanupCronJobConfig) DeepCopyInto(out *CleanupCronJobConfig) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.RetainTime != nil { + in, out := &in.RetainTime, &out.RetainTime + *out = new(int32) + **out = **in + } + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupCronJobConfig. +func (in *CleanupCronJobConfig) DeepCopy() *CleanupCronJobConfig { + if in == nil { + return nil + } + out := new(CleanupCronJobConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConfigmapReference) DeepCopyInto(out *ConfigmapReference) { *out = *in @@ -748,6 +778,11 @@ func (in *WorkspaceConfig) DeepCopyInto(out *WorkspaceConfig) { *out = new(string) **out = **in } + if in.CleanupCronJob != nil { + in, out := &in.CleanupCronJob, &out.CleanupCronJob + *out = new(CleanupCronJobConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceConfig. diff --git a/controllers/workspace/pruner_controller.go b/controllers/workspace/pruner_controller.go new file mode 100644 index 000000000..d2e03380c --- /dev/null +++ b/controllers/workspace/pruner_controller.go @@ -0,0 +1,438 @@ +// Copyright (c) 2019-2024 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package controllers + +import ( + "context" + "fmt" + "strconv" + + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + "github.com/devfile/devworkspace-operator/pkg/constants" + "github.com/go-logr/logr" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// PrunerReconciler ensures that the pruning CronJob and ConfigMap are created. +type PrunerReconciler struct { + client.Client + Scheme *runtime.Scheme + Log logr.Logger +} + +const ( + PrunerConfigMap = "devworkspace-pruner" + PrunerCronJobName = "devworkspace-pruner" + PrunerImage = "image-registry.openshift-image-registry.svc:5000/openshift/cli:latest" + PrunerRetainTime = "2592000" + PrunerClusterRoleBindingName = "devworkspace-pruner" + PrunerClusterRoleName = "devworkspace-pruner" + PrunerSchedule = "0 0 1 * *" + PrunerScriptKey = "devworkspace-pruner" + PrunerScriptVolume = "script" + PrunerServiceAccountName = "devworkspace-pruner" +) + +// Reconcile ensures the CronJob and ConfigMap are in place. 
+
+// +kubebuilder:rbac:groups=controller.devfile.io,resources=devworkspaceoperatorconfigs,verbs=get;list;watch
+// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete
+
+func (r *PrunerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	log := r.Log.WithValues("pruner", req.NamespacedName)
+	log.Info("Reconciling DevWorkspace pruner resources")
+
+	config := &controllerv1alpha1.DevWorkspaceOperatorConfig{}
+	if err := r.Get(ctx, req.NamespacedName, config); err != nil {
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	// Suspend the CronJob if the feature is disabled
+	if config.Config == nil || config.Config.Workspace == nil || config.Config.Workspace.CleanupCronJob == nil || config.Config.Workspace.CleanupCronJob.Enable == nil || !*config.Config.Workspace.CleanupCronJob.Enable {
+		if err := r.suspendCronJob(ctx, req.Namespace); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{}, nil
+	}
+
+	// Ensure the default ConfigMap is present
+	if err := r.ensureConfigMap(ctx, req.Namespace); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Ensure the user ConfigMap is present (if configured)
+	if err := r.ensureCustomConfigMap(ctx, req.Namespace, config.Config.Workspace.CleanupCronJob); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Ensure the ServiceAccount
+	if err := r.ensureServiceAccount(ctx, req.Namespace); err != nil {
+		return ctrl.Result{}, err
+	}
+	// Ensure the Role
+	if err := r.ensureClusterRole(ctx, req.Namespace); err != nil {
+		return ctrl.Result{}, err
+	}
+	// Ensure the RoleBinding
+	if err := r.ensureClusterRoleBinding(ctx, req.Namespace); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Reconcile CronJob
+	if err := r.ensureCronJob(ctx, req.Namespace, config.Config.Workspace.CleanupCronJob); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	return ctrl.Result{}, nil
+}
+
+func 
(r *PrunerReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&controllerv1alpha1.DevWorkspaceOperatorConfig{}). + Complete(r) +} + +func (r *PrunerReconciler) suspendCronJob(ctx context.Context, namespace string) error { + var cronJob batchv1.CronJob + if err := r.Get(ctx, client.ObjectKey{Namespace: namespace, Name: PrunerCronJobName}, &cronJob); err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return err + } + + if cronJob.Spec.Suspend == nil || !*cronJob.Spec.Suspend { + cronJob.Spec.Suspend = pointer.Bool(true) + if err := r.Update(ctx, &cronJob); err != nil { + return err + } + } + + return nil +} + +func (r *PrunerReconciler) ensureServiceAccount(ctx context.Context, namespace string) error { + sa := &corev1.ServiceAccount{ + ObjectMeta: meta.ObjectMeta{ + Name: PrunerServiceAccountName, + Namespace: namespace, + Labels: resourceLabels(), + }, + } + if err := r.Create(ctx, sa); err != nil { + if apierrors.IsAlreadyExists(err) { + return nil + } + return err + } + return nil +} + +func (r *PrunerReconciler) ensureClusterRole(ctx context.Context, namespace string) error { + role := &rbacv1.ClusterRole{ + ObjectMeta: meta.ObjectMeta{ + Name: PrunerClusterRoleName, + Namespace: namespace, + Labels: resourceLabels(), + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"namespaces"}, + Verbs: []string{"get", "list"}, + }, + { + APIGroups: []string{"workspace.devfile.io"}, + Resources: []string{"devworkspaces"}, + Verbs: []string{"get", "create", "delete", "list", "update", "patch", "watch"}, + }, + }, + } + if err := r.Create(ctx, role); err != nil { + if apierrors.IsAlreadyExists(err) { + return nil + } + return err + } + + return nil +} + +func (r *PrunerReconciler) ensureClusterRoleBinding(ctx context.Context, namespace string) error { + roleBinding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: meta.ObjectMeta{ + Name: PrunerClusterRoleBindingName, + 
Namespace: namespace, + Labels: resourceLabels(), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: PrunerClusterRoleName, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: PrunerServiceAccountName, + Namespace: namespace, + }, + }, + } + if err := r.Create(ctx, roleBinding); err != nil { + if apierrors.IsAlreadyExists(err) { + return nil + } + return err + } + + return nil +} + +func (r *PrunerReconciler) ensureConfigMap(ctx context.Context, namespace string) error { + cm := &corev1.ConfigMap{} + if err := r.Get(ctx, client.ObjectKey{Namespace: namespace, Name: PrunerConfigMap}, cm); err != nil { + if apierrors.IsNotFound(err) { + return r.Create(ctx, prunerDefaultConfigMap(namespace)) + } + return err + } + return nil +} + +func (r *PrunerReconciler) ensureCustomConfigMap(ctx context.Context, namespace string, config *controllerv1alpha1.CleanupCronJobConfig) error { + if config == nil || config.CronJobScript == "" { + return nil + } + + cm := &corev1.ConfigMap{} + if err := r.Get(ctx, client.ObjectKey{Namespace: namespace, Name: config.CronJobScript}, cm); err != nil { + return err + } + + // check if PrunerScriptKey exists + if _, ok := cm.Data[PrunerScriptKey]; !ok { + return fmt.Errorf("ConfigMap %s does not contain key %s", config.CronJobScript, PrunerScriptKey) + } + + return nil +} + +func (r *PrunerReconciler) ensureCronJob(ctx context.Context, namespace string, config *controllerv1alpha1.CleanupCronJobConfig) error { + suspend := true + if config != nil && config.Enable != nil { + suspend = !*config.Enable + } + image := PrunerImage + if config != nil && config.Image != "" { + image = config.Image + } + retainTime := PrunerRetainTime + if config != nil && config.RetainTime != nil && *config.RetainTime != 0 { + retainTime = strconv.FormatInt(int64(*config.RetainTime), 10) + } + dryRun := false + if config != nil && config.DryRun != nil { + dryRun = *config.DryRun + } + 
configMapName := PrunerConfigMap + if config != nil && config.CronJobScript != "" { + configMapName = config.CronJobScript + } + + cronJob := &batchv1.CronJob{} + if err := r.Get(ctx, client.ObjectKey{Namespace: namespace, Name: PrunerCronJobName}, cronJob); err != nil { + if apierrors.IsNotFound(err) { + return r.Create(ctx, prunerCronJob(namespace, suspend, image, retainTime, configMapName)) + } + return err + } + + needUpdate := false + // suspend + if cronJob.Spec.Suspend != nil && *cronJob.Spec.Suspend != suspend { + cronJob.Spec.Suspend = &suspend + needUpdate = true + } + // image + if cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image != image { + cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image = image + needUpdate = true + } + // envs + containerEnvs := &cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env + envs := map[string]string{ + "RETAIN_TIME": retainTime, + "DRY_RUN": strconv.FormatBool(dryRun), + } + for name, val := range envs { + found := false + + for i, env := range *containerEnvs { + if env.Name == name { + found = true + if env.Value != val { + (*containerEnvs)[i].Value = val + needUpdate = true + break + } + } + } + + if !found { + *containerEnvs = append(*containerEnvs, corev1.EnvVar{Name: name, Value: val}) + needUpdate = true + } + } + // configMap + if cronJob.Spec.JobTemplate.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name != configMapName { + cronJob.Spec.JobTemplate.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name = configMapName + needUpdate = true + } + if needUpdate { + if err := r.Update(ctx, cronJob); err != nil { + return err + } + } + + return nil +} + +func prunerDefaultConfigMap(namespace string) *corev1.ConfigMap { + labels := resourceLabels() + labels[constants.DevWorkspaceWatchConfigMapLabel] = "true" + + return &corev1.ConfigMap{ + ObjectMeta: meta.ObjectMeta{ + Name: PrunerConfigMap, + Namespace: namespace, + Labels: 
labels, + }, + Data: map[string]string{ + PrunerScriptKey: `#!/usr/bin/env bash +current_time=$(date +%s) +echo "Current time: ${current_time}" +echo "RETAIN_TIME: ${RETAIN_TIME}" +for namespace in $(oc get namespaces -l app.kubernetes.io/component=workspaces-namespace -o go-template='{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}') +do + for workspace in $(oc get devworkspaces -n ${namespace} -o go-template='{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}') + do + last_start=$(date -d$(oc get devworkspace ${workspace} -n ${namespace} -o go-template='{{range .status.conditions}}{{if eq .type "Started"}}{{.lastTransitionTime}}{{end}}{{end}}') +%s) + workspace_age=$(( ${current_time} - ${last_start} )) + started=$(oc get devworkspace ${workspace} -n ${namespace} -o go-template='{{.spec.started}}') + if [[ "$started" != "true" ]] && [[ ${workspace_age} -gt ${RETAIN_TIME} ]] + then + echo "Removing workspace: ${workspace} in ${namespace}" + oc delete devworkspace ${workspace} -n ${namespace} + fi + done +done +`, + }, + } +} + +func prunerCronJob(namespace string, suspend bool, image, retainTime, configMapName string) *batchv1.CronJob { + labels := resourceLabels() + labels[constants.DevWorkspaceWatchCronJobLabel] = "true" + + return &batchv1.CronJob{ + ObjectMeta: meta.ObjectMeta{ + Name: PrunerCronJobName, + Namespace: namespace, + Labels: labels, + }, + Spec: batchv1.CronJobSpec{ + Schedule: PrunerSchedule, + SuccessfulJobsHistoryLimit: pointer.Int32(3), + FailedJobsHistoryLimit: pointer.Int32(3), + ConcurrencyPolicy: batchv1.ForbidConcurrent, + Suspend: pointer.Bool(suspend), + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ServiceAccountName: PrunerServiceAccountName, + Volumes: []corev1.Volume{ + { + Name: PrunerScriptVolume, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: 
corev1.LocalObjectReference{ + Name: configMapName, + }, + DefaultMode: pointer.Int32(0555), + Items: []corev1.KeyToPath{ + { + Key: PrunerScriptKey, + Path: "devworkspace-pruner.sh", + }, + }, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "openshift-cli", + Image: image, + Env: []corev1.EnvVar{ + { + Name: "RETAIN_TIME", + Value: retainTime, + }, + }, + Command: []string{"/script/devworkspace-pruner.sh"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + "memory": resource.MustParse("64Mi"), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + "memory": resource.MustParse("64Mi"), + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/script", + Name: PrunerScriptVolume, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func resourceLabels() map[string]string { + labels := constants.ControllerAppLabels() + labels["app.kubernetes.io/name"] = "devworkspace-pruner" + return labels +} diff --git a/controllers/workspace/pruner_controller_test.go b/controllers/workspace/pruner_controller_test.go new file mode 100644 index 000000000..b28e066ce --- /dev/null +++ b/controllers/workspace/pruner_controller_test.go @@ -0,0 +1,439 @@ +// Copyright (c) 2019-2024 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package controllers_test + +import ( + "fmt" + + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + workspacecontroller "github.com/devfile/devworkspace-operator/controllers/workspace" + "github.com/devfile/devworkspace-operator/pkg/constants" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Pruner Controller", func() { + const ( + testPrunerConfigMapName = "test-pruner-configmap" + ) + + Context("Pruner Resources creation", func() { + + AfterEach(func() { + // Clean up resources created by the test + deleteDevWorkspaceOperatorConfig("devworkspace-operator-config") + deleteConfigMap(workspacecontroller.PrunerConfigMap) + deleteConfigMap(testPrunerConfigMapName) + deleteCronJob(workspacecontroller.PrunerCronJobName) + deleteServiceAccount(workspacecontroller.PrunerServiceAccountName) + deleteClusterRole(workspacecontroller.PrunerClusterRoleName) + deleteClusterRoleBinding(workspacecontroller.PrunerClusterRoleBindingName) + }) + + It("Creates CronJob and ConfigMap when CleanupCronJob is enabled in DevWorkspaceOperatorConfig", func() { + // Create a DevWorkspaceOperatorConfig with CleanupCronJob enabled + enable := true + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devworkspace-operator-config", + Namespace: testNamespace, + }, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + CleanupCronJob: &controllerv1alpha1.CleanupCronJobConfig{ + Enable: &enable, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, dwoc)).Should(Succeed()) + defer deleteDevWorkspaceOperatorConfig("devworkspace-operator-config") + + By("Checking that default ConfigMap is created") + cm 
:= &corev1.ConfigMap{} + Eventually(func() error { + return k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerConfigMap, testNamespace), cm) + }, timeout, interval).Should(Succeed(), "Default ConfigMap should be created") + + By("Checking that CronJob is created") + cronJob := &batchv1.CronJob{} + Eventually(func() error { + return k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerCronJobName, testNamespace), cronJob) + }, timeout, interval).Should(Succeed(), "CronJob should be created") + + By("Checking that ServiceAccount is created") + sa := &corev1.ServiceAccount{} + Eventually(func() error { + return k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerServiceAccountName, testNamespace), sa) + }, timeout, interval).Should(Succeed(), "ServiceAccount should be created") + + By("Checking that ClusterRole is created") + cr := &rbacv1.ClusterRole{} + Eventually(func() error { + return k8sClient.Get(ctx, client.ObjectKey{Name: workspacecontroller.PrunerClusterRoleName}, cr) + }, timeout, interval).Should(Succeed(), "ClusterRole should be created") + + By("Checking that ClusterRoleBinding is created") + crb := &rbacv1.ClusterRoleBinding{} + Eventually(func() error { + return k8sClient.Get(ctx, client.ObjectKey{Name: workspacecontroller.PrunerClusterRoleBindingName}, crb) + }, timeout, interval).Should(Succeed(), "ClusterRoleBinding should be created") + }) + + It("Creates CronJob with custom image and retainTime and custom configmap when provided in DevWorkspaceOperatorConfig", func() { + // Create a DevWorkspaceOperatorConfig with CleanupCronJob enabled and custom image, retainTime and configmap + enabled := true + customImage := "test-image" + customRetainTime := int32(12345) + customConfigMapName := testPrunerConfigMapName + + // create custom configmap + customConfigMapLabels := constants.ControllerAppLabels() + customConfigMapLabels["app.kubernetes.io/name"] = "devworkspace-pruner" + 
customConfigMapLabels[constants.DevWorkspaceWatchConfigMapLabel] = "true" + customConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: customConfigMapName, + Namespace: testNamespace, + Labels: customConfigMapLabels, + }, + Data: map[string]string{ + "devworkspace-pruner": "test-script", + }, + } + Expect(k8sClient.Create(ctx, customConfigMap)).Should(Succeed()) + // defer deleteConfigMap(customConfigMapName) + + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devworkspace-operator-config", + Namespace: testNamespace, + }, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + CleanupCronJob: &controllerv1alpha1.CleanupCronJobConfig{ + Enable: &enabled, + Image: customImage, + RetainTime: &customRetainTime, + CronJobScript: customConfigMapName, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, dwoc)).Should(Succeed()) + defer deleteDevWorkspaceOperatorConfig("devworkspace-operator-config") + + By("Checking that default ConfigMap is created") + cm := &corev1.ConfigMap{} + Eventually(func() error { + return k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerConfigMap, testNamespace), cm) + }, timeout, interval).Should(Succeed(), "Default ConfigMap should be created") + + By("Checking that CronJob is created with custom parameters") + cronJob := &batchv1.CronJob{} + Eventually(func() error { + return k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerCronJobName, testNamespace), cronJob) + }, timeout, interval).Should(Succeed(), "CronJob should be created") + + Expect(cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image).Should(Equal(customImage), "CronJob should have custom image") + Expect(cronJob.Spec.JobTemplate.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name).Should(Equal(customConfigMapName), "CronJob should have custom configmap") + + // Check if RETAIN_TIME env var has correct value 
+ found := false + for _, env := range cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env { + if env.Name == "RETAIN_TIME" { + Expect(env.Value).Should(Equal(fmt.Sprintf("%d", customRetainTime)), "CronJob should have custom retainTime") + found = true + break + } + } + Expect(found).Should(BeTrue(), "CronJob should have RETAIN_TIME env var") + }) + + It("Does not create CronJob and ConfigMap when CleanupCronJob is disabled in DevWorkspaceOperatorConfig", func() { + // Create a DevWorkspaceOperatorConfig with CleanupCronJob disabled + disabled := false + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devworkspace-operator-config", + Namespace: testNamespace, + }, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + CleanupCronJob: &controllerv1alpha1.CleanupCronJobConfig{ + Enable: &disabled, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, dwoc)).Should(Succeed()) + defer deleteDevWorkspaceOperatorConfig("devworkspace-operator-config") + + By("Checking that ConfigMap is not created") + cm := &corev1.ConfigMap{} + Eventually(func() bool { + err := k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerConfigMap, testNamespace), cm) + return k8sErrors.IsNotFound(err) + }, timeout, interval).Should(BeTrue(), "ConfigMap should not be created") + + By("Checking that CronJob is not created") + cronJob := &batchv1.CronJob{} + Eventually(func() bool { + err := k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerCronJobName, testNamespace), cronJob) + return k8sErrors.IsNotFound(err) + }, timeout, interval).Should(BeTrue(), "CronJob should not be created") + + By("Checking that ServiceAccount is not created") + sa := &corev1.ServiceAccount{} + Eventually(func() bool { + err := k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerServiceAccountName, testNamespace), sa) + return k8sErrors.IsNotFound(err) + }, timeout, 
interval).Should(BeTrue(), "ServiceAccount should not be created") + + By("Checking that ClusterRole is not created") + cr := &rbacv1.ClusterRole{} + Eventually(func() bool { + err := k8sClient.Get(ctx, client.ObjectKey{Name: workspacecontroller.PrunerClusterRoleName}, cr) + return k8sErrors.IsNotFound(err) + }, timeout, interval).Should(BeTrue(), "ClusterRole should not be created") + + By("Checking that ClusterRoleBinding is not created") + crb := &rbacv1.ClusterRoleBinding{} + Eventually(func() bool { + err := k8sClient.Get(ctx, client.ObjectKey{Name: workspacecontroller.PrunerClusterRoleBindingName}, crb) + return k8sErrors.IsNotFound(err) + }, timeout, interval).Should(BeTrue(), "ClusterRoleBinding should not be created") + }) + + It("Updates CronJob when CleanupCronJob parameters are updated in DevWorkspaceOperatorConfig", func() { + // Create a DevWorkspaceOperatorConfig with CleanupCronJob enabled + enabled := true + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devworkspace-operator-config", + Namespace: testNamespace, + }, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + CleanupCronJob: &controllerv1alpha1.CleanupCronJobConfig{ + Enable: &enabled, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, dwoc)).Should(Succeed()) + defer deleteDevWorkspaceOperatorConfig("devworkspace-operator-config") + + By("Checking that default ConfigMap is created") + cm := &corev1.ConfigMap{} + Eventually(func() error { + return k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerConfigMap, testNamespace), cm) + }, timeout, interval).Should(Succeed(), "Default ConfigMap should be created") + + By("Checking that CronJob is created") + cronJob := &batchv1.CronJob{} + Eventually(func() error { + return k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerCronJobName, testNamespace), cronJob) + }, timeout, interval).Should(Succeed(), "CronJob should be 
created") + + // Update the DevWorkspaceOperatorConfig with new values + customImage := "test-image" + customDryRun := true + customRetainTime := int32(12345) + customConfigMapName := testPrunerConfigMapName + + // create custom configmap + customConfigMapLabels := constants.ControllerAppLabels() + customConfigMapLabels["app.kubernetes.io/name"] = "devworkspace-pruner" + customConfigMapLabels[constants.DevWorkspaceWatchConfigMapLabel] = "true" + customConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: customConfigMapName, + Namespace: testNamespace, + Labels: customConfigMapLabels, + }, + Data: map[string]string{ + "devworkspace-pruner": "test-script", + }, + } + Expect(k8sClient.Create(ctx, customConfigMap)).Should(Succeed()) + defer deleteConfigMap(customConfigMapName) + + dwoc.Config.Workspace.CleanupCronJob.Image = customImage + dwoc.Config.Workspace.CleanupCronJob.RetainTime = &customRetainTime + dwoc.Config.Workspace.CleanupCronJob.DryRun = &customDryRun + dwoc.Config.Workspace.CleanupCronJob.CronJobScript = customConfigMapName + + Expect(k8sClient.Update(ctx, dwoc)).Should(Succeed()) + + By("Checking that CronJob is updated with new parameters") + Eventually(func() error { + err := k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerCronJobName, testNamespace), cronJob) + if err != nil { + return err + } + + if cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image != customImage { + return fmt.Errorf("CronJob image not updated, expected %s, got %s", customImage, cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image) + } + + if cronJob.Spec.JobTemplate.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name != customConfigMapName { + return fmt.Errorf("CronJob configmap not updated, expected %s, got %s", customConfigMapName, cronJob.Spec.JobTemplate.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name) + } + + // Check if RETAIN_TIME env var has correct value + 
foundRetainTime := false + for _, env := range cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env { + if env.Name == "RETAIN_TIME" { + if env.Value != fmt.Sprintf("%d", customRetainTime) { + return fmt.Errorf("CronJob retainTime not updated, expected %d, got %s", customRetainTime, env.Value) + } + foundRetainTime = true + break + } + } + if !foundRetainTime { + return fmt.Errorf("CronJob should have RETAIN_TIME env var") + } + + // Check if DRY_RUN env var has correct value + foundDryRun := false + for _, env := range cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env { + if env.Name == "DRY_RUN" { + if env.Value != "true" { + return fmt.Errorf("CronJob dryRun not updated, expected true, got %s", env.Value) + } + foundDryRun = true + break + } + } + if !foundDryRun { + return fmt.Errorf("CronJob should have DRY_RUN env var") + } + + return nil + }, timeout, interval).Should(Succeed(), "CronJob should be updated with new parameters") + }) + + It("Suspends CronJob when CleanupCronJob is disabled in DevWorkspaceOperatorConfig", func() { + // Create a DevWorkspaceOperatorConfig with CleanupCronJob enable + enable := true + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devworkspace-operator-config", + Namespace: testNamespace, + }, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + CleanupCronJob: &controllerv1alpha1.CleanupCronJobConfig{ + Enable: &enable, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, dwoc)).Should(Succeed()) + defer deleteDevWorkspaceOperatorConfig("devworkspace-operator-config") + + By("Checking that default ConfigMap is created") + cm := &corev1.ConfigMap{} + Eventually(func() error { + return k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerConfigMap, testNamespace), cm) + }, timeout, interval).Should(Succeed(), "Default ConfigMap should be created") + + By("Checking that CronJob is created") + cronJob := 
&batchv1.CronJob{} + Eventually(func() error { + return k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerCronJobName, testNamespace), cronJob) + }, timeout, interval).Should(Succeed(), "CronJob should be created") + + // Update the DevWorkspaceOperatorConfig to disable CleanupCronJob + disabled := false + dwoc.Config.Workspace.CleanupCronJob.Enable = &disabled + Expect(k8sClient.Update(ctx, dwoc)).Should(Succeed()) + + By("Checking that CronJob is suspended") + Eventually(func() bool { + err := k8sClient.Get(ctx, namespacedName(workspacecontroller.PrunerCronJobName, testNamespace), cronJob) + if err != nil { + return false + } + return *cronJob.Spec.Suspend + }, timeout, interval).Should(BeTrue(), "CronJob should be suspended") + }) + }) +}) + +func deleteDevWorkspaceOperatorConfig(name string) { + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: testNamespace, + }, + } + _ = k8sClient.Delete(ctx, dwoc) +} + +func deleteConfigMap(name string) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: testNamespace, + }, + } + _ = k8sClient.Delete(ctx, cm) +} + +func deleteCronJob(name string) { + cronJob := &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: testNamespace, + }, + } + _ = k8sClient.Delete(ctx, cronJob) +} + +func deleteServiceAccount(name string) { + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: testNamespace, + }, + } + _ = k8sClient.Delete(ctx, sa) +} + +func deleteClusterRole(name string) { + cr := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + _ = k8sClient.Delete(ctx, cr) +} + +func deleteClusterRoleBinding(name string) { + crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + _ = k8sClient.Delete(ctx, crb) +} diff --git a/controllers/workspace/suite_test.go b/controllers/workspace/suite_test.go 
index b2373017f..02d655482 100644 --- a/controllers/workspace/suite_test.go +++ b/controllers/workspace/suite_test.go @@ -141,6 +141,13 @@ var _ = BeforeSuite(func() { }).SetupWithManager(mgr) Expect(err).NotTo(HaveOccurred()) + err = (&workspacecontroller.PrunerReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("DevWorkspacePruner"), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + // Set HTTP client to fail all requests by default; tests that require HTTP must set this up directly workspacecontroller.SetupHttpClientsForTesting(getBasicTestHttpClient()) diff --git a/deploy/bundle/manifests/controller.devfile.io_devworkspaceoperatorconfigs.yaml b/deploy/bundle/manifests/controller.devfile.io_devworkspaceoperatorconfigs.yaml index 705998fb8..d615fc77c 100644 --- a/deploy/bundle/manifests/controller.devfile.io_devworkspaceoperatorconfigs.yaml +++ b/deploy/bundle/manifests/controller.devfile.io_devworkspaceoperatorconfigs.yaml @@ -185,6 +185,43 @@ spec: Workspace defines configuration options related to how DevWorkspaces are managed properties: + cleanupCronJob: + description: CleanupCronJobConfig defines configuration options for a cron job that automatically cleans up stale DevWorkspaces. + properties: + cronJobScript: + default: devworkspace-pruner + description: |- + CronJobScript specifies the name of a ConfigMap containing the script to be executed by the cleanup cron job. + This ConfigMap must reside in the same namespace as the DevWorkspace Operator. + The script is responsible for identifying and deleting stale DevWorkspaces (based on the `retainTime`). + The script must be idempotent. If not specified, defaults to `devworkspace-pruner`. + type: string + dryRun: + description: |- + DryRun determines whether the cleanup cron job should be run in dry-run mode. 
+ If set to true, the cron job will not delete any DevWorkspaces, but will log the DevWorkspaces that would have been deleted. + Defaults to false if not specified. + type: boolean + enable: + description: |- + Enable determines whether the cleanup cron job is enabled. + Defaults to false if not specified. + type: boolean + image: + description: |- + Image specifies the container image to use for the cleanup cron job. + If not specified, a suitable default image for the Kubernetes/OpenShift environment will be used. + type: string + retainTime: + default: 2592000 + description: |- + RetainTime specifies the minimum time (in seconds) since a DevWorkspace was last started before it is considered stale and eligible for cleanup. + For example, a value of 2592000 (30 days) would mean that any DevWorkspace that has not been started in the last 30 days will be deleted. + Defaults to 2592000 seconds (30 days) if not specified. + format: int32 + minimum: 0 + type: integer + type: object cleanupOnStop: description: |- CleanupOnStop governs how the Operator handles stopped DevWorkspaces. 
If set to diff --git a/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml b/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml index a3e65beaf..b5029d6e4 100644 --- a/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml +++ b/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml @@ -83,6 +83,18 @@ spec: - subjectaccessreviews verbs: - create + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resourceNames: @@ -104,6 +116,14 @@ spec: - serviceaccounts verbs: - '*' + - apiGroups: + - "" + resources: + - devworkspaceoperatorconfigs + verbs: + - get + - list + - watch - apiGroups: - "" resources: @@ -190,6 +210,18 @@ spec: - subjectaccessreviews verbs: - create + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - batch resources: diff --git a/deploy/deployment/kubernetes/combined.yaml b/deploy/deployment/kubernetes/combined.yaml index d44882e7b..e792111a2 100644 --- a/deploy/deployment/kubernetes/combined.yaml +++ b/deploy/deployment/kubernetes/combined.yaml @@ -186,6 +186,44 @@ spec: Workspace defines configuration options related to how DevWorkspaces are managed properties: + cleanupCronJob: + description: CleanupCronJobConfig defines configuration options + for a cron job that automatically cleans up stale DevWorkspaces. + properties: + cronJobScript: + default: devworkspace-pruner + description: |- + CronJobScript specifies the name of a ConfigMap containing the script to be executed by the cleanup cron job. + This ConfigMap must reside in the same namespace as the DevWorkspace Operator. + The script is responsible for identifying and deleting stale DevWorkspaces (based on the `retainTime`). + The script must be idempotent. If not specified, defaults to `devworkspace-pruner`. 
+ type: string + dryRun: + description: |- + DryRun determines whether the cleanup cron job should be run in dry-run mode. + If set to true, the cron job will not delete any DevWorkspaces, but will log the DevWorkspaces that would have been deleted. + Defaults to false if not specified. + type: boolean + enable: + description: |- + Enable determines whether the cleanup cron job is enabled. + Defaults to false if not specified. + type: boolean + image: + description: |- + Image specifies the container image to use for the cleanup cron job. + If not specified, a suitable default image for the Kubernetes/OpenShift environment will be used. + type: string + retainTime: + default: 2592000 + description: |- + RetainTime specifies the minimum time (in seconds) since a DevWorkspace was last started before it is considered stale and eligible for cleanup. + For example, a value of 2592000 (30 days) would mean that any DevWorkspace that has not been started in the last 30 days will be deleted. + Defaults to 2592000 seconds (30 days) if not specified. + format: int32 + minimum: 0 + type: integer + type: object cleanupOnStop: description: |- CleanupOnStop governs how the Operator handles stopped DevWorkspaces. 
If set to @@ -24938,6 +24976,18 @@ metadata: app.kubernetes.io/part-of: devworkspace-operator name: devworkspace-controller-role rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resourceNames: @@ -24959,6 +25009,14 @@ rules: - serviceaccounts verbs: - '*' +- apiGroups: + - "" + resources: + - devworkspaceoperatorconfigs + verbs: + - get + - list + - watch - apiGroups: - "" resources: @@ -25045,6 +25103,18 @@ rules: - subjectaccessreviews verbs: - create +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - batch resources: diff --git a/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml b/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml index e9f2e742f..d0d4474f9 100644 --- a/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml +++ b/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml @@ -6,6 +6,18 @@ metadata: app.kubernetes.io/part-of: devworkspace-operator name: devworkspace-controller-role rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resourceNames: @@ -27,6 +39,14 @@ rules: - serviceaccounts verbs: - '*' +- apiGroups: + - "" + resources: + - devworkspaceoperatorconfigs + verbs: + - get + - list + - watch - apiGroups: - "" resources: @@ -113,6 +133,18 @@ rules: - subjectaccessreviews verbs: - create +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - batch resources: diff --git a/deploy/deployment/kubernetes/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml 
b/deploy/deployment/kubernetes/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml index ed68ed014..bb37c8468 100644 --- a/deploy/deployment/kubernetes/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml +++ b/deploy/deployment/kubernetes/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml @@ -186,6 +186,44 @@ spec: Workspace defines configuration options related to how DevWorkspaces are managed properties: + cleanupCronJob: + description: CleanupCronJobConfig defines configuration options + for a cron job that automatically cleans up stale DevWorkspaces. + properties: + cronJobScript: + default: devworkspace-pruner + description: |- + CronJobScript specifies the name of a ConfigMap containing the script to be executed by the cleanup cron job. + This ConfigMap must reside in the same namespace as the DevWorkspace Operator. + The script is responsible for identifying and deleting stale DevWorkspaces (based on the `retainTime`). + The script must be idempotent. If not specified, defaults to `devworkspace-pruner`. + type: string + dryRun: + description: |- + DryRun determines whether the cleanup cron job should be run in dry-run mode. + If set to true, the cron job will not delete any DevWorkspaces, but will log the DevWorkspaces that would have been deleted. + Defaults to false if not specified. + type: boolean + enable: + description: |- + Enable determines whether the cleanup cron job is enabled. + Defaults to false if not specified. + type: boolean + image: + description: |- + Image specifies the container image to use for the cleanup cron job. + If not specified, a suitable default image for the Kubernetes/OpenShift environment will be used. + type: string + retainTime: + default: 2592000 + description: |- + RetainTime specifies the minimum time (in seconds) since a DevWorkspace was last started before it is considered stale and eligible for cleanup. 
+ For example, a value of 2592000 (30 days) would mean that any DevWorkspace that has not been started in the last 30 days will be deleted. + Defaults to 2592000 seconds (30 days) if not specified. + format: int32 + minimum: 0 + type: integer + type: object cleanupOnStop: description: |- CleanupOnStop governs how the Operator handles stopped DevWorkspaces. If set to diff --git a/deploy/deployment/openshift/combined.yaml b/deploy/deployment/openshift/combined.yaml index 58edcf702..9c05d0ae9 100644 --- a/deploy/deployment/openshift/combined.yaml +++ b/deploy/deployment/openshift/combined.yaml @@ -186,6 +186,44 @@ spec: Workspace defines configuration options related to how DevWorkspaces are managed properties: + cleanupCronJob: + description: CleanupCronJobConfig defines configuration options + for a cron job that automatically cleans up stale DevWorkspaces. + properties: + cronJobScript: + default: devworkspace-pruner + description: |- + CronJobScript specifies the name of a ConfigMap containing the script to be executed by the cleanup cron job. + This ConfigMap must reside in the same namespace as the DevWorkspace Operator. + The script is responsible for identifying and deleting stale DevWorkspaces (based on the `retainTime`). + The script must be idempotent. If not specified, defaults to `devworkspace-pruner`. + type: string + dryRun: + description: |- + DryRun determines whether the cleanup cron job should be run in dry-run mode. + If set to true, the cron job will not delete any DevWorkspaces, but will log the DevWorkspaces that would have been deleted. + Defaults to false if not specified. + type: boolean + enable: + description: |- + Enable determines whether the cleanup cron job is enabled. + Defaults to false if not specified. + type: boolean + image: + description: |- + Image specifies the container image to use for the cleanup cron job. + If not specified, a suitable default image for the Kubernetes/OpenShift environment will be used. 
+ type: string + retainTime: + default: 2592000 + description: |- + RetainTime specifies the minimum time (in seconds) since a DevWorkspace was last started before it is considered stale and eligible for cleanup. + For example, a value of 2592000 (30 days) would mean that any DevWorkspace that has not been started in the last 30 days will be deleted. + Defaults to 2592000 seconds (30 days) if not specified. + format: int32 + minimum: 0 + type: integer + type: object cleanupOnStop: description: |- CleanupOnStop governs how the Operator handles stopped DevWorkspaces. If set to @@ -24938,6 +24976,18 @@ metadata: app.kubernetes.io/part-of: devworkspace-operator name: devworkspace-controller-role rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resourceNames: @@ -24959,6 +25009,14 @@ rules: - serviceaccounts verbs: - '*' +- apiGroups: + - "" + resources: + - devworkspaceoperatorconfigs + verbs: + - get + - list + - watch - apiGroups: - "" resources: @@ -25045,6 +25103,18 @@ rules: - subjectaccessreviews verbs: - create +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - batch resources: diff --git a/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml b/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml index e9f2e742f..d0d4474f9 100644 --- a/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml +++ b/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml @@ -6,6 +6,18 @@ metadata: app.kubernetes.io/part-of: devworkspace-operator name: devworkspace-controller-role rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resourceNames: @@ -27,6 +39,14 @@ rules: - 
serviceaccounts verbs: - '*' +- apiGroups: + - "" + resources: + - devworkspaceoperatorconfigs + verbs: + - get + - list + - watch - apiGroups: - "" resources: @@ -113,6 +133,18 @@ rules: - subjectaccessreviews verbs: - create +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - batch resources: diff --git a/deploy/deployment/openshift/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml b/deploy/deployment/openshift/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml index ed68ed014..bb37c8468 100644 --- a/deploy/deployment/openshift/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml +++ b/deploy/deployment/openshift/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml @@ -186,6 +186,44 @@ spec: Workspace defines configuration options related to how DevWorkspaces are managed properties: + cleanupCronJob: + description: CleanupCronJobConfig defines configuration options + for a cron job that automatically cleans up stale DevWorkspaces. + properties: + cronJobScript: + default: devworkspace-pruner + description: |- + CronJobScript specifies the name of a ConfigMap containing the script to be executed by the cleanup cron job. + This ConfigMap must reside in the same namespace as the DevWorkspace Operator. + The script is responsible for identifying and deleting stale DevWorkspaces (based on the `retainTime`). + The script must be idempotent. If not specified, defaults to `devworkspace-pruner`. + type: string + dryRun: + description: |- + DryRun determines whether the cleanup cron job should be run in dry-run mode. + If set to true, the cron job will not delete any DevWorkspaces, but will log the DevWorkspaces that would have been deleted. + Defaults to false if not specified. 
+ type: boolean + enable: + description: |- + Enable determines whether the cleanup cron job is enabled. + Defaults to false if not specified. + type: boolean + image: + description: |- + Image specifies the container image to use for the cleanup cron job. + If not specified, a suitable default image for the Kubernetes/OpenShift environment will be used. + type: string + retainTime: + default: 2592000 + description: |- + RetainTime specifies the minimum time (in seconds) since a DevWorkspace was last started before it is considered stale and eligible for cleanup. + For example, a value of 2592000 (30 days) would mean that any DevWorkspace that has not been started in the last 30 days will be deleted. + Defaults to 2592000 seconds (30 days) if not specified. + format: int32 + minimum: 0 + type: integer + type: object cleanupOnStop: description: |- CleanupOnStop governs how the Operator handles stopped DevWorkspaces. If set to diff --git a/deploy/templates/components/rbac/role.yaml b/deploy/templates/components/rbac/role.yaml index 73d0895e2..4f0243357 100644 --- a/deploy/templates/components/rbac/role.yaml +++ b/deploy/templates/components/rbac/role.yaml @@ -4,6 +4,18 @@ kind: ClusterRole metadata: name: role rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resourceNames: @@ -25,6 +37,14 @@ rules: - serviceaccounts verbs: - '*' +- apiGroups: + - "" + resources: + - devworkspaceoperatorconfigs + verbs: + - get + - list + - watch - apiGroups: - "" resources: @@ -111,6 +131,18 @@ rules: - subjectaccessreviews verbs: - create +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - batch resources: diff --git a/deploy/templates/crd/bases/controller.devfile.io_devworkspaceoperatorconfigs.yaml b/deploy/templates/crd/bases/controller.devfile.io_devworkspaceoperatorconfigs.yaml index 
cbc5e9fe2..40e2748cd 100644 --- a/deploy/templates/crd/bases/controller.devfile.io_devworkspaceoperatorconfigs.yaml +++ b/deploy/templates/crd/bases/controller.devfile.io_devworkspaceoperatorconfigs.yaml @@ -184,6 +184,44 @@ spec: Workspace defines configuration options related to how DevWorkspaces are managed properties: + cleanupCronJob: + description: CleanupCronJobConfig defines configuration options + for a cron job that automatically cleans up stale DevWorkspaces. + properties: + cronJobScript: + default: devworkspace-pruner + description: |- + CronJobScript specifies the name of a ConfigMap containing the script to be executed by the cleanup cron job. + This ConfigMap must reside in the same namespace as the DevWorkspace Operator. + The script is responsible for identifying and deleting stale DevWorkspaces (based on the `retainTime`). + The script must be idempotent. If not specified, defaults to `devworkspace-pruner`. + type: string + dryRun: + description: |- + DryRun determines whether the cleanup cron job should be run in dry-run mode. + If set to true, the cron job will not delete any DevWorkspaces, but will log the DevWorkspaces that would have been deleted. + Defaults to false if not specified. + type: boolean + enable: + description: |- + Enable determines whether the cleanup cron job is enabled. + Defaults to false if not specified. + type: boolean + image: + description: |- + Image specifies the container image to use for the cleanup cron job. + If not specified, a suitable default image for the Kubernetes/OpenShift environment will be used. + type: string + retainTime: + default: 2592000 + description: |- + RetainTime specifies the minimum time (in seconds) since a DevWorkspace was last started before it is considered stale and eligible for cleanup. + For example, a value of 2592000 (30 days) would mean that any DevWorkspace that has not been started in the last 30 days will be deleted. + Defaults to 2592000 seconds (30 days) if not specified. 
+ format: int32 + minimum: 0 + type: integer + type: object cleanupOnStop: description: |- CleanupOnStop governs how the Operator handles stopped DevWorkspaces. If set to diff --git a/main.go b/main.go index 2984f4f1f..acfd171fe 100644 --- a/main.go +++ b/main.go @@ -169,6 +169,14 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "DevWorkspace") os.Exit(1) } + if err = (&workspacecontroller.PrunerReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("DevWorkspacePruner"), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "DevWorkspacePruner") + os.Exit(1) + } // +kubebuilder:scaffold:builder // Get a config to talk to the apiserver diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index a29e6d868..9521d0122 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -46,6 +46,10 @@ func GetCacheFunc() (cache.NewCacheFunc, error) { if err != nil { return nil, err } + cronJobObjectSelector, err := labels.Parse(fmt.Sprintf("%s=true", constants.DevWorkspaceWatchCronJobLabel)) + if err != nil { + return nil, err + } rbacObjectSelector, err := labels.Parse("controller.devfile.io/workspace-rbac=true") if err != nil { return nil, err @@ -70,6 +74,9 @@ func GetCacheFunc() (cache.NewCacheFunc, error) { &networkingv1.Ingress{}: { Label: devworkspaceObjectSelector, }, + &batchv1.CronJob{}: { + Label: cronJobObjectSelector, + }, &corev1.ConfigMap{}: { Label: configmapObjectSelector, }, diff --git a/pkg/config/defaults.go b/pkg/config/defaults.go index 5e1c2eba0..e52105903 100644 --- a/pkg/config/defaults.go +++ b/pkg/config/defaults.go @@ -76,6 +76,13 @@ var defaultConfig = &v1alpha1.OperatorConfiguration{ corev1.ResourceMemory: resource.MustParse("64Mi"), }, }, + CleanupCronJob: &v1alpha1.CleanupCronJobConfig{ + Enable: pointer.Bool(false), + Image: "image-registry.openshift-image-registry.svc:5000/openshift/cli:latest", + 
DryRun: pointer.Bool(false), + RetainTime: pointer.Int32(2592000), + CronJobScript: "devworkspace-pruner", + }, }, } diff --git a/pkg/config/sync.go b/pkg/config/sync.go index 483c820ec..daff903f0 100644 --- a/pkg/config/sync.go +++ b/pkg/config/sync.go @@ -410,6 +410,27 @@ func mergeConfig(from, to *controller.OperatorConfiguration) { to.Workspace.PodAnnotations[key] = value } } + + if from.Workspace.CleanupCronJob != nil { + if to.Workspace.CleanupCronJob == nil { + to.Workspace.CleanupCronJob = &controller.CleanupCronJobConfig{} + } + if from.Workspace.CleanupCronJob.Enable != nil { + to.Workspace.CleanupCronJob.Enable = from.Workspace.CleanupCronJob.Enable + } + if from.Workspace.CleanupCronJob.Image != "" { + to.Workspace.CleanupCronJob.Image = from.Workspace.CleanupCronJob.Image + } + if from.Workspace.CleanupCronJob.DryRun != nil { + to.Workspace.CleanupCronJob.DryRun = from.Workspace.CleanupCronJob.DryRun + } + if from.Workspace.CleanupCronJob.RetainTime != nil { + to.Workspace.CleanupCronJob.RetainTime = from.Workspace.CleanupCronJob.RetainTime + } + if from.Workspace.CleanupCronJob.CronJobScript != "" { + to.Workspace.CleanupCronJob.CronJobScript = from.Workspace.CleanupCronJob.CronJobScript + } + } } } @@ -638,6 +659,23 @@ func GetCurrentConfigString(currConfig *controller.OperatorConfiguration) string if !reflect.DeepEqual(workspace.PodAnnotations, defaultConfig.Workspace.PodAnnotations) { config = append(config, "workspace.podAnnotations is set") } + if workspace.CleanupCronJob != nil { + if workspace.CleanupCronJob.Enable != nil && *workspace.CleanupCronJob.Enable != *defaultConfig.Workspace.CleanupCronJob.Enable { + config = append(config, fmt.Sprintf("workspace.cleanupCronJob.enable=%t", *workspace.CleanupCronJob.Enable)) + } + if workspace.CleanupCronJob.Image != defaultConfig.Workspace.CleanupCronJob.Image { + config = append(config, fmt.Sprintf("workspace.cleanupCronJob.image=%s", workspace.CleanupCronJob.Image)) + } + if 
workspace.CleanupCronJob.DryRun != nil && *workspace.CleanupCronJob.DryRun != *defaultConfig.Workspace.CleanupCronJob.DryRun { + config = append(config, fmt.Sprintf("workspace.cleanupCronJob.dryRun=%t", *workspace.CleanupCronJob.DryRun)) + } + if workspace.CleanupCronJob.RetainTime != nil && *workspace.CleanupCronJob.RetainTime != *defaultConfig.Workspace.CleanupCronJob.RetainTime { + config = append(config, fmt.Sprintf("workspace.cleanupCronJob.retainTime=%d", *workspace.CleanupCronJob.RetainTime)) + } + if workspace.CleanupCronJob.CronJobScript != defaultConfig.Workspace.CleanupCronJob.CronJobScript { + config = append(config, fmt.Sprintf("workspace.cleanupCronJob.cronJobScript=%s", workspace.CleanupCronJob.CronJobScript)) + } + } } if currConfig.EnableExperimentalFeatures != nil && *currConfig.EnableExperimentalFeatures { config = append(config, "enableExperimentalFeatures=true") diff --git a/pkg/constants/metadata.go b/pkg/constants/metadata.go index 20ffd09a6..6715e15d6 100644 --- a/pkg/constants/metadata.go +++ b/pkg/constants/metadata.go @@ -34,6 +34,10 @@ const ( // DevWorkspaceNameLabel is the label key to store workspace name DevWorkspaceNameLabel = "controller.devfile.io/devworkspace_name" + // DevWorkspaceWatchCronJobLabel marks a cronjob so that it is watched by the controller. This label is required on all + // cronjobs that should be seen by the controller + DevWorkspaceWatchCronJobLabel = "controller.devfile.io/watch-cronjob" + // DevWorkspaceWatchConfigMapLabel marks a configmap so that it is watched by the controller. This label is required on all // configmaps that should be seen by the controller DevWorkspaceWatchConfigMapLabel = "controller.devfile.io/watch-configmap"