From 13303c65c2eeb2ccd5983fd02bb6c84975c31ebb Mon Sep 17 00:00:00 2001
From: greg pereira
Date: Tue, 2 Apr 2024 13:06:17 -0700
Subject: [PATCH] swapping cr and crbs to r and rbs

Signed-off-by: greg pereira
---
 api/v1alpha1/common.go                        |   2 +-
 .../rhtas-operator.clusterserviceversion.yaml |   4 +-
 .../manifests/rhtas.redhat.com_fulcios.yaml   |   2 +-
 bundle/manifests/rhtas.redhat.com_rekors.yaml |   2 +-
 .../rhtas.redhat.com_securesigns.yaml         |   4 +-
 .../crd/bases/rhtas.redhat.com_fulcios.yaml   |   2 +-
 config/crd/bases/rhtas.redhat.com_rekors.yaml |   2 +-
 .../bases/rhtas.redhat.com_securesigns.yaml   |   4 +-
 config/rbac/role.yaml                         |   3 +
 config/samples/rhtas_v1alpha1_securesign.yaml |   4 +-
 controllers/securesign/actions/rbac.go        | 216 ++++++++++++++----
 .../securesign/securesign_controller.go       |  25 +-
 12 files changed, 209 insertions(+), 61 deletions(-)

diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go
index 4eeec6936..3bb8edbd7 100644
--- a/api/v1alpha1/common.go
+++ b/api/v1alpha1/common.go
@@ -17,7 +17,7 @@ type ExternalAccess struct {
 type MonitoringConfig struct {
     // If true, the Operator will create monitoring resources
     //+kubebuilder:validation:XValidation:rule=(self || !oldSelf),message=Feature cannot be disabled
-    //+kubebuilder:default:=false
+    //+kubebuilder:default:=true
     Enabled bool `json:"enabled"`
 }
 
diff --git a/bundle/manifests/rhtas-operator.clusterserviceversion.yaml b/bundle/manifests/rhtas-operator.clusterserviceversion.yaml
index 95dc610a7..ad4ce29b3 100644
--- a/bundle/manifests/rhtas-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/rhtas-operator.clusterserviceversion.yaml
@@ -111,7 +111,7 @@ metadata:
               "enabled": true
             },
             "monitoring": {
-              "enabled": false
+              "enabled": true
             }
           },
           "rekor": {
@@ -119,7 +119,7 @@ metadata:
               "enabled": true
             },
             "monitoring": {
-              "enabled": false
+              "enabled": true
             }
           },
           "trillian": {
diff --git a/bundle/manifests/rhtas.redhat.com_fulcios.yaml b/bundle/manifests/rhtas.redhat.com_fulcios.yaml
index ac60e7c30..c96f11699 100644
--- a/bundle/manifests/rhtas.redhat.com_fulcios.yaml
+++ b/bundle/manifests/rhtas.redhat.com_fulcios.yaml
@@ -241,7 +241,7 @@ spec:
             description: Enable Service monitors for fulcio
             properties:
               enabled:
-                default: false
+                default: true
                 description: If true, the Operator will create monitoring resources
                 type: boolean
                 x-kubernetes-validations:
diff --git a/bundle/manifests/rhtas.redhat.com_rekors.yaml b/bundle/manifests/rhtas.redhat.com_rekors.yaml
index 5c2781fbd..c17d9f87c 100644
--- a/bundle/manifests/rhtas.redhat.com_rekors.yaml
+++ b/bundle/manifests/rhtas.redhat.com_rekors.yaml
@@ -91,7 +91,7 @@ spec:
             description: Enable Service monitors for rekor
             properties:
               enabled:
-                default: false
+                default: true
                 description: If true, the Operator will create monitoring resources
                 type: boolean
                 x-kubernetes-validations:
diff --git a/bundle/manifests/rhtas.redhat.com_securesigns.yaml b/bundle/manifests/rhtas.redhat.com_securesigns.yaml
index 4125e1f7a..69bf7e4bd 100644
--- a/bundle/manifests/rhtas.redhat.com_securesigns.yaml
+++ b/bundle/manifests/rhtas.redhat.com_securesigns.yaml
@@ -355,7 +355,7 @@ spec:
                 description: Enable Service monitors for fulcio
                 properties:
                   enabled:
-                    default: false
+                    default: true
                     description: If true, the Operator will create monitoring
                       resources
                     type: boolean
@@ -427,7 +427,7 @@ spec:
                 description: Enable Service monitors for rekor
                 properties:
                   enabled:
-                    default: false
+                    default: true
                     description: If true, the Operator will create monitoring
                       resources
                     type: boolean
diff --git a/config/crd/bases/rhtas.redhat.com_fulcios.yaml b/config/crd/bases/rhtas.redhat.com_fulcios.yaml
index d0da89e8a..9a198ab4f 100644
--- a/config/crd/bases/rhtas.redhat.com_fulcios.yaml
+++ b/config/crd/bases/rhtas.redhat.com_fulcios.yaml
@@ -241,7 +241,7 @@ spec:
             description: Enable Service monitors for fulcio
             properties:
              enabled:
-                default: false
+                default: true
                 description: If true, the Operator will create monitoring resources
                 type: boolean
                 x-kubernetes-validations:
diff --git a/config/crd/bases/rhtas.redhat.com_rekors.yaml b/config/crd/bases/rhtas.redhat.com_rekors.yaml
index 36b81800a..3a3dd4614 100644
--- a/config/crd/bases/rhtas.redhat.com_rekors.yaml
+++ b/config/crd/bases/rhtas.redhat.com_rekors.yaml
@@ -91,7 +91,7 @@ spec:
             description: Enable Service monitors for rekor
             properties:
              enabled:
-                default: false
+                default: true
                 description: If true, the Operator will create monitoring resources
                 type: boolean
                 x-kubernetes-validations:
diff --git a/config/crd/bases/rhtas.redhat.com_securesigns.yaml b/config/crd/bases/rhtas.redhat.com_securesigns.yaml
index b433ac805..9388ea616 100644
--- a/config/crd/bases/rhtas.redhat.com_securesigns.yaml
+++ b/config/crd/bases/rhtas.redhat.com_securesigns.yaml
@@ -355,7 +355,7 @@ spec:
                 description: Enable Service monitors for fulcio
                 properties:
                   enabled:
-                    default: false
+                    default: true
                     description: If true, the Operator will create monitoring
                       resources
                     type: boolean
@@ -427,7 +427,7 @@ spec:
                 description: Enable Service monitors for rekor
                 properties:
                   enabled:
-                    default: false
+                    default: true
                     description: If true, the Operator will create monitoring
                       resources
                     type: boolean
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 5e109341e..95ed5a169 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -233,6 +233,7 @@ rules:
   verbs:
   - create
   - delete
+  - deletecollection
   - get
   - list
   - patch
@@ -245,6 +246,7 @@ rules:
   verbs:
   - create
   - delete
+  - deletecollection
   - get
   - list
   - patch
@@ -257,6 +259,7 @@ rules:
   verbs:
   - create
   - delete
+  - deletecollection
   - get
   - list
   - patch
diff --git a/config/samples/rhtas_v1alpha1_securesign.yaml b/config/samples/rhtas_v1alpha1_securesign.yaml
index 021d7f94b..4c31bed96 100644
--- a/config/samples/rhtas_v1alpha1_securesign.yaml
+++ b/config/samples/rhtas_v1alpha1_securesign.yaml
@@ -12,7 +12,7 @@ spec:
     externalAccess:
       enabled: true
     monitoring:
-      enabled: false
+      enabled: true
   trillian:
     database:
       create: true
@@ -30,7 +30,7 @@ spec:
       organizationEmail: jdoe@redhat.com
       commonName: fulcio.hostname
     monitoring:
-      enabled: false
+      enabled: true
   tuf:
     externalAccess:
       enabled: true
diff --git a/controllers/securesign/actions/rbac.go b/controllers/securesign/actions/rbac.go
index e8f81b379..6de13a535 100644
--- a/controllers/securesign/actions/rbac.go
+++ b/controllers/securesign/actions/rbac.go
@@ -9,14 +9,21 @@ import (
     "github.com/securesign/operator/controllers/common/action"
     "github.com/securesign/operator/controllers/common/utils/kubernetes"
     "github.com/securesign/operator/controllers/constants"
+    "github.com/securesign/operator/controllers/rekor/actions"
     v1 "k8s.io/api/core/v1"
     rbacv1 "k8s.io/api/rbac/v1"
     "k8s.io/apimachinery/pkg/api/meta"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    ctrl "sigs.k8s.io/controller-runtime"
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 )
 
-const namespacedNamePattern = SegmentRBACName + "-%s"
+const (
+    namespacedNamePattern   = SegmentRBACName + "-%s"
+    clusterWideNamePattern  = SegmentRBACName + "-%s" + "-%s"
"openshift-monitoring" + openshiftConsoleNS = "openshift-console" + OpenshiftUWMonitoringNS = "openshift-user-workload-monitoring" +) func NewRBACAction() action.Action[rhtasv1alpha1.Securesign] { return &rbacAction{} @@ -47,87 +54,208 @@ func (i rbacAction) Handle(ctx context.Context, instance *rhtasv1alpha1.Securesi } var err error - labels := constants.LabelsFor(SegmentBackupCronJobName, SegmentBackupCronJobName, instance.Name) + labels := constants.LabelsFor(SegmentBackupJobName, SegmentBackupCronJobName, instance.Name) labels["app.kubernetes.io/instance-namespace"] = instance.Namespace - sa := &v1.ServiceAccount{ + serviceAccount := &v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: SegmentRBACName, Namespace: instance.Namespace, Labels: labels, }, } - - if err = ctrl.SetControllerReference(instance, sa, i.Client.Scheme()); err != nil { - return i.Failed(fmt.Errorf("could not set controll reference for SA: %w", err)) + if err = controllerutil.SetControllerReference(instance, serviceAccount, i.Client.Scheme()); err != nil { + return i.Failed(fmt.Errorf("could not set controller reference for serviceAccount: %w", err)) + } + if _, err = i.Ensure(ctx, serviceAccount); err != nil { + meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ + Type: actions.ServerCondition, + Status: metav1.ConditionFalse, + Reason: constants.Failure, + Message: err.Error(), + }) + meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ + Type: constants.Ready, + Status: metav1.ConditionFalse, + Reason: constants.Failure, + Message: err.Error(), + }) + return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create serviceAccount: %w", err), instance) } - // don't re-enqueue for RBAC in any case (except failure) - _, err = i.Ensure(ctx, sa) - if err != nil { + openshiftMonitoringSBJRole := kubernetes.CreateRole( + OpenshiftMonitoringNS, + fmt.Sprintf(namespacedNamePattern, instance.Namespace), + labels, + []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"get", "list", "patch"}, + }, + { + APIGroups: []string{"route.openshift.io"}, + Resources: []string{"routes"}, + Verbs: []string{"get", "list"}, + }, + }) + if _, err = i.Ensure(ctx, openshiftMonitoringSBJRole); err != nil { + meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ + Type: actions.ServerCondition, + Status: metav1.ConditionFalse, + Reason: constants.Failure, + Message: err.Error(), + }) meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ Type: constants.Ready, Status: metav1.ConditionFalse, Reason: constants.Failure, Message: err.Error(), }) - return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create SA: %w", err), instance) + return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create openshift-monitoring role for SBJ: %w", err), instance) } - role := kubernetes.CreateClusterRole(SegmentRBACName, constants.LabelsRHTAS(), []rbacv1.PolicyRule{ - { - APIGroups: []string{""}, - Resources: []string{"secrets"}, - Verbs: []string{"get", "list"}, + openshiftMonitoringSBJRoleBinding := kubernetes.CreateRoleBinding( + OpenshiftMonitoringNS, + fmt.Sprintf(namespacedNamePattern, instance.Namespace), + labels, + rbacv1.RoleRef{ + APIGroup: v1.SchemeGroupVersion.Group, + Kind: "Role", + Name: fmt.Sprintf(namespacedNamePattern, instance.Namespace), }, - { - APIGroups: []string{""}, - Resources: []string{"configmaps"}, - Verbs: []string{"get", "list"}, - }, - { - APIGroups: []string{"route.openshift.io"}, - 
Resources: []string{"routes"}, - Verbs: []string{"get", "list"}, - }, - { - APIGroups: []string{"operator.openshift.io"}, - Resources: []string{"consoles"}, - Verbs: []string{"get", "list"}, - }, - }) - - _, err = i.Ensure(ctx, role) + []rbacv1.Subject{ + {Kind: "ServiceAccount", Name: SegmentRBACName, Namespace: instance.Namespace}, + }) + if _, err = i.Ensure(ctx, openshiftMonitoringSBJRoleBinding); err != nil { + meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ + Type: actions.ServerCondition, + Status: metav1.ConditionFalse, + Reason: constants.Failure, + Message: err.Error(), + }) + meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ + Type: constants.Ready, + Status: metav1.ConditionFalse, + Reason: constants.Failure, + Message: err.Error(), + }) + return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create openshift-monitoring role binding for SBJ: %w", err), instance) + } - if err != nil { + openshiftUWMonitoringSBJRole := kubernetes.CreateRole( + OpenshiftUWMonitoringNS, + fmt.Sprintf(namespacedNamePattern, instance.Namespace), + labels, + []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{"get", "list"}, + }, + }) + if _, err = i.Ensure(ctx, openshiftUWMonitoringSBJRole); err != nil { + meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ + Type: actions.ServerCondition, + Status: metav1.ConditionFalse, + Reason: constants.Failure, + Message: err.Error(), + }) meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ Type: constants.Ready, Status: metav1.ConditionFalse, Reason: constants.Failure, Message: err.Error(), }) - return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create clusterrole required for SBJ: %w", err), instance) + return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create openshift-user-workload-monitoring role for SBJ: %w", err), instance) } - rb := kubernetes.CreateClusterRoleBinding(fmt.Sprintf(namespacedNamePattern, instance.Namespace), labels, rbacv1.RoleRef{ - APIGroup: v1.SchemeGroupVersion.Group, - Kind: "ClusterRole", - Name: SegmentRBACName, - }, + openshiftUWMonitoringDBJRolebinding := kubernetes.CreateRoleBinding( + OpenshiftUWMonitoringNS, + fmt.Sprintf(namespacedNamePattern, instance.Namespace), + labels, + rbacv1.RoleRef{ + APIGroup: v1.SchemeGroupVersion.Group, + Kind: "Role", + Name: fmt.Sprintf(namespacedNamePattern, instance.Namespace), + }, []rbacv1.Subject{ {Kind: "ServiceAccount", Name: SegmentRBACName, Namespace: instance.Namespace}, }) + if _, err = i.Ensure(ctx, openshiftUWMonitoringDBJRolebinding); err != nil { + meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ + Type: actions.ServerCondition, + Status: metav1.ConditionFalse, + Reason: constants.Failure, + Message: err.Error(), + }) + meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ + Type: constants.Ready, + Status: metav1.ConditionFalse, + Reason: constants.Failure, + Message: err.Error(), + }) + return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create openshift-user-workload-monitoring role binding for SBJ: %w", err), instance) + } - _, err = i.Ensure(ctx, rb) + openshiftConsoleSBJRole := kubernetes.CreateClusterRole( + fmt.Sprintf(clusterWideNamePattern, instance.Namespace, "clusterRole"), + labels, + []rbacv1.PolicyRule{ + { + APIGroups: []string{"operator.openshift.io"}, + Resources: []string{"consoles"}, + Verbs: []string{"get", "list"}, + ResourceNames: []string{"cluster"}, + }, + { 
+                APIGroups:     []string{"route.openshift.io"},
+                Resources:     []string{"routes"},
+                Verbs:         []string{"get", "list"},
+                ResourceNames: []string{"console"},
+            },
+        })
+    if _, err = i.Ensure(ctx, openshiftConsoleSBJRole); err != nil {
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    actions.ServerCondition,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    constants.Ready,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
+        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create openshift-console ClusterRole for SBJ: %w", err), instance)
+    }
 
-    if err != nil {
+    openshiftConsoleSBJRolebinding := kubernetes.CreateClusterRoleBinding(
+        fmt.Sprintf(clusterWideNamePattern, instance.Namespace, "clusterRoleBinding"),
+        labels,
+        rbacv1.RoleRef{
+            APIGroup: v1.SchemeGroupVersion.Group,
+            Kind:     "ClusterRole",
+            Name:     fmt.Sprintf(clusterWideNamePattern, instance.Namespace, "clusterRole"),
+        },
+        []rbacv1.Subject{
+            {Kind: "ServiceAccount", Name: SegmentRBACName, Namespace: instance.Namespace},
+        })
+    if _, err = i.Ensure(ctx, openshiftConsoleSBJRolebinding); err != nil {
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    actions.ServerCondition,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
         meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
             Type:    constants.Ready,
             Status:  metav1.ConditionFalse,
             Reason:  constants.Failure,
             Message: err.Error(),
         })
-        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create clusterrolebinding required for SBJ: %w", err), instance)
+        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create openshift-console ClusterRoleBinding for SBJ: %w", err), instance)
     }
 
     return i.Continue()
diff --git a/controllers/securesign/securesign_controller.go b/controllers/securesign/securesign_controller.go
index 3ad9bc964..4e78472b4 100644
--- a/controllers/securesign/securesign_controller.go
+++ b/controllers/securesign/securesign_controller.go
@@ -55,10 +55,10 @@ type SecuresignReconciler struct {
 //+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=networking,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
-//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create;update;patch;delete;deletecollection
 //+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create;update;patch;delete;deletecollection
-//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;update;patch;delete
-//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete;deletecollection
 //+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;watch;create;update;patch;delete
@@ -92,11 +92,28 @@ func (r *SecuresignReconciler) Reconcile(ctx context.Context, req ctrl.Request)
     }
 
     if instance.DeletionTimestamp != nil {
-        labels := constants.LabelsFor(actions.SegmentBackupCronJobName, actions.SegmentBackupCronJobName, instance.Name)
+        labels := constants.LabelsFor(actions.SegmentBackupJobName, actions.SegmentBackupCronJobName, instance.Name)
         labels["app.kubernetes.io/instance-namespace"] = instance.Namespace
         if err := r.Client.DeleteAllOf(ctx, &v1.ClusterRoleBinding{}, client.MatchingLabels(labels)); err != nil {
             log.Error(err, "problem with removing clusterRoleBinding resource")
         }
+        if err := r.Client.DeleteAllOf(ctx, &v1.ClusterRole{}, client.MatchingLabels(labels)); err != nil {
+            log.Error(err, "problem with removing ClusterRole resource")
+        }
+
+        monitoringNS := []string{
+            actions.OpenshiftMonitoringNS,
+            actions.OpenshiftUWMonitoringNS,
+        }
+        for _, ns := range monitoringNS {
+            if err := r.Client.DeleteAllOf(ctx, &v1.Role{}, client.InNamespace(ns), client.MatchingLabels(labels)); err != nil {
+                log.Error(err, "problem with removing Role resource", "namespace", ns)
+            }
+            if err := r.Client.DeleteAllOf(ctx, &v1.RoleBinding{}, client.InNamespace(ns), client.MatchingLabels(labels)); err != nil {
+                log.Error(err, "problem with removing RoleBinding resource", "namespace", ns)
+            }
+        }
+
         controllerutil.RemoveFinalizer(target, finalizer)
         return ctrl.Result{}, r.Update(ctx, target)
     }