diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go
index 4eeec693..3bb8edbd 100644
--- a/api/v1alpha1/common.go
+++ b/api/v1alpha1/common.go
@@ -17,7 +17,7 @@ type ExternalAccess struct {
 type MonitoringConfig struct {
     // If true, the Operator will create monitoring resources
     //+kubebuilder:validation:XValidation:rule=(self || !oldSelf),message=Feature cannot be disabled
-    //+kubebuilder:default:=false
+    //+kubebuilder:default:=true
     Enabled bool `json:"enabled"`
 }
diff --git a/bundle/manifests/rhtas-operator.clusterserviceversion.yaml b/bundle/manifests/rhtas-operator.clusterserviceversion.yaml
index 95dc610a..ad4ce29b 100644
--- a/bundle/manifests/rhtas-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/rhtas-operator.clusterserviceversion.yaml
@@ -111,7 +111,7 @@ metadata:
             "enabled": true
           },
           "monitoring": {
-            "enabled": false
+            "enabled": true
           }
         },
         "rekor": {
@@ -119,7 +119,7 @@ metadata:
             "enabled": true
          },
          "monitoring": {
-            "enabled": false
+            "enabled": true
          }
        },
        "trillian": {
diff --git a/bundle/manifests/rhtas.redhat.com_fulcios.yaml b/bundle/manifests/rhtas.redhat.com_fulcios.yaml
index ac60e7c3..c96f1169 100644
--- a/bundle/manifests/rhtas.redhat.com_fulcios.yaml
+++ b/bundle/manifests/rhtas.redhat.com_fulcios.yaml
@@ -241,7 +241,7 @@ spec:
             description: Enable Service monitors for fulcio
             properties:
               enabled:
-                default: false
+                default: true
                 description: If true, the Operator will create monitoring resources
                 type: boolean
                 x-kubernetes-validations:
diff --git a/bundle/manifests/rhtas.redhat.com_rekors.yaml b/bundle/manifests/rhtas.redhat.com_rekors.yaml
index 5c2781fb..c17d9f87 100644
--- a/bundle/manifests/rhtas.redhat.com_rekors.yaml
+++ b/bundle/manifests/rhtas.redhat.com_rekors.yaml
@@ -91,7 +91,7 @@ spec:
             description: Enable Service monitors for rekor
             properties:
               enabled:
-                default: false
+                default: true
                 description: If true, the Operator will create monitoring resources
                 type: boolean
                 x-kubernetes-validations:
diff --git a/bundle/manifests/rhtas.redhat.com_securesigns.yaml b/bundle/manifests/rhtas.redhat.com_securesigns.yaml
index 4125e1f7..69bf7e4b 100644
--- a/bundle/manifests/rhtas.redhat.com_securesigns.yaml
+++ b/bundle/manifests/rhtas.redhat.com_securesigns.yaml
@@ -355,7 +355,7 @@ spec:
                 description: Enable Service monitors for fulcio
                 properties:
                   enabled:
-                    default: false
+                    default: true
                     description: If true, the Operator will create monitoring resources
                     type: boolean
@@ -427,7 +427,7 @@ spec:
                 description: Enable Service monitors for rekor
                 properties:
                   enabled:
-                    default: false
+                    default: true
                     description: If true, the Operator will create monitoring resources
                     type: boolean
diff --git a/config/crd/bases/rhtas.redhat.com_fulcios.yaml b/config/crd/bases/rhtas.redhat.com_fulcios.yaml
index d0da89e8..9a198ab4 100644
--- a/config/crd/bases/rhtas.redhat.com_fulcios.yaml
+++ b/config/crd/bases/rhtas.redhat.com_fulcios.yaml
@@ -241,7 +241,7 @@ spec:
             description: Enable Service monitors for fulcio
             properties:
               enabled:
-                default: false
+                default: true
                 description: If true, the Operator will create monitoring resources
                 type: boolean
                 x-kubernetes-validations:
diff --git a/config/crd/bases/rhtas.redhat.com_rekors.yaml b/config/crd/bases/rhtas.redhat.com_rekors.yaml
index 36b81800..3a3dd461 100644
--- a/config/crd/bases/rhtas.redhat.com_rekors.yaml
+++ b/config/crd/bases/rhtas.redhat.com_rekors.yaml
@@ -91,7 +91,7 @@ spec:
             description: Enable Service monitors for rekor
             properties:
              enabled:
-                default: false
+                default: true
                description: If true, the Operator will create monitoring resources
                type: boolean
              x-kubernetes-validations:
diff --git a/config/crd/bases/rhtas.redhat.com_securesigns.yaml b/config/crd/bases/rhtas.redhat.com_securesigns.yaml
index b433ac80..9388ea61 100644
--- a/config/crd/bases/rhtas.redhat.com_securesigns.yaml
+++ b/config/crd/bases/rhtas.redhat.com_securesigns.yaml
@@ -355,7 +355,7 @@ spec:
                 description: Enable Service monitors for fulcio
                 properties:
                   enabled:
-                    default: false
+                    default: true
                     description: If true, the Operator will create monitoring resources
                     type: boolean
@@ -427,7 +427,7 @@ spec:
                 description: Enable Service monitors for rekor
                 properties:
                   enabled:
-                    default: false
+                    default: true
                     description: If true, the Operator will create monitoring resources
                     type: boolean
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 5e109341..95ed5a16 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -233,6 +233,7 @@ rules:
   verbs:
   - create
   - delete
+  - deletecollection
   - get
   - list
   - patch
@@ -245,6 +246,7 @@ rules:
   verbs:
   - create
   - delete
+  - deletecollection
   - get
   - list
   - patch
@@ -257,6 +259,7 @@ rules:
   verbs:
   - create
   - delete
+  - deletecollection
   - get
   - list
   - patch
diff --git a/config/samples/rhtas_v1alpha1_securesign.yaml b/config/samples/rhtas_v1alpha1_securesign.yaml
index 021d7f94..4c31bed9 100644
--- a/config/samples/rhtas_v1alpha1_securesign.yaml
+++ b/config/samples/rhtas_v1alpha1_securesign.yaml
@@ -12,7 +12,7 @@ spec:
     externalAccess:
       enabled: true
     monitoring:
-      enabled: false
+      enabled: true
   trillian:
     database:
       create: true
@@ -30,7 +30,7 @@ spec:
       organizationEmail: jdoe@redhat.com
       commonName: fulcio.hostname
     monitoring:
-      enabled: false
+      enabled: true
   tuf:
     externalAccess:
       enabled: true
diff --git a/controllers/securesign/actions/rbac.go b/controllers/securesign/actions/rbac.go
index e8f81b37..6de13a53 100644
--- a/controllers/securesign/actions/rbac.go
+++ b/controllers/securesign/actions/rbac.go
@@ -9,14 +9,21 @@ import (
     "github.com/securesign/operator/controllers/common/action"
     "github.com/securesign/operator/controllers/common/utils/kubernetes"
     "github.com/securesign/operator/controllers/constants"
+    "github.com/securesign/operator/controllers/rekor/actions"
     v1 "k8s.io/api/core/v1"
     rbacv1 "k8s.io/api/rbac/v1"
     "k8s.io/apimachinery/pkg/api/meta"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    ctrl "sigs.k8s.io/controller-runtime"
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 )
 
-const namespacedNamePattern = SegmentRBACName + "-%s"
+const (
+    namespacedNamePattern   = SegmentRBACName + "-%s"
+    clusterWideNamePattern  = SegmentRBACName + "-%s" + "-%s"
+    OpenshiftMonitoringNS   = "openshift-monitoring"
+    openshiftConsoleNS      = "openshift-console"
+    OpenshiftUWMonitoringNS = "openshift-user-workload-monitoring"
+)
 
 func NewRBACAction() action.Action[rhtasv1alpha1.Securesign] {
     return &rbacAction{}
@@ -47,87 +54,208 @@ func (i rbacAction) Handle(ctx context.Context, instance *rhtasv1alpha1.Securesign
     }
 
     var err error
-    labels := constants.LabelsFor(SegmentBackupCronJobName, SegmentBackupCronJobName, instance.Name)
+    labels := constants.LabelsFor(SegmentBackupJobName, SegmentBackupCronJobName, instance.Name)
     labels["app.kubernetes.io/instance-namespace"] = instance.Namespace
 
-    sa := &v1.ServiceAccount{
+    serviceAccount := &v1.ServiceAccount{
         ObjectMeta: metav1.ObjectMeta{
             Name:      SegmentRBACName,
             Namespace: instance.Namespace,
             Labels:    labels,
         },
     }
-
-    if err = ctrl.SetControllerReference(instance, sa, i.Client.Scheme()); err != nil {
-        return i.Failed(fmt.Errorf("could not set controll reference for SA: %w", err))
+    if err = controllerutil.SetControllerReference(instance, serviceAccount, i.Client.Scheme()); err != nil {
+        return i.Failed(fmt.Errorf("could not set controller reference for serviceAccount: %w", err))
+    }
+    if _, err = i.Ensure(ctx, serviceAccount); err != nil {
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    actions.ServerCondition,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    constants.Ready,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
+        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create serviceAccount: %w", err), instance)
     }
 
-    // don't re-enqueue for RBAC in any case (except failure)
-    _, err = i.Ensure(ctx, sa)
-    if err != nil {
+    openshiftMonitoringSBJRole := kubernetes.CreateRole(
+        OpenshiftMonitoringNS,
+        fmt.Sprintf(namespacedNamePattern, instance.Namespace),
+        labels,
+        []rbacv1.PolicyRule{
+            {
+                APIGroups: []string{""},
+                Resources: []string{"configmaps"},
+                Verbs:     []string{"get", "list", "patch"},
+            },
+            {
+                APIGroups: []string{"route.openshift.io"},
+                Resources: []string{"routes"},
+                Verbs:     []string{"get", "list"},
+            },
+        })
+    if _, err = i.Ensure(ctx, openshiftMonitoringSBJRole); err != nil {
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    actions.ServerCondition,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
         meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
             Type:    constants.Ready,
             Status:  metav1.ConditionFalse,
             Reason:  constants.Failure,
             Message: err.Error(),
         })
-        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create SA: %w", err), instance)
+        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create openshift-monitoring role for SBJ: %w", err), instance)
     }
 
-    role := kubernetes.CreateClusterRole(SegmentRBACName, constants.LabelsRHTAS(), []rbacv1.PolicyRule{
-        {
-            APIGroups: []string{""},
-            Resources: []string{"secrets"},
-            Verbs:     []string{"get", "list"},
+    openshiftMonitoringSBJRoleBinding := kubernetes.CreateRoleBinding(
+        OpenshiftMonitoringNS,
+        fmt.Sprintf(namespacedNamePattern, instance.Namespace),
+        labels,
+        rbacv1.RoleRef{
+            APIGroup: v1.SchemeGroupVersion.Group,
+            Kind:     "Role",
+            Name:     fmt.Sprintf(namespacedNamePattern, instance.Namespace),
         },
-        {
-            APIGroups: []string{""},
-            Resources: []string{"configmaps"},
-            Verbs:     []string{"get", "list"},
-        },
-        {
-            APIGroups: []string{"route.openshift.io"},
-            Resources: []string{"routes"},
-            Verbs:     []string{"get", "list"},
-        },
-        {
-            APIGroups: []string{"operator.openshift.io"},
-            Resources: []string{"consoles"},
-            Verbs:     []string{"get", "list"},
-        },
-    })
-
-    _, err = i.Ensure(ctx, role)
+        []rbacv1.Subject{
+            {Kind: "ServiceAccount", Name: SegmentRBACName, Namespace: instance.Namespace},
+        })
+    if _, err = i.Ensure(ctx, openshiftMonitoringSBJRoleBinding); err != nil {
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    actions.ServerCondition,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    constants.Ready,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
+        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create openshift-monitoring role binding for SBJ: %w", err), instance)
+    }
 
-    if err != nil {
+    openshiftUWMonitoringSBJRole := kubernetes.CreateRole(
+        OpenshiftUWMonitoringNS,
+        fmt.Sprintf(namespacedNamePattern, instance.Namespace),
+        labels,
+        []rbacv1.PolicyRule{
+            {
+                APIGroups: []string{""},
+                Resources: []string{"secrets"},
+                Verbs:     []string{"get", "list"},
+            },
+        })
+    if _, err = i.Ensure(ctx, openshiftUWMonitoringSBJRole); err != nil {
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    actions.ServerCondition,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
         meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
             Type:    constants.Ready,
             Status:  metav1.ConditionFalse,
             Reason:  constants.Failure,
             Message: err.Error(),
         })
-        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create clusterrole required for SBJ: %w", err), instance)
+        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create openshift-user-workload-monitoring role for SBJ: %w", err), instance)
     }
 
-    rb := kubernetes.CreateClusterRoleBinding(fmt.Sprintf(namespacedNamePattern, instance.Namespace), labels, rbacv1.RoleRef{
-        APIGroup: v1.SchemeGroupVersion.Group,
-        Kind:     "ClusterRole",
-        Name:     SegmentRBACName,
-    },
+    openshiftUWMonitoringDBJRolebinding := kubernetes.CreateRoleBinding(
+        OpenshiftUWMonitoringNS,
+        fmt.Sprintf(namespacedNamePattern, instance.Namespace),
+        labels,
+        rbacv1.RoleRef{
+            APIGroup: v1.SchemeGroupVersion.Group,
+            Kind:     "Role",
+            Name:     fmt.Sprintf(namespacedNamePattern, instance.Namespace),
+        },
         []rbacv1.Subject{
             {Kind: "ServiceAccount", Name: SegmentRBACName, Namespace: instance.Namespace},
         })
+    if _, err = i.Ensure(ctx, openshiftUWMonitoringDBJRolebinding); err != nil {
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    actions.ServerCondition,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    constants.Ready,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
+        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create openshift-user-workload-monitoring role binding for SBJ: %w", err), instance)
+    }
 
-    _, err = i.Ensure(ctx, rb)
+    openshiftConsoleSBJRole := kubernetes.CreateClusterRole(
+        fmt.Sprintf(clusterWideNamePattern, instance.Namespace, "clusterRole"),
+        labels,
+        []rbacv1.PolicyRule{
+            {
+                APIGroups:     []string{"operator.openshift.io"},
+                Resources:     []string{"consoles"},
+                Verbs:         []string{"get", "list"},
+                ResourceNames: []string{"cluster"},
+            },
+            {
+                APIGroups:     []string{"route.openshift.io"},
+                Resources:     []string{"routes"},
+                Verbs:         []string{"get", "list"},
+                ResourceNames: []string{"console"},
+            },
+        })
+    if _, err = i.Ensure(ctx, openshiftConsoleSBJRole); err != nil {
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    actions.ServerCondition,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    constants.Ready,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
+        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create openshift-console ClusterRole for SBJ: %w", err), instance)
+    }
 
-    if err != nil {
+    openshiftConsoleSBJRolebinding := kubernetes.CreateClusterRoleBinding(
+        fmt.Sprintf(clusterWideNamePattern, instance.Namespace, "clusterRoleBinding"),
+        labels,
+        rbacv1.RoleRef{
+            APIGroup: v1.SchemeGroupVersion.Group,
+            Kind:     "ClusterRole",
+            Name:     fmt.Sprintf(clusterWideNamePattern, instance.Namespace, "clusterRole"),
+        },
+        []rbacv1.Subject{
+            {Kind: "ServiceAccount", Name: SegmentRBACName, Namespace: instance.Namespace},
+        })
+    if _, err = i.Ensure(ctx, openshiftConsoleSBJRolebinding); err != nil {
+        meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+            Type:    actions.ServerCondition,
+            Status:  metav1.ConditionFalse,
+            Reason:  constants.Failure,
+            Message: err.Error(),
+        })
         meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
             Type:    constants.Ready,
             Status:  metav1.ConditionFalse,
             Reason:  constants.Failure,
             Message: err.Error(),
         })
-        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create clusterrolebinding required for SBJ: %w", err), instance)
+        return i.FailedWithStatusUpdate(ctx, fmt.Errorf("could not create openshift-console ClusterRoleBinding for SBJ: %w", err), instance)
     }
 
     return i.Continue()
diff --git a/controllers/securesign/securesign_controller.go b/controllers/securesign/securesign_controller.go
index 3ad9bc96..4e78472b 100644
--- a/controllers/securesign/securesign_controller.go
+++ b/controllers/securesign/securesign_controller.go
@@ -55,10 +55,10 @@ type SecuresignReconciler struct {
 //+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=networking,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
-//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create;update;patch;delete;deletecollection
 //+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create;update;patch;delete;deletecollection
-//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;update;patch;delete
-//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete;deletecollection
 //+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;watch;create;update;patch;delete
@@ -92,11 +92,28 @@ func (r *SecuresignReconciler) Reconcile(ctx context.Context, req ctrl.Request)
     }
 
     if instance.DeletionTimestamp != nil {
-        labels := constants.LabelsFor(actions.SegmentBackupCronJobName, actions.SegmentBackupCronJobName, instance.Name)
+        labels := constants.LabelsFor(actions.SegmentBackupJobName, actions.SegmentBackupCronJobName, instance.Name)
         labels["app.kubernetes.io/instance-namespace"] = instance.Namespace
         if err := r.Client.DeleteAllOf(ctx, &v1.ClusterRoleBinding{}, client.MatchingLabels(labels)); err != nil {
             log.Error(err, "problem with removing clusterRoleBinding resource")
         }
+        if err := r.Client.DeleteAllOf(ctx, &v1.ClusterRole{}, client.MatchingLabels(labels)); err != nil {
+            log.Error(err, "problem with removing ClusterRole resource")
+        }
+
+        monitoringNS := []string{
+            actions.OpenshiftMonitoringNS,
+            actions.OpenshiftUWMonitoringNS,
+        }
+        for _, ns := range monitoringNS {
+            if err := r.Client.DeleteAllOf(ctx, &v1.Role{}, client.InNamespace(ns), client.MatchingLabels(labels)); err != nil {
+                log.Error(err, "problem with removing Role resource in %s", ns)
+            }
+            if err := r.Client.DeleteAllOf(ctx, &v1.RoleBinding{}, client.InNamespace(ns), client.MatchingLabels(labels)); err != nil {
+                log.Error(err, "problem with removing RoleBinding resource in %s", ns)
+            }
+        }
+
         controllerutil.RemoveFinalizer(target, finalizer)
         return ctrl.Result{}, r.Update(ctx, target)
     }