diff --git a/controllers/workspace/devworkspace_controller.go b/controllers/workspace/devworkspace_controller.go index f5727ec04..9a317ec5a 100644 --- a/controllers/workspace/devworkspace_controller.go +++ b/controllers/workspace/devworkspace_controller.go @@ -70,8 +70,9 @@ const ( // DevWorkspaceReconciler reconciles a DevWorkspace object type DevWorkspaceReconciler struct { client.Client - Log logr.Logger - Scheme *runtime.Scheme + NonCachingClient client.Client + Log logr.Logger + Scheme *runtime.Scheme } /////// CRD-related RBAC roles @@ -84,6 +85,7 @@ type DevWorkspaceReconciler struct { // +kubebuilder:rbac:groups="",resources=namespaces;events,verbs=get;list;watch // +kubebuilder:rbac:groups="batch",resources=jobs,verbs=get;create;list;watch;update;patch;delete // +kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=mutatingwebhookconfigurations;validatingwebhookconfigurations,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews;localsubjectaccessreviews,verbs=create // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings;clusterroles;clusterrolebindings,verbs=get;list;watch;create;update // +kubebuilder:rbac:groups=oauth.openshift.io,resources=oauthclients,verbs=get;list;watch;create;update;patch;delete;deletecollection // +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;create @@ -98,10 +100,11 @@ type DevWorkspaceReconciler struct { func (r *DevWorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (reconcileResult ctrl.Result, err error) { reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) clusterAPI := sync.ClusterAPI{ - Client: r.Client, - Scheme: r.Scheme, - Logger: reqLogger, - Ctx: ctx, + Client: r.Client, + NonCachingClient: r.NonCachingClient, + Scheme: r.Scheme, + Logger: reqLogger, + Ctx: ctx, } // Fetch the Workspace instance @@ -257,7 +260,7 
@@ func (r *DevWorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request // Set finalizer on DevWorkspace if necessary // Note: we need to check the flattened workspace to see if a finalizer is needed, as plugins could require storage - if isFinalizerNecessary(workspace, storageProvisioner) { + if storageProvisioner.NeedsStorage(&workspace.Spec.Template) { coputil.AddFinalizer(clusterWorkspace, storageCleanupFinalizer) if err := r.Update(ctx, clusterWorkspace); err != nil { return reconcile.Result{}, err @@ -354,7 +357,9 @@ func (r *DevWorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request } serviceAcctStatus := wsprovision.SyncServiceAccount(workspace, saAnnotations, clusterAPI) if !serviceAcctStatus.Continue { - // FailStartup is not possible for generating the serviceaccount + if serviceAcctStatus.FailStartup { + return r.failWorkspace(workspace, serviceAcctStatus.Message, metrics.ReasonBadRequest, reqLogger, &reconcileStatus) + } reqLogger.Info("Waiting for workspace ServiceAccount") reconcileStatus.setConditionFalse(dw.DevWorkspaceServiceAccountReady, "Waiting for DevWorkspace ServiceAccount") if !serviceAcctStatus.Requeue && serviceAcctStatus.Err == nil { @@ -362,6 +367,13 @@ func (r *DevWorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request } return reconcile.Result{Requeue: serviceAcctStatus.Requeue}, serviceAcctStatus.Err } + if wsprovision.NeedsServiceAccountFinalizer(&workspace.Spec.Template) { + coputil.AddFinalizer(clusterWorkspace, serviceAccountCleanupFinalizer) + if err := r.Update(ctx, clusterWorkspace); err != nil { + return reconcile.Result{}, err + } + } + serviceAcctName := serviceAcctStatus.ServiceAccountName reconcileStatus.setConditionTrue(dw.DevWorkspaceServiceAccountReady, "DevWorkspace serviceaccount ready") diff --git a/controllers/workspace/finalize.go b/controllers/workspace/finalize.go index d28a17723..a7c1d3275 100644 --- a/controllers/workspace/finalize.go +++ 
b/controllers/workspace/finalize.go @@ -33,13 +33,23 @@ import ( ) const ( - storageCleanupFinalizer = "storage.controller.devfile.io" + storageCleanupFinalizer = "storage.controller.devfile.io" + serviceAccountCleanupFinalizer = "serviceaccount.controller.devfile.io" ) -func (r *DevWorkspaceReconciler) finalize(ctx context.Context, log logr.Logger, workspace *dw.DevWorkspace) (reconcile.Result, error) { - if !coputil.HasFinalizer(workspace, storageCleanupFinalizer) { - return reconcile.Result{}, nil +func (r *DevWorkspaceReconciler) workspaceNeedsFinalize(workspace *dw.DevWorkspace) bool { + for _, finalizer := range workspace.Finalizers { + if finalizer == storageCleanupFinalizer { + return true + } + if finalizer == serviceAccountCleanupFinalizer { + return true + } } + return false +} + +func (r *DevWorkspaceReconciler) finalize(ctx context.Context, log logr.Logger, workspace *dw.DevWorkspace) (reconcile.Result, error) { workspace.Status.Message = "Cleaning up resources for deletion" workspace.Status.Phase = devworkspacePhaseTerminating err := r.Client.Status().Update(ctx, workspace) @@ -47,6 +57,18 @@ func (r *DevWorkspaceReconciler) finalize(ctx context.Context, log logr.Logger, return reconcile.Result{}, err } + for _, finalizer := range workspace.Finalizers { + switch finalizer { + case storageCleanupFinalizer: + return r.finalizeStorage(ctx, log, workspace) + case serviceAccountCleanupFinalizer: + return r.finalizeServiceAccount(ctx, log, workspace) + } + } + return reconcile.Result{}, nil +} + +func (r *DevWorkspaceReconciler) finalizeStorage(ctx context.Context, log logr.Logger, workspace *dw.DevWorkspace) (reconcile.Result, error) { // Need to make sure Deployment is cleaned up before starting job to avoid mounting issues for RWO PVCs wait, err := wsprovision.DeleteWorkspaceDeployment(ctx, workspace, r.Client) if err != nil { @@ -98,8 +120,20 @@ func (r *DevWorkspaceReconciler) finalize(ctx context.Context, log logr.Logger, return reconcile.Result{}, 
r.Update(ctx, workspace) } -func isFinalizerNecessary(workspace *dw.DevWorkspace, provisioner storage.Provisioner) bool { - return provisioner.NeedsStorage(&workspace.Spec.Template) +func (r *DevWorkspaceReconciler) finalizeServiceAccount(ctx context.Context, log logr.Logger, workspace *dw.DevWorkspace) (reconcile.Result, error) { + retry, err := wsprovision.FinalizeServiceAccount(workspace, ctx, r.NonCachingClient) + if err != nil { + log.Error(err, "Failed to finalize workspace ServiceAccount") + failedStatus := currentStatus{phase: dw.DevWorkspaceStatusError} + failedStatus.setConditionTrue(dw.DevWorkspaceError, err.Error()) + return r.updateWorkspaceStatus(workspace, r.Log, &failedStatus, reconcile.Result{}, nil) + } + if retry { + return reconcile.Result{Requeue: true}, nil + } + log.Info("ServiceAccount clean up successful; clearing finalizer") + coputil.RemoveFinalizer(workspace, serviceAccountCleanupFinalizer) + return reconcile.Result{}, r.Update(ctx, workspace) } func (r *DevWorkspaceReconciler) namespaceIsTerminating(ctx context.Context, namespace string) (bool, error) { diff --git a/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml b/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml index cdf347aeb..b6ecee34e 100644 --- a/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml +++ b/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml @@ -180,6 +180,13 @@ spec: - get - list - watch + - apiGroups: + - authorization.k8s.io + resources: + - localsubjectaccessreviews + - subjectaccessreviews + verbs: + - create - apiGroups: - batch resources: diff --git a/deploy/deployment/kubernetes/combined.yaml b/deploy/deployment/kubernetes/combined.yaml index cb05d3453..230b0f569 100644 --- a/deploy/deployment/kubernetes/combined.yaml +++ b/deploy/deployment/kubernetes/combined.yaml @@ -19808,6 +19808,13 @@ rules: - get - list - watch +- apiGroups: + - authorization.k8s.io + resources: + 
- localsubjectaccessreviews + - subjectaccessreviews + verbs: + - create - apiGroups: - batch resources: diff --git a/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml b/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml index 39ac50e5a..38829dceb 100644 --- a/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml +++ b/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml @@ -107,6 +107,13 @@ rules: - get - list - watch +- apiGroups: + - authorization.k8s.io + resources: + - localsubjectaccessreviews + - subjectaccessreviews + verbs: + - create - apiGroups: - batch resources: diff --git a/deploy/deployment/openshift/combined.yaml b/deploy/deployment/openshift/combined.yaml index 10297864d..ed316fa6f 100644 --- a/deploy/deployment/openshift/combined.yaml +++ b/deploy/deployment/openshift/combined.yaml @@ -19808,6 +19808,13 @@ rules: - get - list - watch +- apiGroups: + - authorization.k8s.io + resources: + - localsubjectaccessreviews + - subjectaccessreviews + verbs: + - create - apiGroups: - batch resources: diff --git a/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml b/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml index 39ac50e5a..38829dceb 100644 --- a/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml +++ b/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml @@ -107,6 +107,13 @@ rules: - get - list - watch +- apiGroups: + - authorization.k8s.io + resources: + - localsubjectaccessreviews + - subjectaccessreviews + verbs: + - create - apiGroups: - batch resources: diff --git a/deploy/templates/components/rbac/role.yaml b/deploy/templates/components/rbac/role.yaml index 375e663cd..db454dfab 100644 --- a/deploy/templates/components/rbac/role.yaml +++ b/deploy/templates/components/rbac/role.yaml @@ -106,6 +106,13 @@ 
rules: - get - list - watch +- apiGroups: + - authorization.k8s.io + resources: + - localsubjectaccessreviews + - subjectaccessreviews + verbs: + - create - apiGroups: - batch resources: diff --git a/main.go b/main.go index 0648dbf5c..2d1c0210a 100644 --- a/main.go +++ b/main.go @@ -38,6 +38,7 @@ import ( oauthv1 "github.com/openshift/api/oauth/v1" routev1 "github.com/openshift/api/route/v1" + securityv1 "github.com/openshift/api/security/v1" templatev1 "github.com/openshift/api/template/v1" corev1 "k8s.io/api/core/v1" k8sruntime "k8s.io/apimachinery/pkg/runtime" @@ -75,6 +76,9 @@ func init() { utilruntime.Must(routev1.Install(scheme)) utilruntime.Must(templatev1.Install(scheme)) utilruntime.Must(oauthv1.Install(scheme)) + // Enable controller to manage SCCs in OpenShift; permissions to do this are not requested + // by default and must be added by a cluster-admin. + utilruntime.Must(securityv1.Install(scheme)) } // +kubebuilder:scaffold:scheme @@ -122,6 +126,12 @@ func main() { os.Exit(1) } + nonCachingClient, err := client.New(mgr.GetConfig(), client.Options{Scheme: scheme}) + if err != nil { + setupLog.Error(err, "unable to initialize non-caching client") + os.Exit(1) + } + // Index Events on involvedObject.name to allow us to get events involving a DevWorkspace's pod(s). This is used to // check for issues that prevent the pod from starting, so that DevWorkspaces aren't just hanging indefinitely. 
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Event{}, "involvedObject.name", func(obj client.Object) []string { @@ -143,9 +153,10 @@ func main() { os.Exit(1) } if err = (&workspacecontroller.DevWorkspaceReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("DevWorkspace"), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + NonCachingClient: nonCachingClient, + Log: ctrl.Log.WithName("controllers").WithName("DevWorkspace"), + Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "DevWorkspace") os.Exit(1) diff --git a/pkg/constants/attributes.go b/pkg/constants/attributes.go index 33adfb7e9..f91c8d365 100644 --- a/pkg/constants/attributes.go +++ b/pkg/constants/attributes.go @@ -37,6 +37,20 @@ const ( // value: VAL_2 WorkspaceEnvAttribute = "workspaceEnv" + // WorkspaceSCCAttribute defines additional SCCs that should be added to the DevWorkspace. The user adding + // this attribute to a workspace must have the RBAC permissions to "use" the SCC with the given name. For example, + // to add the 'anyuid' SCC to the workspace Pod, the DevWorkspace should contain + // + // spec: + // template: + // attributes: + // controller.devfile.io/scc: "anyuid" + // + // Creating a workspace with this attribute, or updating an existing workspace to include this attribute will fail + // if the user making the request does not have the "use" permission for the "anyuid" SCC. + // Only supported on OpenShift. + WorkspaceSCCAttribute = "controller.devfile.io/scc" + // ProjectCloneAttribute configures how the DevWorkspace will treat project cloning. By default, an init container // will be added to the workspace deployment to clone projects to the workspace before it starts. This attribute // must be applied to top-level attributes field in the DevWorkspace. 
diff --git a/pkg/provision/sync/cluster_api.go b/pkg/provision/sync/cluster_api.go index a8a7bbdf9..85ce31f31 100644 --- a/pkg/provision/sync/cluster_api.go +++ b/pkg/provision/sync/cluster_api.go @@ -24,10 +24,11 @@ import ( ) type ClusterAPI struct { - Client crclient.Client - Scheme *runtime.Scheme - Logger logr.Logger - Ctx context.Context + Client crclient.Client + NonCachingClient crclient.Client + Scheme *runtime.Scheme + Logger logr.Logger + Ctx context.Context } // NotInSyncError is returned when a spec object is out-of-sync with its cluster counterpart diff --git a/pkg/provision/workspace/serviceaccount.go b/pkg/provision/workspace/serviceaccount.go index 6f2858c75..3aafddaf0 100644 --- a/pkg/provision/workspace/serviceaccount.go +++ b/pkg/provision/workspace/serviceaccount.go @@ -16,11 +16,18 @@ package workspace import ( + "context" + "fmt" + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" "github.com/devfile/devworkspace-operator/pkg/constants" "github.com/devfile/devworkspace-operator/pkg/provision/sync" + securityv1 "github.com/openshift/api/security/v1" corev1 "k8s.io/api/core/v1" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + crclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "github.com/devfile/devworkspace-operator/pkg/common" @@ -74,11 +81,22 @@ func SyncServiceAccount( case *sync.NotInSyncError: return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{Requeue: true}} case *sync.UnrecoverableSyncError: - return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{FailStartup: true, Err: t.Cause}} + return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{FailStartup: true, Message: t.Cause.Error()}} default: return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{Err: err}} } + if 
workspace.Spec.Template.Attributes.Exists(constants.WorkspaceSCCAttribute) { + sccName := workspace.Spec.Template.Attributes.GetString(constants.WorkspaceSCCAttribute, nil) + retry, err := addSCCToServiceAccount(specSA.Name, specSA.Namespace, sccName, clusterAPI) + if err != nil { + return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{FailStartup: true, Message: err.Error()}} + } + if retry { + return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{Requeue: true}} + } + } + return ServiceAcctProvisioningStatus{ ProvisioningStatus: ProvisioningStatus{ Continue: true, @@ -86,3 +104,105 @@ func SyncServiceAccount( ServiceAccountName: saName, } } + +func NeedsServiceAccountFinalizer(workspace *dw.DevWorkspaceTemplateSpec) bool { + if !workspace.Attributes.Exists(constants.WorkspaceSCCAttribute) { + return false + } + if workspace.Attributes.GetString(constants.WorkspaceSCCAttribute, nil) == "" { + return false + } + return true +} + +func FinalizeServiceAccount(workspace *dw.DevWorkspace, ctx context.Context, nonCachingClient crclient.Client) (retry bool, err error) { + saName := common.ServiceAccountName(workspace.Status.DevWorkspaceId) + namespace := workspace.Namespace + if !workspace.Spec.Template.Attributes.Exists(constants.WorkspaceSCCAttribute) { + return false, nil + } + sccName := workspace.Spec.Template.Attributes.GetString(constants.WorkspaceSCCAttribute, nil) + + return removeSCCFromServiceAccount(saName, namespace, sccName, ctx, nonCachingClient) +} + +func addSCCToServiceAccount(saName, namespace, sccName string, clusterAPI sync.ClusterAPI) (retry bool, err error) { + serviceaccount := fmt.Sprintf("system:serviceaccount:%s:%s", namespace, saName) + + scc := &securityv1.SecurityContextConstraints{} + if err := clusterAPI.NonCachingClient.Get(clusterAPI.Ctx, types.NamespacedName{Name: sccName}, scc); err != nil { + switch { + case k8sErrors.IsForbidden(err): + return false, fmt.Errorf("operator does not have 
permissions to get the '%s' SecurityContextConstraints", sccName) + case k8sErrors.IsNotFound(err): + return false, fmt.Errorf("requested SecurityContextConstraints '%s' not found on cluster", sccName) + default: + return false, err + } + } + + for _, user := range scc.Users { + if user == serviceaccount { + // This serviceaccount is already added to the SCC + return false, nil + } + } + + scc.Users = append(scc.Users, serviceaccount) + if err := clusterAPI.NonCachingClient.Update(clusterAPI.Ctx, scc); err != nil { + switch { + case k8sErrors.IsForbidden(err): + return false, fmt.Errorf("operator does not have permissions to update the '%s' SecurityContextConstraints", sccName) + case k8sErrors.IsConflict(err): + return true, nil + default: + return false, err + } + } + + return false, nil +} + +func removeSCCFromServiceAccount(saName, namespace, sccName string, ctx context.Context, nonCachingClient crclient.Client) (retry bool, err error) { + serviceaccount := fmt.Sprintf("system:serviceaccount:%s:%s", namespace, saName) + + scc := &securityv1.SecurityContextConstraints{} + if err := nonCachingClient.Get(ctx, types.NamespacedName{Name: sccName}, scc); err != nil { + switch { + case k8sErrors.IsForbidden(err): + return false, fmt.Errorf("operator does not have permissions to get the '%s' SecurityContextConstraints", sccName) + case k8sErrors.IsNotFound(err): + return false, fmt.Errorf("requested SecurityContextConstraints '%s' not found on cluster", sccName) + default: + return false, err + } + } + + found := false + var newUsers []string + for _, user := range scc.Users { + if user == serviceaccount { + found = true + continue + } + newUsers = append(newUsers, user) + } + if !found { + return false, err + } + + scc.Users = newUsers + + if err := nonCachingClient.Update(ctx, scc); err != nil { + switch { + case k8sErrors.IsForbidden(err): + return false, fmt.Errorf("operator does not have permissions to update the '%s' SecurityContextConstraints", sccName) + case 
k8sErrors.IsConflict(err): + return true, nil + default: + return false, err + } + } + + return false, nil +} diff --git a/pkg/webhook/cluster_roles.go b/pkg/webhook/cluster_roles.go index c6229eecc..7d4d75680 100755 --- a/pkg/webhook/cluster_roles.go +++ b/pkg/webhook/cluster_roles.go @@ -120,6 +120,7 @@ func getSpecClusterRole() (*v1.ClusterRole, error) { }, Resources: []string{ "subjectaccessreviews", + "localsubjectaccessreviews", }, Verbs: []string{ "create", diff --git a/webhook/workspace/handler/access_control.go b/webhook/workspace/handler/access_control.go new file mode 100644 index 000000000..008c26458 --- /dev/null +++ b/webhook/workspace/handler/access_control.go @@ -0,0 +1,109 @@ +// Copyright (c) 2019-2021 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package handler + +import ( + "context" + "fmt" + + dwv2 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/devfile/devworkspace-operator/pkg/constants" + "github.com/devfile/devworkspace-operator/pkg/infrastructure" + v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// validateUserPermissions validates that the user making the request has permissions to create/update the given workspace. +// In case we're validating a workspace on creation, parameter oldWksp should be set to nil. 
Returns an error if the user +// cannot perform the requested changes, or if an unexpected error occurs. +// Note: we only perform validation on v1alpha2 DevWorkspaces at the moment, as v1alpha1 DevWorkspaces do not support attributes. +func (h *WebhookHandler) validateUserPermissions(ctx context.Context, req admission.Request, newWksp, oldWksp *dwv2.DevWorkspace) error { + if !newWksp.Spec.Template.Attributes.Exists(constants.WorkspaceSCCAttribute) { + // Workspace is not requesting anything we need to check RBAC for. + return nil + } + + var attributeDecodeErr error + newSCCAttr := newWksp.Spec.Template.Attributes.GetString(constants.WorkspaceSCCAttribute, &attributeDecodeErr) + if attributeDecodeErr != nil { + return fmt.Errorf("failed to read %s attribute in DevWorkspace: %s", constants.WorkspaceSCCAttribute, attributeDecodeErr) + } + + if oldWksp != nil && oldWksp.Spec.Template.Attributes.Exists(constants.WorkspaceSCCAttribute) { + // If we're updating a DevWorkspace, check RBAC only if the relevant attribute is modified to avoid performing too many SARs. + oldSCCAttr := oldWksp.Spec.Template.Attributes.GetString(constants.WorkspaceSCCAttribute, &attributeDecodeErr) + if attributeDecodeErr != nil { + return fmt.Errorf("failed to read %s attribute in DevWorkspace: %s", constants.WorkspaceSCCAttribute, attributeDecodeErr) + } + if oldSCCAttr == newSCCAttr { + // RBAC has already been checked for this setting, don't recheck + return nil + } + if oldSCCAttr != newSCCAttr { + // Don't allow attribute to be changed once it is set, otherwise we can't clean up the SCC when the workspace is deleted. 
+ return fmt.Errorf("%s attribute cannot be modified after being set -- workspace must be deleted", constants.WorkspaceSCCAttribute) + } + } + + if err := h.validateOpenShiftSCC(ctx, req, newSCCAttr); err != nil { + return err + } + + return nil +} + +func (h *WebhookHandler) validateOpenShiftSCC(ctx context.Context, req admission.Request, scc string) error { + if !infrastructure.IsOpenShift() { + // We can only check the appropriate capability on OpenShift currently (via securitycontextconstraints.security.openshift.io) + // so forbid using this attribute + return fmt.Errorf("specifying additional SCCs is only permitted on OpenShift") + } + + if scc == "" { + return fmt.Errorf("empty value for attribute %s is invalid", constants.WorkspaceSCCAttribute) + } + + sar := &v1.LocalSubjectAccessReview{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: req.Namespace, + }, + Spec: v1.SubjectAccessReviewSpec{ + ResourceAttributes: &v1.ResourceAttributes{ + Namespace: req.Namespace, + Verb: "use", + Group: "security.openshift.io", + Resource: "securitycontextconstraints", + Name: scc, + }, + User: req.UserInfo.Username, + Groups: req.UserInfo.Groups, + UID: req.UserInfo.UID, + }, + } + + err := h.Client.Create(ctx, sar) + if err != nil { + return fmt.Errorf("failed to create subjectaccessreview for request: %w", err) + } + + if !sar.Status.Allowed { + if sar.Status.Reason != "" { + return fmt.Errorf("user is not permitted to use the %s SCC: %s", scc, sar.Status.Reason) + } + return fmt.Errorf("user is not permitted to use the %s SCC", scc) + } + + return nil +} diff --git a/webhook/workspace/handler/workspace.go b/webhook/workspace/handler/workspace.go index 35e11722b..9f3d2d94b 100644 --- a/webhook/workspace/handler/workspace.go +++ b/webhook/workspace/handler/workspace.go @@ -40,7 +40,7 @@ func (h *WebhookHandler) MutateWorkspaceV1alpha1OnCreate(_ context.Context, req return h.returnPatched(req, wksp) } -func (h *WebhookHandler) MutateWorkspaceV1alpha2OnCreate(_ context.Context, 
req admission.Request) admission.Response { +func (h *WebhookHandler) MutateWorkspaceV1alpha2OnCreate(ctx context.Context, req admission.Request) admission.Response { wksp := &dwv2.DevWorkspace{} err := h.Decoder.Decode(req, wksp) if err != nil { @@ -49,6 +49,10 @@ func (h *WebhookHandler) MutateWorkspaceV1alpha2OnCreate(_ context.Context, req wksp.Labels = maputils.Append(wksp.Labels, constants.DevWorkspaceCreatorLabel, req.UserInfo.UID) + if err := h.validateUserPermissions(ctx, req, wksp, nil); err != nil { + return admission.Denied(err.Error()) + } + return h.returnPatched(req, wksp) } @@ -85,7 +89,7 @@ func (h *WebhookHandler) MutateWorkspaceV1alpha1OnUpdate(_ context.Context, req return admission.Allowed("new devworkspace has the same devworkspace creator as old one") } -func (h *WebhookHandler) MutateWorkspaceV1alpha2OnUpdate(_ context.Context, req admission.Request) admission.Response { +func (h *WebhookHandler) MutateWorkspaceV1alpha2OnUpdate(ctx context.Context, req admission.Request) admission.Response { newWksp := &dwv2.DevWorkspace{} oldWksp := &dwv2.DevWorkspace{} err := h.parse(req, oldWksp, newWksp) @@ -97,6 +101,10 @@ func (h *WebhookHandler) MutateWorkspaceV1alpha2OnUpdate(_ context.Context, req return admission.Denied(msg) } + if err := h.validateUserPermissions(ctx, req, newWksp, oldWksp); err != nil { + return admission.Denied(err.Error()) + } + oldCreator, found := oldWksp.Labels[constants.DevWorkspaceCreatorLabel] if !found { return admission.Denied(fmt.Sprintf("label '%s' is missing. Please recreate devworkspace to get it initialized", constants.DevWorkspaceCreatorLabel))