From 16d917a517cd7c08e6369977a5634386aefa3429 Mon Sep 17 00:00:00 2001
From: Jiaqi Luo
Date: Fri, 14 Feb 2025 15:56:11 -0700
Subject: [PATCH] add support for the version management feature on imported
 RKE2/K3s clusters

---
 docs.md                            |  16 +
 go.mod                             |   2 +
 .../v3/cluster/Cluster.md          |  16 +
 .../v3/cluster/mutator.go          |  64 +++-
 .../v3/cluster/mutator_test.go     |  61 ++++
 .../v3/cluster/validator.go        | 136 ++++++++-
 .../v3/cluster/validator_test.go   | 279 +++++++++++++++++-
 pkg/server/handlers.go             |   3 +
 8 files changed, 539 insertions(+), 38 deletions(-)

diff --git a/docs.md b/docs.md
index 8d329ae8b..f432453a6 100644
--- a/docs.md
+++ b/docs.md
@@ -89,6 +89,14 @@ If yes, the webhook redacts the role, so that it only grants a deletion permissi
 
 ## Cluster
 
+
+### Mutation Checks
+
+#### Feature: version management on imported RKE2/K3s clusters
+
+- When a cluster is created or updated, the webhook adds the `rancher.io/imported-cluster-version-management: system-default` annotation if the annotation is missing or its value is an empty string.
+
+
 ### Validation Checks
 
 #### Annotations validation
@@ -99,6 +107,14 @@ When a cluster is updated `field.cattle.io/creator-principal-name` and `field.ca
 
 If `field.cattle.io/no-creator-rbac` annotation is set, `field.cattle.io/creatorId` cannot be set.
 
+
+#### Feature: version management on imported RKE2/K3s clusters
+
+- When a cluster is created or updated, the `rancher.io/imported-cluster-version-management` annotation must be set to a valid value (`true`, `false`, or `system-default`).
+- If the annotation is present on any other type of cluster, the webhook permits the request with a warning that the annotation is intended for imported RKE2/K3s clusters and will not take effect on this cluster.
+- If version management is determined to be disabled, and the `.spec.rke2Config` or `.spec.k3sConfig` field exists in the new cluster object with a value different from the old one, the webhook permits the update with a warning that these changes will not take effect until version management is enabled for the cluster.
+- If version management is determined to be disabled, and the `.spec.rke2Config` or `.spec.k3sConfig` field is missing, the webhook permits the request so that users can remove the unused fields via the API or Terraform.
+
 
 ## ClusterProxyConfig
 
 ### Validation Checks
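As a reviewer's aid, the validation bullets above condense into a small decision table. Below is a minimal, self-contained Go sketch of that table; `admissionOutcome` and its inputs are hypothetical names introduced here for illustration (they are not part of this patch), and only the semantics are meant to mirror the documented rules. The actual logic lives in `validator.go` further down.

```go
package main

import "fmt"

// admissionOutcome is a hypothetical condensation of the documented rules.
// driver is the cluster's status.driver; annotation is the value of the
// rancher.io/imported-cluster-version-management annotation, "" meaning absent.
func admissionOutcome(driver, annotation string) (allowed bool, warning, reason string) {
	if driver != "rke2" && driver != "k3s" {
		// Not an imported RKE2/K3s cluster: always allowed, but warn if the
		// annotation is present, because it will not take effect here.
		if annotation != "" {
			return true, "annotation only takes effect on imported RKE2/K3s clusters", ""
		}
		return true, "", ""
	}
	switch annotation {
	case "true", "false", "system-default":
		return true, "", ""
	case "":
		return false, "", "annotation is missing"
	default:
		return false, "", "annotation must be true, false, or system-default"
	}
}

func main() {
	for _, tc := range [][2]string{
		{"rke2", "system-default"}, // allowed
		{"k3s", ""},                // rejected: missing annotation
		{"AKS", "false"},           // allowed with a warning
	} {
		ok, warn, reason := admissionOutcome(tc[0], tc[1])
		fmt.Printf("driver=%-4s anno=%-16q allowed=%-5v warning=%q reason=%q\n",
			tc[0], tc[1], ok, warn, reason)
	}
}
```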
diff --git a/go.mod b/go.mod
index 16c24de91..fb735108c 100644
--- a/go.mod
+++ b/go.mod
@@ -18,9 +18,11 @@ replace (
 	k8s.io/component-helpers => k8s.io/component-helpers v0.32.1
 	k8s.io/controller-manager => k8s.io/controller-manager v0.32.1
 	k8s.io/cri-api => k8s.io/cri-api v0.32.1
+	k8s.io/cri-client => k8s.io/cri-client v0.32.1
 	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.32.1
 	k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.32.1
 	k8s.io/endpointslice => k8s.io/endpointslice v0.32.1
+	k8s.io/externaljwt => k8s.io/externaljwt v0.32.1
 	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.32.1
 	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.32.1
 	k8s.io/kube-proxy => k8s.io/kube-proxy v0.32.1
diff --git a/pkg/resources/management.cattle.io/v3/cluster/Cluster.md b/pkg/resources/management.cattle.io/v3/cluster/Cluster.md
index 2400856be..0394a25ad 100644
--- a/pkg/resources/management.cattle.io/v3/cluster/Cluster.md
+++ b/pkg/resources/management.cattle.io/v3/cluster/Cluster.md
@@ -1,3 +1,11 @@
+
+## Mutation Checks
+
+### Feature: version management on imported RKE2/K3s clusters
+
+- When a cluster is created or updated, the webhook adds the `rancher.io/imported-cluster-version-management: system-default` annotation if the annotation is missing or its value is an empty string.
+
+
 ## Validation Checks
 
 ### Annotations validation
@@ -7,3 +15,11 @@ When a cluster is created and `field.cattle.io/creator-principal-name` annotatio
 When a cluster is updated `field.cattle.io/creator-principal-name` and `field.cattle.io/creatorId` annotations must stay the same or removed.
 
 If `field.cattle.io/no-creator-rbac` annotation is set, `field.cattle.io/creatorId` cannot be set.
+
+
+### Feature: version management on imported RKE2/K3s clusters
+
+- When a cluster is created or updated, the `rancher.io/imported-cluster-version-management` annotation must be set to a valid value (`true`, `false`, or `system-default`).
+- If the annotation is present on any other type of cluster, the webhook permits the request with a warning that the annotation is intended for imported RKE2/K3s clusters and will not take effect on this cluster.
+- If version management is determined to be disabled, and the `.spec.rke2Config` or `.spec.k3sConfig` field exists in the new cluster object with a value different from the old one, the webhook permits the update with a warning that these changes will not take effect until version management is enabled for the cluster.
+- If version management is determined to be disabled, and the `.spec.rke2Config` or `.spec.k3sConfig` field is missing, the webhook permits the request so that users can remove the unused fields via the API or Terraform.
diff --git a/pkg/resources/management.cattle.io/v3/cluster/mutator.go b/pkg/resources/management.cattle.io/v3/cluster/mutator.go
index ad219fae6..1603ba9fd 100644
--- a/pkg/resources/management.cattle.io/v3/cluster/mutator.go
+++ b/pkg/resources/management.cattle.io/v3/cluster/mutator.go
@@ -64,9 +64,34 @@ func (m *ManagementClusterMutator) Admit(request *admission.Request) (*admission
 	if err != nil {
 		return nil, fmt.Errorf("unable to re-marshal new cluster: %w", err)
 	}
+
+	err = m.mutatePSACT(oldCluster, newCluster, request.Operation)
+	if err != nil {
+		return nil, fmt.Errorf("failed to mutate PSACT: %w", err)
+	}
+
+	m.mutateVersionManagement(newCluster, request.Operation)
+
+	response := &admissionv1.AdmissionResponse{}
+	// we use the re-marshalled new cluster to make sure that the patch doesn't drop "unknown" fields which were
+	// in the json, but not in the cluster struct. This can occur due to out of date RKE versions
+	if err := patch.CreatePatch(newClusterRaw, newCluster, response); err != nil {
+		return nil, fmt.Errorf("failed to create patch: %w", err)
+	}
+	response.Allowed = true
+	return response, nil
+}
+
+// mutatePSACT updates the newCluster's Pod Security Admission (PSA) configuration based on changes to
+// the cluster's `DefaultPodSecurityAdmissionConfigurationTemplateName`.
+// It applies or removes the PSA plugin configuration depending on the operation and the current cluster state.
+func (m *ManagementClusterMutator) mutatePSACT(oldCluster, newCluster *apisv3.Cluster, operation admissionv1.Operation) error {
 	// no need to mutate the local cluster, or imported cluster which represents a KEv2 cluster (GKE/EKS/AKS) or v1 Provisioning Cluster
 	if newCluster.Name == "local" || newCluster.Spec.RancherKubernetesEngineConfig == nil {
-		return admission.ResponseAllowed(), nil
+		return nil
+	}
+	if operation != admissionv1.Update && operation != admissionv1.Create {
+		return nil
 	}
 	newTemplateName := newCluster.Spec.DefaultPodSecurityAdmissionConfigurationTemplateName
 	oldTemplateName := oldCluster.Spec.DefaultPodSecurityAdmissionConfigurationTemplateName
@@ -75,13 +100,11 @@ func (m *ManagementClusterMutator) Admit(request *admission.Request) (*admission
 	if newTemplateName != "" {
 		err := m.setPSAConfig(newCluster)
 		if err != nil && !apierrors.IsNotFound(err) {
-			return nil, fmt.Errorf("failed to set PSAconfig: %w", err)
+			return fmt.Errorf("failed to set PSAconfig: %w", err)
 		}
 	} else {
-		switch request.Operation {
-		case admissionv1.Create:
-			return admission.ResponseAllowed(), nil
-		case admissionv1.Update:
+		if operation == admissionv1.Update {
+			// The case of dropping the PSACT in the UPDATE operation:
 			// It is a valid use case where user switches from using PSACT to putting a PluginConfig for PSA under kube-api.AdmissionConfiguration,
 			// but it is not a valid use case where the PluginConfig for PSA has the same content as the one in the previous-set PSACT,
 			// so we need to drop it in this case.
@@ -97,15 +120,7 @@ func (m *ManagementClusterMutator) Admit(request *admission.Request) (*admission
 			}
 		}
 	}
-
-	response := &admissionv1.AdmissionResponse{}
-	// we use the re-marshalled new cluster to make sure that the patch doesn't drop "unknown" fields which were
-	// in the json, but not in the cluster struct. This can occur due to out of date RKE versions
-	if err := patch.CreatePatch(newClusterRaw, newCluster, response); err != nil {
-		return response, fmt.Errorf("failed to create patch: %w", err)
-	}
-	response.Allowed = true
-	return response, nil
+	return nil
 }
 
 // setPSAConfig makes sure that the PodSecurity config under the admission_configuration section matches the
@@ -135,3 +150,22 @@ func (m *ManagementClusterMutator) setPSAConfig(cluster *apisv3.Cluster) error {
 	cluster.Spec.RancherKubernetesEngineConfig.Services.KubeAPI.AdmissionConfiguration = admissionConfig
 	return nil
 }
+
+// mutateVersionManagement sets the version management annotation to "system-default" when it is
+// missing or has an empty value on an imported RKE2/K3s cluster.
+func (m *ManagementClusterMutator) mutateVersionManagement(cluster *apisv3.Cluster, operation admissionv1.Operation) {
+	if operation != admissionv1.Update && operation != admissionv1.Create {
+		return
+	}
+	if cluster.Status.Driver != apisv3.ClusterDriverRke2 && cluster.Status.Driver != apisv3.ClusterDriverK3s {
+		return
+	}
+
+	val, ok := cluster.Annotations[VersionManagementAnno]
+	if !ok || val == "" {
+		if cluster.Annotations == nil {
+			cluster.Annotations = make(map[string]string)
+		}
+		cluster.Annotations[VersionManagementAnno] = "system-default"
+	}
+}
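For quick reference, the defaulting rule of `mutateVersionManagement` above can be exercised in isolation. This is a minimal, hypothetical sketch on a bare annotations map; the real mutator additionally gates on the admission operation and the cluster driver, as shown in the diff:

```go
package main

import "fmt"

const versionManagementAnno = "rancher.io/imported-cluster-version-management"

// defaultVersionManagement mirrors the defaulting rule from mutateVersionManagement:
// a missing or empty annotation becomes "system-default"; explicit values are kept.
func defaultVersionManagement(annotations map[string]string) map[string]string {
	if annotations == nil {
		annotations = make(map[string]string)
	}
	if v, ok := annotations[versionManagementAnno]; !ok || v == "" {
		annotations[versionManagementAnno] = "system-default"
	}
	return annotations
}

func main() {
	// Missing annotation: defaulted.
	fmt.Println(defaultVersionManagement(nil)[versionManagementAnno]) // system-default
	// Explicit value: preserved.
	fmt.Println(defaultVersionManagement(map[string]string{
		versionManagementAnno: "false",
	})[versionManagementAnno]) // false
}
```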
"rancher.io/imported-cluster-version-management" + VersionManagementSetting = "imported-cluster-version-management" +) + var parsedRangeLessThan123 = semver.MustParseRange("< 1.23.0-rancher0") // NewValidator returns a new validator for management clusters. @@ -31,12 +36,14 @@ func NewValidator( sar authorizationv1.SubjectAccessReviewInterface, cache v3.PodSecurityAdmissionConfigurationTemplateCache, userCache v3.UserCache, + settingCache v3.SettingCache, ) *Validator { return &Validator{ admitter: admitter{ - sar: sar, - psact: cache, - userCache: userCache, // userCache is nil for downstream clusters. + sar: sar, + psact: cache, + userCache: userCache, // userCache is nil for downstream clusters. + settingCache: settingCache, // settingCache is nil for downstream clusters }, } } @@ -69,9 +76,10 @@ func (v *Validator) Admitters() []admission.Admitter { } type admitter struct { - sar authorizationv1.SubjectAccessReviewInterface - psact v3.PodSecurityAdmissionConfigurationTemplateCache - userCache v3.UserCache + sar authorizationv1.SubjectAccessReviewInterface + psact v3.PodSecurityAdmissionConfigurationTemplateCache + userCache v3.UserCache + settingCache v3.SettingCache } // Admit handles the webhook admission request sent to this webhook. @@ -109,23 +117,26 @@ func (a *admitter) Admit(request *admission.Request) (*admissionv1.AdmissionResp } } - if request.Operation == admissionv1.Create || request.Operation == admissionv1.Update { - // no need to validate the PodSecurityAdmissionConfigurationTemplate on a local cluster, - // or imported cluster which represents a KEv2 cluster (GKE/EKS/AKS) or v1 Provisioning Cluster - if newCluster.Name == "local" || newCluster.Spec.RancherKubernetesEngineConfig == nil { - return admission.ResponseAllowed(), nil - } + response, err = a.validatePSACT(oldCluster, newCluster, request.Operation) + if err != nil { + return nil, fmt.Errorf("failed to validate PodSecurityAdmissionConfigurationTemplate(PSACT): %w", err) + } + if !response.Allowed { + return response, nil + } - response, err = a.validatePSACT(oldCluster, newCluster, request.Operation) + if a.settingCache != nil { + // The following checks don't make sense for downstream clusters (settingCache == nil) + response, err = a.validateVersionManagementFeature(oldCluster, newCluster, request.Operation) if err != nil { - return nil, fmt.Errorf("failed to validate PodSecurityAdmissionConfigurationTemplate(PSACT): %w", err) + return nil, fmt.Errorf("failed to validate version management feature: %w", err) } if !response.Allowed { return response, nil } } - return admission.ResponseAllowed(), nil + return response, nil } func toExtra(extra map[string]authenticationv1.ExtraValue) map[string]v1.ExtraValue { @@ -197,6 +208,15 @@ func (a *admitter) validateFleetPermissions(request *admission.Request, oldClust // validatePSACT validates the cluster spec when PodSecurityAdmissionConfigurationTemplate is used. 
diff --git a/pkg/resources/management.cattle.io/v3/cluster/validator.go b/pkg/resources/management.cattle.io/v3/cluster/validator.go
index d21e39354..e1e9774c5 100644
--- a/pkg/resources/management.cattle.io/v3/cluster/validator.go
+++ b/pkg/resources/management.cattle.io/v3/cluster/validator.go
@@ -24,6 +24,11 @@ import (
 	authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1"
 )
 
+const (
+	VersionManagementAnno    = "rancher.io/imported-cluster-version-management"
+	VersionManagementSetting = "imported-cluster-version-management"
+)
+
 var parsedRangeLessThan123 = semver.MustParseRange("< 1.23.0-rancher0")
 
 // NewValidator returns a new validator for management clusters.
@@ -31,12 +36,14 @@ func NewValidator(
 	sar authorizationv1.SubjectAccessReviewInterface,
 	cache v3.PodSecurityAdmissionConfigurationTemplateCache,
 	userCache v3.UserCache,
+	settingCache v3.SettingCache,
 ) *Validator {
 	return &Validator{
 		admitter: admitter{
-			sar:       sar,
-			psact:     cache,
-			userCache: userCache, // userCache is nil for downstream clusters.
+			sar:          sar,
+			psact:        cache,
+			userCache:    userCache,    // userCache is nil for downstream clusters.
+			settingCache: settingCache, // settingCache is nil for downstream clusters.
 		},
 	}
 }
@@ -69,9 +76,10 @@ func (v *Validator) Admitters() []admission.Admitter {
 }
 
 type admitter struct {
-	sar       authorizationv1.SubjectAccessReviewInterface
-	psact     v3.PodSecurityAdmissionConfigurationTemplateCache
-	userCache v3.UserCache
+	sar          authorizationv1.SubjectAccessReviewInterface
+	psact        v3.PodSecurityAdmissionConfigurationTemplateCache
+	userCache    v3.UserCache
+	settingCache v3.SettingCache
 }
 
 // Admit handles the webhook admission request sent to this webhook.
@@ -109,23 +117,26 @@ func (a *admitter) Admit(request *admission.Request) (*admissionv1.AdmissionResp
 		}
 	}
 
-	if request.Operation == admissionv1.Create || request.Operation == admissionv1.Update {
-		// no need to validate the PodSecurityAdmissionConfigurationTemplate on a local cluster,
-		// or imported cluster which represents a KEv2 cluster (GKE/EKS/AKS) or v1 Provisioning Cluster
-		if newCluster.Name == "local" || newCluster.Spec.RancherKubernetesEngineConfig == nil {
-			return admission.ResponseAllowed(), nil
-		}
+	response, err = a.validatePSACT(oldCluster, newCluster, request.Operation)
+	if err != nil {
+		return nil, fmt.Errorf("failed to validate PodSecurityAdmissionConfigurationTemplate(PSACT): %w", err)
+	}
+	if !response.Allowed {
+		return response, nil
+	}
 
-		response, err = a.validatePSACT(oldCluster, newCluster, request.Operation)
+	if a.settingCache != nil {
+		// The following checks don't make sense for downstream clusters (settingCache == nil).
+		response, err = a.validateVersionManagementFeature(oldCluster, newCluster, request.Operation)
 		if err != nil {
-			return nil, fmt.Errorf("failed to validate PodSecurityAdmissionConfigurationTemplate(PSACT): %w", err)
+			return nil, fmt.Errorf("failed to validate version management feature: %w", err)
 		}
 		if !response.Allowed {
 			return response, nil
 		}
 	}
 
-	return admission.ResponseAllowed(), nil
+	return response, nil
 }
 
 func toExtra(extra map[string]authenticationv1.ExtraValue) map[string]v1.ExtraValue {
@@ -197,6 +208,15 @@ func (a *admitter) validateFleetPermissions(request *admission.Request, oldClust
 
 // validatePSACT validates the cluster spec when PodSecurityAdmissionConfigurationTemplate is used.
 func (a *admitter) validatePSACT(oldCluster, newCluster *apisv3.Cluster, op admissionv1.Operation) (*admissionv1.AdmissionResponse, error) {
+	if op != admissionv1.Create && op != admissionv1.Update {
+		return admission.ResponseAllowed(), nil
+	}
+	// no need to validate the PodSecurityAdmissionConfigurationTemplate on a local cluster,
+	// or imported cluster which represents a KEv2 cluster (GKE/EKS/AKS) or v1 Provisioning Cluster
+	if newCluster.Name == "local" || newCluster.Spec.RancherKubernetesEngineConfig == nil {
+		return admission.ResponseAllowed(), nil
+	}
+
 	newTemplateName := newCluster.Spec.DefaultPodSecurityAdmissionConfigurationTemplateName
 	oldTemplateName := oldCluster.Spec.DefaultPodSecurityAdmissionConfigurationTemplateName
 
@@ -289,3 +309,89 @@ func (a *admitter) checkPSAConfigOnCluster(cluster *apisv3.Cluster) (*admissionv
 
 	return admission.ResponseAllowed(), nil
 }
+
+// validateVersionManagementFeature validates that the version management annotation is set with a valid
+// value on an imported RKE2/K3s cluster. Additionally, it permits but includes a warning in the response
+// if either of the following is true:
+//   - the annotation is found on a cluster other than an imported RKE2/K3s cluster;
+//   - the spec.rke2Config or spec.k3sConfig field is changed while version management is disabled for the cluster.
+func (a *admitter) validateVersionManagementFeature(oldCluster, newCluster *apisv3.Cluster, op admissionv1.Operation) (*admissionv1.AdmissionResponse, error) {
+	if op != admissionv1.Create && op != admissionv1.Update {
+		return admission.ResponseAllowed(), nil
+	}
+
+	val, exist := newCluster.Annotations[VersionManagementAnno]
+	driver := newCluster.Status.Driver
+
+	if driver != apisv3.ClusterDriverRke2 && driver != apisv3.ClusterDriverK3s {
+		response := admission.ResponseAllowed()
+		if exist {
+			msg := fmt.Sprintf("The annotation [%s] takes effect only on imported RKE2/K3s clusters, please consider removing it from cluster [%s]", VersionManagementAnno, newCluster.Name)
+			response.Warnings = append(response.Warnings, msg)
+		}
+		return response, nil
+	}
+
+	// reaching this point indicates the cluster is an imported RKE2/K3s cluster
+	if !exist {
+		message := fmt.Sprintf("the %s annotation is missing", VersionManagementAnno)
+		return admission.ResponseBadRequest(message), nil
+	}
+	if val != "true" && val != "false" && val != "system-default" {
+		message := fmt.Sprintf("the value of the %s annotation must be one of the following: true, false, system-default", VersionManagementAnno)
+		return admission.ResponseBadRequest(message), nil
+	}
+	enabled, err := a.versionManagementEnabled(newCluster)
+	if err != nil {
+		return nil, fmt.Errorf("failed to check the version management feature: %w", err)
+	}
+	response := admission.ResponseAllowed()
+	if !enabled && op == admissionv1.Update {
+		if driver == apisv3.ClusterDriverRke2 {
+			if !reflect.DeepEqual(oldCluster.Spec.Rke2Config, newCluster.Spec.Rke2Config) && newCluster.Spec.Rke2Config != nil {
+				msg := fmt.Sprintf("Cluster [%s]: changes to the Rke2Config field will take effect after version management is enabled on the cluster", newCluster.Name)
+				response.Warnings = append(response.Warnings, msg)
+			}
+		}
+		if driver == apisv3.ClusterDriverK3s {
+			if !reflect.DeepEqual(oldCluster.Spec.K3sConfig, newCluster.Spec.K3sConfig) && newCluster.Spec.K3sConfig != nil {
+				msg := fmt.Sprintf("Cluster [%s]: changes to the K3sConfig field will take effect after version management is enabled on the cluster", newCluster.Name)
+				response.Warnings = append(response.Warnings, msg)
+			}
+		}
+	}
+	return response, nil
+}
+
+// versionManagementEnabled reports whether version management is effectively enabled for the cluster,
+// resolving the system-default value through the imported-cluster-version-management setting.
+func (a *admitter) versionManagementEnabled(cluster *apisv3.Cluster) (bool, error) {
+	if cluster == nil {
+		return false, fmt.Errorf("cluster is nil")
+	}
+	val, ok := cluster.Annotations[VersionManagementAnno]
+	if !ok {
+		return false, fmt.Errorf("the %s annotation is missing from the cluster", VersionManagementAnno)
+	}
+	if val == "true" {
+		return true, nil
+	}
+	if val == "false" {
+		return false, nil
+	}
+	if val == "system-default" {
+		s, err := a.settingCache.Get(VersionManagementSetting)
+		if err != nil {
+			return false, err
+		}
+		actual := s.Value
+		if actual == "" {
+			actual = s.Default
+		}
+		if actual == "true" {
+			return true, nil
+		}
+		if actual == "false" {
+			return false, nil
+		}
+		return false, fmt.Errorf("the value (%s) of the %s setting is invalid", actual, VersionManagementSetting)
+	}
+	return false, fmt.Errorf("the value of the %s annotation is invalid", VersionManagementAnno)
+}
diff --git a/pkg/resources/management.cattle.io/v3/cluster/validator_test.go b/pkg/resources/management.cattle.io/v3/cluster/validator_test.go
index dbaf2658b..7e777b8cd 100644
--- a/pkg/resources/management.cattle.io/v3/cluster/validator_test.go
+++ b/pkg/resources/management.cattle.io/v3/cluster/validator_test.go
@@ -52,13 +52,24 @@ func TestAdmit(t *testing.T) {
 		return nil, apierrors.NewNotFound(schema.GroupResource{}, name)
 	}).AnyTimes()
 
+	settingCache := fake.NewMockNonNamespacedCacheInterface[*v3.Setting](ctrl)
+	settingCache.EXPECT().Get(gomock.Any()).DoAndReturn(func(name string) (*v3.Setting, error) {
+		if name == VersionManagementSetting {
+			return &v3.Setting{
+				Value: "true",
+			}, nil
+		}
+		return nil, apierrors.NewNotFound(schema.GroupResource{}, name)
+	}).AnyTimes()
+
 	tests := []struct {
-		name           string
-		oldCluster     v3.Cluster
-		newCluster     v3.Cluster
-		operation      admissionv1.Operation
-		expectAllowed  bool
-		expectedReason metav1.StatusReason
+		name                 string
+		oldCluster           v3.Cluster
+		newCluster           v3.Cluster
+		operation            admissionv1.Operation
+		expectAllowed        bool
+		expectedReason       metav1.StatusReason
+		expectContainWarning bool
 	}{
 		{
 			name:      "Create",
@@ -309,14 +320,167 @@ func TestAdmit(t *testing.T) {
 			expectAllowed:  true,
 			expectedReason: metav1.StatusReasonBadRequest,
 		},
+		// Test cases for the version management feature
+		{
+			name:      "cluster version management - imported RKE2 cluster, valid annotation, create",
+			operation: admissionv1.Create,
+			newCluster: v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "false",
+					},
+				},
+				Status: v3.ClusterStatus{
+					Driver: v3.ClusterDriverRke2,
+				},
+			},
+			expectAllowed: true,
+		},
+		{
+			name:      "cluster version management - imported RKE2 cluster, no annotation, create",
+			operation: admissionv1.Create,
+			newCluster: v3.Cluster{
+				Status: v3.ClusterStatus{
+					Driver: v3.ClusterDriverRke2,
+				},
+			},
+			expectAllowed:  false,
+			expectedReason: metav1.StatusReasonBadRequest,
+		},
+		{
+			name:      "cluster version management - imported K3s cluster, valid annotation, create",
+			operation: admissionv1.Create,
+			newCluster: v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "true",
+					},
+				},
+				Status: v3.ClusterStatus{
+					Driver: v3.ClusterDriverK3s,
+				},
+			},
+			expectAllowed: true,
+		},
+		{
+			name:      "cluster version management - imported K3s cluster, valid annotation, update",
+			operation: admissionv1.Update,
+			oldCluster: v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "false",
+					},
+				},
+				Status: v3.ClusterStatus{
+					Driver: v3.ClusterDriverK3s,
+				},
+			},
+			newCluster: v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "system-default",
+					},
+				},
+				Status: v3.ClusterStatus{
+					Driver: v3.ClusterDriverK3s,
+				},
+			},
+			expectAllowed: true,
+		},
+		{
+			name:      "cluster version management - imported K3s cluster, drop annotation, update",
+			operation: admissionv1.Update,
+			oldCluster: v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "system-default",
+					},
+				},
+				Status: v3.ClusterStatus{
+					Driver: v3.ClusterDriverK3s,
+				},
+			},
+			newCluster: v3.Cluster{
+				Status: v3.ClusterStatus{
+					Driver: v3.ClusterDriverK3s,
+				},
+			},
+			expectAllowed:  false,
+			expectedReason: metav1.StatusReasonBadRequest,
+		},
+		{
+			name:      "cluster version management - imported RKE2 cluster, invalid annotation, update",
+			operation: admissionv1.Update,
+			oldCluster: v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "false",
+					},
+				},
+				Status: v3.ClusterStatus{
+					Driver: v3.ClusterDriverK3s,
+				},
+			},
+			newCluster: v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "INVALID",
+					},
+				},
+				Status: v3.ClusterStatus{
+					Driver: v3.ClusterDriverRke2,
+				},
+			},
+			expectAllowed:  false,
+			expectedReason: metav1.StatusReasonBadRequest,
+		},
+		{
+			name:      "cluster version management - invalid cluster type, valid annotation, create",
+			operation: admissionv1.Create,
+			newCluster: v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "false",
+					},
+				},
+				Status: v3.ClusterStatus{
+					Driver: v3.ClusterDriverAKS,
+				},
+			},
+			expectAllowed:        true,
+			expectContainWarning: true,
+		},
+		{
+			name:      "cluster version management - invalid cluster type, invalid annotation, update",
+			operation: admissionv1.Update,
+			oldCluster: v3.Cluster{
+				Status: v3.ClusterStatus{
+					Driver: v3.ClusterDriverAKS,
+				},
+			},
+			newCluster: v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "INVALID",
+					},
+				},
+				Status: v3.ClusterStatus{
+					Driver: v3.ClusterDriverAKS,
+				},
+			},
+			expectAllowed:        true,
+			expectContainWarning: true,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			v := &Validator{
 				admitter: admitter{
-					sar:       &mockReviewer{},
-					userCache: userCache,
+					sar:          &mockReviewer{},
+					userCache:    userCache,
+					settingCache: settingCache,
 				},
 			}
 
@@ -347,6 +511,105 @@ func TestAdmit(t *testing.T) {
 					assert.Equal(t, tt.expectedReason, res.Result.Reason)
 				}
 			}
+			if tt.expectContainWarning {
+				assert.NotEmpty(t, res.Warnings)
+			}
 		})
 	}
 }
+
+func Test_versionManagementEnabled(t *testing.T) {
+	ctrl := gomock.NewController(t)
+	settingCache := fake.NewMockNonNamespacedCacheInterface[*v3.Setting](ctrl)
+	settingCache.EXPECT().Get(gomock.Any()).DoAndReturn(func(name string) (*v3.Setting, error) {
+		if name == VersionManagementSetting {
+			return &v3.Setting{
+				Value: "true",
+			}, nil
+		}
+		return nil, apierrors.NewNotFound(schema.GroupResource{}, name)
+	}).AnyTimes()
+
+	tests := []struct {
+		name         string
+		cluster      *v3.Cluster
+		expectError  bool
+		expectResult bool
+	}{
+		{
+			name:         "nil cluster",
+			cluster:      nil,
+			expectError:  true,
+			expectResult: false,
+		},
+		{
+			name: "no annotation",
+			cluster: &v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "cluster",
+				},
+			},
+			expectError:  true,
+			expectResult: false,
+		},
+		{
+			name: "annotation value false",
+			cluster: &v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "false",
+					},
+				},
+			},
+			expectError:  false,
+			expectResult: false,
+		},
+		{
+			name: "annotation value true",
+			cluster: &v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "true",
+					},
+				},
+			},
+			expectError:  false,
+			expectResult: true,
+		},
+		{
+			name: "annotation value system-default",
+			cluster: &v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "system-default",
+					},
+				},
+			},
+			expectError:  false,
+			expectResult: true,
+		},
+		{
+			name: "annotation value invalid",
+			cluster: &v3.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						VersionManagementAnno: "INVALID",
+					},
+				},
+			},
+			expectError:  true,
+			expectResult: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			a := &admitter{
+				settingCache: settingCache,
+			}
+			got, err := a.versionManagementEnabled(tt.cluster)
+			if tt.expectError {
+				assert.Error(t, err)
+			}
+			assert.Equal(t, tt.expectResult, got)
+		})
+	}
+}
diff --git a/pkg/server/handlers.go b/pkg/server/handlers.go
index a6f2299ae..da9143b85 100644
--- a/pkg/server/handlers.go
+++ b/pkg/server/handlers.go
@@ -35,14 +35,17 @@ import (
 // Validation returns a list of all ValidatingAdmissionHandlers used by the webhook.
 func Validation(clients *clients.Clients) ([]admission.ValidatingAdmissionHandler, error) {
 	var userCache v3.UserCache
+	var settingCache v3.SettingCache
 	if clients.MultiClusterManagement {
 		userCache = clients.Management.User().Cache()
+		settingCache = clients.Management.Setting().Cache()
 	}
 
 	clusters := managementCluster.NewValidator(
 		clients.K8s.AuthorizationV1().SubjectAccessReviews(),
 		clients.Management.PodSecurityAdmissionConfigurationTemplate().Cache(),
 		userCache,
+		settingCache,
 	)
 
 	handlers := []admission.ValidatingAdmissionHandler{