diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 916695d5..c255fb97 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -9,6 +9,9 @@ on: artifacts: required: false type: string + ginkgoLabelFilter: + required: false + type: string env: GO_REQUIRED_MIN_VERSION: '' @@ -33,7 +36,8 @@ jobs: - name: Test E2E run: | - cd ${{ inputs.repo }} && make test-e2e + cd ${{ inputs.repo }} + LABEL_FILTER="${{ inputs.ginkgoLabelFilter }}" make test-e2e - name: Upload Artifacts if: | diff --git a/.github/workflows/planner.yml b/.github/workflows/planner.yml index abebcbdd..64907569 100644 --- a/.github/workflows/planner.yml +++ b/.github/workflows/planner.yml @@ -3,7 +3,7 @@ name: Planner on: pull_request: pull_request_target: - types: [unlabeled] + types: [opened, labeled, unlabeled, reopened, synchronize, ready_for_review] workflow_dispatch: concurrency: @@ -92,6 +92,50 @@ jobs: echo "Matrix: $matrixJson" echo "Artifacts: $artifacts_json" + + extract-label-filter: + name: extract-label-filter + needs: generate-matrix + runs-on: ubuntu-latest + outputs: + ginkgoLabelFilter: ${{ steps.extract-label-filter.outputs.ginkgoLabelFilter }} + steps: + - name: Extract label filter + id: extract-label-filter + run: | + set -e + # Find labels that start with 'ginkgo-filter:' + LABELS="${{ join(github.event.pull_request.labels.*.name, ',') }}" + + # Array to collect all filters + FILTERS=() + + # Extract all ginkgo filters + for label in $(echo $LABELS | tr ',' '\n'); do + if [[ $label == ginkgo-filter:* ]]; then + # Extract the filter part after the prefix + FILTER="${label#ginkgo-filter:}" + echo "Found Ginkgo filter in label: $FILTER" + FILTERS+=("$FILTER") + fi + done + + # If we have filters, combine them with OR operator + if [ ${#FILTERS[@]} -gt 0 ]; then + COMBINED_FILTER="" + + # OR each filter + for i in "${!FILTERS[@]}"; do + if [ $i -eq 0 ]; then + COMBINED_FILTER="(${FILTERS[$i]})" + else + 
COMBINED_FILTER="$COMBINED_FILTER||(${FILTERS[$i]})" + fi + done + + echo "Final ginkgo label filter: $COMBINED_FILTER" + echo "ginkgoLabelFilter=$COMBINED_FILTER" >> $GITHUB_OUTPUT + fi call-test: name: test @@ -108,7 +152,9 @@ jobs: call-e2e: name: e2e - needs: generate-matrix + needs: + - generate-matrix + - extract-label-filter if: | needs.generate-matrix.outputs.matrix != '' strategy: @@ -118,4 +164,5 @@ jobs: with: repo: ${{ matrix.repo }} artifacts: ${{ fromJson(needs.generate-matrix.outputs.artifacts)[matrix.repo] }} + ginkgoLabelFilter: ${{ needs.extract-label-filter.outputs.ginkgoLabelFilter }} secrets: inherit diff --git a/fleetconfig-controller/.dockerignore b/fleetconfig-controller/.dockerignore index a3aab7af..cf1d7224 100644 --- a/fleetconfig-controller/.dockerignore +++ b/fleetconfig-controller/.dockerignore @@ -1,3 +1,4 @@ # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file # Ignore build and test binaries. bin/ +tmp/ \ No newline at end of file diff --git a/fleetconfig-controller/Makefile b/fleetconfig-controller/Makefile index 25014390..61954454 100644 --- a/fleetconfig-controller/Makefile +++ b/fleetconfig-controller/Makefile @@ -117,6 +117,7 @@ test-unit: manifests generate fmt vet envtest ## Run unit tests. -coverprofile=$(COVER_DIR)/unit/cover.out \ $(shell go list ./... | grep -v '/test/e2e') +LABEL_FILTER ?= v1beta1 .PHONY: test-e2e test-e2e: kind kubectl ginkgo support-bundle ## Run e2e tests in the top-level test directory. @mkdir -p $(COVER_DIR)/e2e @@ -125,7 +126,7 @@ test-e2e: kind kubectl ginkgo support-bundle ## Run e2e tests in the top-level t $(GINKGO) run -vv \ --cover \ --coverpkg=./... 
\ - --label-filter="fleetconfig" \ + --label-filter="$(if $(LABEL_FILTER),$(LABEL_FILTER),v1beta1)" \ --output-dir=$(COVER_DIR)/e2e \ --timeout 20m \ ./test/e2e/ diff --git a/fleetconfig-controller/PROJECT b/fleetconfig-controller/PROJECT index e93ba4a2..72d8eeee 100644 --- a/fleetconfig-controller/PROJECT +++ b/fleetconfig-controller/PROJECT @@ -26,7 +26,7 @@ resources: path: github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1 version: v1beta1 webhooks: - defaulting: true + defaulting: false validation: true webhookVersion: v1 - api: @@ -36,7 +36,7 @@ resources: path: github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1 version: v1beta1 webhooks: - defaulting: true + defaulting: false validation: true webhookVersion: v1 version: "3" diff --git a/fleetconfig-controller/api/v1alpha1/fleetconfig_types.go b/fleetconfig-controller/api/v1alpha1/fleetconfig_types.go index 82ff3d9b..81455b8f 100644 --- a/fleetconfig-controller/api/v1alpha1/fleetconfig_types.go +++ b/fleetconfig-controller/api/v1alpha1/fleetconfig_types.go @@ -23,10 +23,11 @@ import ( "sort" "time" - "open-cluster-management.io/ocm/pkg/operator/helpers/chart" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "open-cluster-management.io/ocm/pkg/operator/helpers/chart" + + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/args" ) // FleetConfigSpec defines the desired state of FleetConfig. @@ -675,6 +676,33 @@ func (r *ResourceValues) String() string { return "" } +// GetRequests returns the resource requests. +func (r ResourceSpec) GetRequests() args.ResourceValues { + if r.Requests == nil { + return &ResourceValues{} + } + return r.Requests +} + +// GetLimits returns the resource limits. +func (r ResourceSpec) GetLimits() args.ResourceValues { + if r.Limits == nil { + return &ResourceValues{} + } + return r.Limits +} + +// GetQosClass returns the QoS class. 
+func (r ResourceSpec) GetQosClass() string { + return r.QosClass +} + +// Ensure ResourceSpec implements args.ResourceSpec interface +var _ args.ResourceSpec = (*ResourceSpec)(nil) + +// Ensure ResourceValues implements args.ResourceValues interface +var _ args.ResourceValues = (*ResourceValues)(nil) + // RegistrationAuth provides specifications for registration authentication. type RegistrationAuth struct { // The registration authentication driver to use. diff --git a/fleetconfig-controller/api/v1beta1/common.go b/fleetconfig-controller/api/v1beta1/common.go new file mode 100644 index 00000000..82487baa --- /dev/null +++ b/fleetconfig-controller/api/v1beta1/common.go @@ -0,0 +1,157 @@ +package v1beta1 + +import ( + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/args" +) + +// Kubeconfig is the configuration for a kubeconfig. +type Kubeconfig struct { + // A reference to an existing secret containing a kubeconfig. + // Must be provided for remote clusters. + // For same-cluster, must be provided unless InCluster is set to true. + // +optional + SecretReference *SecretReference `json:"secretReference,omitempty"` + + // If set, the kubeconfig will be read from the cluster. + // Only applicable for same-cluster operations. + // Defaults to false. + // +optional + InCluster bool `json:"inCluster,omitempty"` + + // The context to use in the kubeconfig file. + // +optional + Context string `json:"context,omitempty"` +} + +// SecretReference describes how to retrieve a kubeconfig stored as a secret in the same namespace as the resource. +type SecretReference struct { + // The name of the secret. + // +required + Name string `json:"name"` + + // The map key to access the kubeconfig. Defaults to 'kubeconfig'. 
+ // +kubebuilder:default:="kubeconfig" + // +optional + KubeconfigKey string `json:"kubeconfigKey,omitempty"` +} + +// ResourceSpec defines resource limits and requests for all managed clusters. +type ResourceSpec struct { + // The resource limits of all the containers managed by the Cluster Manager or Klusterlet operators. + // +optional + Limits *ResourceValues `json:"limits,omitempty"` + + // The resource requests of all the containers managed by the Cluster Manager or Klusterlet operators. + // +optional + Requests *ResourceValues `json:"requests,omitempty"` + + // The resource QoS class of all the containers managed by the Cluster Manager or Klusterlet operators. + // One of Default, BestEffort or ResourceRequirement. + // +kubebuilder:validation:Enum=Default;BestEffort;ResourceRequirement + // +kubebuilder:default:="Default" + // +optional + QosClass string `json:"qosClass,omitempty"` +} + +// ResourceValues detail container resource constraints. +type ResourceValues struct { + // The number of CPU units to request, e.g., '800m'. + // +optional + CPU string `json:"cpu,omitempty"` + + // The amount of memory to request, e.g., '8Gi'. + // +optional + Memory string `json:"memory,omitempty"` +} + +// String returns a string representation of the resource values. +func (r *ResourceValues) String() string { + if r.CPU != "" && r.Memory != "" { + return fmt.Sprintf("cpu=%s,memory=%s", r.CPU, r.Memory) + } else if r.CPU != "" { + return fmt.Sprintf("cpu=%s", r.CPU) + } else if r.Memory != "" { + return fmt.Sprintf("memory=%s", r.Memory) + } + return "" +} + +// GetRequests returns the resource requests. +func (r ResourceSpec) GetRequests() args.ResourceValues { + if r.Requests == nil { + return &ResourceValues{} + } + return r.Requests +} + +// GetLimits returns the resource limits. +func (r ResourceSpec) GetLimits() args.ResourceValues { + if r.Limits == nil { + return &ResourceValues{} + } + return r.Limits +} + +// GetQosClass returns the QoS class. 
+func (r ResourceSpec) GetQosClass() string { + return r.QosClass +} + +// Ensure ResourceSpec implements args.ResourceSpec interface +var _ args.ResourceSpec = (*ResourceSpec)(nil) + +// Ensure ResourceValues implements args.ResourceValues interface +var _ args.ResourceValues = (*ResourceValues)(nil) + +// NewCondition returns a new v1beta1.Condition. +func NewCondition(msg, cType string, status, wantStatus metav1.ConditionStatus) Condition { + return Condition{ + Condition: metav1.Condition{ + Status: status, + Message: msg, + Reason: ReconcileSuccess, + Type: cType, + LastTransitionTime: metav1.Time{Time: time.Now()}, + }, + WantStatus: wantStatus, + } +} + +// Condition describes the state of a FleetConfig. +type Condition struct { + metav1.Condition `json:",inline"` + WantStatus metav1.ConditionStatus `json:"wantStatus"` +} + +// Equal returns true if the condition is identical to the supplied condition, ignoring the LastTransitionTime. +func (c Condition) Equal(other Condition) bool { + return c.Type == other.Type && c.Status == other.Status && c.WantStatus == other.WantStatus && + c.Reason == other.Reason && c.Message == other.Message +} + +// RegistrationAuth provides specifications for registration authentication. +type RegistrationAuth struct { + // The registration authentication driver to use. + // Options are: + // - csr: Use the default CSR-based registration authentication. + // - awsirsa: Use AWS IAM Role for Service Accounts (IRSA) registration authentication. + // The set of valid options is open for extension. + // +kubebuilder:validation:Enum=csr;awsirsa + // +kubebuilder:default:="csr" + // +optional + Driver string `json:"driver,omitempty"` + + // The Hub cluster ARN for awsirsa registration authentication. Required when Type is awsirsa, otherwise ignored. 
+ // +optional + HubClusterARN string `json:"hubClusterARN,omitempty"` + + // List of AWS EKS ARN patterns so any EKS clusters with these patterns will be auto accepted to join with hub cluster. + // Example pattern: "arn:aws:eks:us-west-2:123456789013:cluster/.*" + // +optional + AutoApprovedARNPatterns []string `json:"autoApprovedARNPatterns,omitempty"` +} diff --git a/fleetconfig-controller/api/v1beta1/constants.go b/fleetconfig-controller/api/v1beta1/constants.go new file mode 100644 index 00000000..ac7079b3 --- /dev/null +++ b/fleetconfig-controller/api/v1beta1/constants.go @@ -0,0 +1,114 @@ +package v1beta1 + +import "k8s.io/apimachinery/pkg/labels" + +const ( + // HubCleanupFinalizer is the finalizer for Hub cleanup. + HubCleanupFinalizer = "fleetconfig.open-cluster-management.io/hub-cleanup" + + // SpokeCleanupFinalizer is the finalizer for Spoke cleanup. + SpokeCleanupFinalizer = "fleetconfig.open-cluster-management.io/spoke-cleanup" +) + +// Hub and Spoke condition types +const ( + // HubInitialized means that the Hub has been initialized. + HubInitialized = "HubInitialized" + + // AddonsConfigured means that all addons have been configured on the Hub, or enabled/disabled on a Spoke. + AddonsConfigured = "AddonsConfigured" + + // CleanupFailed means that a failure occurred during cleanup. + CleanupFailed = "CleanupFailed" + + // SpokeJoined means that the spoke has successfully joined the Hub. + SpokeJoined = "SpokeJoined" +) + +// Hub and Spoke condition reasons +const ( + ReconcileSuccess = "ReconcileSuccess" +) + +// Hub and Spoke phases +const ( + // HubStarting means that the Hub is being initialized. + HubStarting = "Initializing" + + // HubRunning means that the Hub is initialized successfully. + HubRunning = "Running" + + // SpokeJoining means that the Spoke is being joined to the Hub. + SpokeJoining = "Joining" + + // SpokeRunning means that the Spoke has successfully joined the Hub. 
+ SpokeRunning = "Running" + + // Unhealthy means that a failure occurred during Hub initialization and/or Spoke join attempt. + Unhealthy = "Unhealthy" + + // Deleting means that the Hub or Spoke is being deleted. + Deleting = "Deleting" +) + +// ManagedClusterType is the type of a managed cluster. +type ManagedClusterType string + +const ( + // ManagedClusterTypeHub is the type of managed cluster that is a hub. + ManagedClusterTypeHub = "hub" + + // ManagedClusterTypeSpoke is the type of managed cluster that is a spoke. + ManagedClusterTypeSpoke = "spoke" + + // ManagedClusterTypeHubAsSpoke is the type of managed cluster that is both a hub and a spoke. + ManagedClusterTypeHubAsSpoke = "hub-as-spoke" +) + +// FleetConfig labels +const ( + // LabelManagedClusterType is the label key for the managed cluster type. + LabelManagedClusterType = "fleetconfig.open-cluster-management.io/managedClusterType" + + // LabelAddOnManagedBy is the label key for the lifecycle manager of an add-on resource. + LabelAddOnManagedBy = "addon.open-cluster-management.io/managedBy" +) + +// Registration driver types +const ( + // CSRRegistrationDriver is the default CSR-based registration driver. + CSRRegistrationDriver = "csr" + + // AWSIRSARegistrationDriver is the AWS IAM Role for Service Accounts (IRSA) registration driver. + AWSIRSARegistrationDriver = "awsirsa" +) + +// Addon ConfigMap constants +const ( + // AddonConfigMapNamePrefix is the common name prefix for all configmaps containing addon configurations. + AddonConfigMapNamePrefix = "fleet-addon" + + // AddonConfigMapManifestRawKey is the data key containing raw manifests. + AddonConfigMapManifestRawKey = "manifestsRaw" + + // AddonConfigMapManifestURLKey is the data key containing a URL to download manifests. + AddonConfigMapManifestURLKey = "manifestsURL" +) + +// Reconcile parameters +const ( + // SpokeDefaultMaxConcurrentReconciles is the default maximum number of Spoke resources that may be reconciled in parallel. 
+ SpokeDefaultMaxConcurrentReconciles = 5 +) + +// AllowedAddonURLSchemes are the URL schemes which can be used to provide manifests for configuring addons. +var AllowedAddonURLSchemes = []string{"http", "https"} + +var ( + // ManagedByLabels are labels applied to resources to denote that fleetconfig-controller is managing the lifecycle. + ManagedByLabels = map[string]string{ + LabelAddOnManagedBy: "fleetconfig-controller", + } + // ManagedBySelector is a label selector for filtering add-on resources managed by fleetconfig-controller. + ManagedBySelector = labels.SelectorFromSet(labels.Set(ManagedByLabels)) +) diff --git a/fleetconfig-controller/api/v1beta1/groupversion_info.go b/fleetconfig-controller/api/v1beta1/groupversion_info.go index d2d94a76..d036bf72 100644 --- a/fleetconfig-controller/api/v1beta1/groupversion_info.go +++ b/fleetconfig-controller/api/v1beta1/groupversion_info.go @@ -24,9 +24,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/scheme" ) +const group = "fleetconfig.open-cluster-management.io" + var ( + + // HubGroupKind is the group kind for the Hub API + HubGroupKind = schema.GroupKind{Group: group, Kind: "Hub"} + + // SpokeGroupKind is the group kind for the Spoke API + SpokeGroupKind = schema.GroupKind{Group: group, Kind: "Spoke"} + // GroupVersion is group version used to register these objects. - GroupVersion = schema.GroupVersion{Group: "fleetconfig.open-cluster-management.io", Version: "v1beta1"} + GroupVersion = schema.GroupVersion{Group: group, Version: "v1beta1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} diff --git a/fleetconfig-controller/api/v1beta1/hub_types.go b/fleetconfig-controller/api/v1beta1/hub_types.go index 7705aa07..7e107a57 100644 --- a/fleetconfig-controller/api/v1beta1/hub_types.go +++ b/fleetconfig-controller/api/v1beta1/hub_types.go @@ -17,33 +17,227 @@ limitations under the License. 
package v1beta1 import ( + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - // HubSpec defines the desired state of Hub type HubSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - // The following markers will use OpenAPI v3 schema to validate the value - // More info: https://book.kubebuilder.io/reference/markers/crd-validation.html + // APIServer is the API server URL for the Hub cluster. If provided, spokes clusters will + // join the hub using this API server instead of the one in the bootstrap kubeconfig. + // Spoke clusters with ForceInternalEndpointLookup set to true will ignore this field. + // +optional + APIServer string `json:"apiServer,omitempty"` + + // Hub cluster CA certificate, optional + // +optional + Ca string `json:"ca,omitempty"` + + // ClusterManager configuration. + // +optional + ClusterManager *ClusterManager `json:"clusterManager,omitempty"` + + // If true, create open-cluster-management namespace, otherwise use existing one. + // +kubebuilder:default:=true + // +optional + CreateNamespace bool `json:"createNamespace,omitempty"` + + // If set, the hub will be reinitialized. + // +optional + Force bool `json:"force,omitempty"` + + // Kubeconfig details for the Hub cluster. + // +required + Kubeconfig Kubeconfig `json:"kubeconfig"` + + // Singleton control plane configuration. If provided, deploy a singleton control plane instead of clustermanager. + // This is an alpha stage flag. 
+ // +optional + SingletonControlPlane *SingletonControlPlane `json:"singleton,omitempty"` + + // +kubebuilder:default:={} + // +optional + RegistrationAuth RegistrationAuth `json:"registrationAuth,omitzero"` + + // +optional + AddOnConfigs []AddOnConfig `json:"addOnConfigs,omitempty"` + + // +optional + HubAddOns []HubAddOn `json:"hubAddOns,omitempty"` + + // Timeout is the timeout in seconds for all clusteradm operations, including init, accept, join, upgrade, etc. + // +kubebuilder:default:=300 + // +optional + Timeout int `json:"timeout,omitempty"` - // foo is an example field of Hub. Edit hub_types.go to remove/update + // LogVerbosity is the verbosity of the logs. + // +kubebuilder:validation:Enum=0;1;2;3;4;5;6;7;8;9;10 + // +kubebuilder:default:=0 // +optional - Foo *string `json:"foo,omitempty"` + LogVerbosity int `json:"logVerbosity,omitempty"` +} + +// SingletonControlPlane is the configuration for a singleton control plane +type SingletonControlPlane struct { + // The name of the singleton control plane. + // +kubebuilder:default:="singleton-controlplane" + // +optional + Name string `json:"name,omitempty"` + + // Helm configuration for the multicluster-controlplane Helm chart. + // For now https://open-cluster-management.io/helm-charts/ocm/multicluster-controlplane is always used - no private registry support. + // See: https://github.com/open-cluster-management-io/multicluster-controlplane/blob/main/charts/multicluster-controlplane/values.yaml + // +optional + Helm *Helm `json:"helm,omitempty"` +} + +// Helm is the configuration for helm. +type Helm struct { + // Raw, YAML-formatted Helm values. + // +optional + Values string `json:"values,omitempty"` + + // Comma-separated Helm values, e.g., key1=val1,key2=val2. + // +optional + Set []string `json:"set,omitempty"` + + // Comma-separated Helm JSON values, e.g., key1=jsonval1,key2=jsonval2. + // +optional + SetJSON []string `json:"setJson,omitempty"` + + // Comma-separated Helm literal STRING values. 
+ // +optional + SetLiteral []string `json:"setLiteral,omitempty"` + + // Comma-separated Helm STRING values, e.g., key1=val1,key2=val2. + // +optional + SetString []string `json:"setString,omitempty"` +} + +// ClusterManager is the configuration for a cluster manager. +type ClusterManager struct { + // A set of comma-separated pairs of the form 'key1=value1,key2=value2' that describe feature gates for alpha/experimental features. + // Options are: + // - AddonManagement (ALPHA - default=true) + // - AllAlpha (ALPHA - default=false) + // - AllBeta (BETA - default=false) + // - CloudEventsDrivers (ALPHA - default=false) + // - DefaultClusterSet (ALPHA - default=false) + // - ManagedClusterAutoApproval (ALPHA - default=false) + // - ManifestWorkReplicaSet (ALPHA - default=false) + // - NilExecutorValidating (ALPHA - default=false) + // - ResourceCleanup (BETA - default=true) + // - V1beta1CSRAPICompatibility (ALPHA - default=false) + // +kubebuilder:default:="AddonManagement=true" + // +optional + FeatureGates string `json:"featureGates,omitempty"` + + // If set, the cluster manager operator will be purged and the open-cluster-management namespace deleted + // when the FleetConfig CR is deleted. + // +kubebuilder:default:=true + // +optional + PurgeOperator bool `json:"purgeOperator,omitempty"` + + // Resource specifications for all clustermanager-managed containers. + // +kubebuilder:default:={} + // +optional + Resources ResourceSpec `json:"resources,omitzero"` + + // Version and image registry details for the cluster manager. + // +kubebuilder:default:={} + // +optional + Source OCMSource `json:"source,omitzero"` + + // If set, the bootstrap token will used instead of a service account token. + // +optional + UseBootstrapToken bool `json:"useBootstrapToken,omitempty"` +} + +// AddOnConfig is the configuration of a custom AddOn that can be installed on a cluster. +type AddOnConfig struct { + // The name of the add-on. 
+ // +required + Name string `json:"name"` + + // The add-on version. Optional, defaults to "v0.0.1" + // +kubebuilder:default:="v0.0.1" + // +optional + Version string `json:"version,omitempty"` + + // The rolebinding to the clusterrole in the cluster namespace for the addon agent + // +optional + ClusterRoleBinding string `json:"clusterRoleBinding,omitempty"` + + // Enable the agent to register to the hub cluster. Optional, defaults to false. + // +kubebuilder:default:=false + // +optional + HubRegistration bool `json:"hubRegistration,omitempty"` + + // Whether to overwrite the add-on if it already exists. Optional, defaults to false. + // +kubebuilder:default:=false + // +optional + Overwrite bool `json:"overwrite,omitempty"` +} + +// HubAddOn is the configuration for enabling a built-in AddOn. +type HubAddOn struct { + // Name is the name of the HubAddOn. + // +kubebuilder:validation:Enum=argocd;governance-policy-framework + // +required + Name string `json:"name"` + + // The namespace to install the add-on in. If left empty, installs into the "open-cluster-management-addon" namespace. + // +optional + InstallNamespace string `json:"installNamespace,omitempty"` + + // Whether or not the selected namespace should be created. If left empty, defaults to false. + // +kubebuilder:default:=false + // +optional + CreateNamespace bool `json:"createNamespace,omitempty"` +} + +// OCMSource is the configuration for an OCM source. +type OCMSource struct { + // The version of predefined compatible image versions (e.g. v0.6.0). Defaults to the latest released version. + // You can also set "latest" to install the latest development version. + // +kubebuilder:default:="default" + // +optional + BundleVersion string `json:"bundleVersion,omitempty"` + + // The name of the image registry serving OCM images, which will be used for all OCM components." 
+ // +kubebuilder:default:="quay.io/open-cluster-management" + // +optional + Registry string `json:"registry,omitempty"` } // HubStatus defines the observed state of Hub. type HubStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Phase is the current phase of the Hub reconcile. + Phase string `json:"phase,omitempty"` + + // Conditions are the current conditions of the Hub. + Conditions []Condition `json:"conditions,omitempty"` + + InstalledHubAddOns []InstalledHubAddOn `json:"installedHubAddOns,omitempty"` +} + +// InstalledHubAddOn tracks metadata for each hubAddon that is successfully installed on the hub. +type InstalledHubAddOn struct { + // BundleVersion is the bundle version used when installing the addon. + BundleVersion string `json:"bundleVersion"` + + // Name is the name of the addon. + Name string `json:"name"` + + // Namespace is the namespace that the addon was installed into. + Namespace string `json:"namespace,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:scope=Cluster +// +kubebuilder:printcolumn:name="PHASE",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp" // Hub is the Schema for the hubs API type Hub struct { @@ -71,6 +265,60 @@ type HubList struct { Items []Hub `json:"items"` } +// BaseArgs returns the base arguments for all clusteradm commands. +func (h *Hub) BaseArgs() []string { + return []string{ + fmt.Sprintf("--timeout=%d", h.Spec.Timeout), + fmt.Sprintf("--v=%d", h.Spec.LogVerbosity), + } +} + +// GetCondition returns the condition with the supplied type, if it exists. 
+func (s *HubStatus) GetCondition(cType string) *Condition { + for _, c := range s.Conditions { + if c.Type == cType { + return &c + } + } + return nil +} + +// SetConditions sets the supplied conditions, adding net-new conditions and +// replacing any existing conditions of the same type. This is a no-op if all +// supplied conditions are identical (ignoring the last transition time) to +// those already set. If cover is false, existing conditions are not replaced. +func (s *HubStatus) SetConditions(cover bool, c ...Condition) { + for _, new := range c { + exists := false + for i, existing := range s.Conditions { + if existing.Type != new.Type { + continue + } + if existing.Equal(new) { + exists = true + continue + } + exists = true + if cover { + s.Conditions[i] = new + } + } + if !exists { + s.Conditions = append(s.Conditions, new) + } + } +} + +// GetCondition gets the condition with the supplied type, if it exists. +func (h *Hub) GetCondition(cType string) *Condition { + return h.Status.GetCondition(cType) +} + +// SetConditions sets the supplied conditions on a Hub, replacing any existing conditions. +func (h *Hub) SetConditions(cover bool, c ...Condition) { + h.Status.SetConditions(cover, c...) +} + func init() { SchemeBuilder.Register(&Hub{}, &HubList{}) } diff --git a/fleetconfig-controller/api/v1beta1/spoke_types.go b/fleetconfig-controller/api/v1beta1/spoke_types.go index 9365caa0..a7e880b2 100644 --- a/fleetconfig-controller/api/v1beta1/spoke_types.go +++ b/fleetconfig-controller/api/v1beta1/spoke_types.go @@ -17,33 +17,260 @@ limitations under the License. package v1beta1 import ( + "fmt" + "maps" + "reflect" + + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "open-cluster-management.io/ocm/pkg/operator/helpers/chart" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
- // SpokeSpec defines the desired state of Spoke type SpokeSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - // The following markers will use OpenAPI v3 schema to validate the value - // More info: https://book.kubebuilder.io/reference/markers/crd-validation.html + // If true, create open-cluster-management namespace and agent namespace (open-cluster-management-agent for Default mode, + // for Hosted mode), otherwise use existing one. + // +kubebuilder:default:=true + // +optional + CreateNamespace bool `json:"createNamespace,omitempty"` + + // HubRef is a reference to the Hub that this Spoke is managed by. + // +required + HubRef HubRef `json:"hubRef"` + + // If true, sync the labels from klusterlet to all agent resources. + // +optional + SyncLabels bool `json:"syncLabels,omitempty"` + + // Kubeconfig details for the Spoke cluster. + // +required + Kubeconfig Kubeconfig `json:"kubeconfig"` + + // Proxy CA certificate, optional + // +optional + ProxyCa string `json:"proxyCa,omitempty"` + + // URL of a forward proxy server used by agents to connect to the Hub cluster. + // +optional + ProxyURL string `json:"proxyUrl,omitempty"` + + // Klusterlet configuration. + // +kubebuilder:default:={} + // +optional + Klusterlet Klusterlet `json:"klusterlet,omitzero"` + + // ClusterARN is the ARN of the spoke cluster. + // This field is optionally used for AWS IRSA registration authentication. + // +optional + ClusterARN string `json:"clusterARN,omitempty"` + + // AddOns are the add-ons to enable for the spoke cluster. + // +optional + AddOns []AddOn `json:"addOns,omitempty"` + + // Timeout is the timeout in seconds for all clusteradm operations, including init, accept, join, upgrade, etc. + // If not set, defaults to the Hub's timeout. + // +kubebuilder:default:=300 + // +optional + Timeout int `json:"timeout,omitempty"` + + // LogVerbosity is the verbosity of the logs. 
+ // If not set, defaults to the Hub's logVerbosity. + // +kubebuilder:validation:Enum=0;1;2;3;4;5;6;7;8;9;10 + // +kubebuilder:default:=0 + // +optional + LogVerbosity int `json:"logVerbosity,omitempty"` +} + +// HubRef is the information required to get a Hub resource. +type HubRef struct { + // Name is the name of the Hub that this Spoke is managed by. + // +required + Name string `json:"name"` + + // Namespace is namespace of the Hub that this Spoke is managed by. + // +required + Namespace string `json:"namespace"` +} + +// IsManagedBy checks whether or not the Spoke is managed by a particular Hub. +func (s *Spoke) IsManagedBy(om metav1.ObjectMeta) bool { + return s.Spec.HubRef.Name == om.Name && s.Spec.HubRef.Namespace == om.Namespace +} + +// Klusterlet is the configuration for a klusterlet. +type Klusterlet struct { + // Annotations to apply to the spoke cluster. If not present, the 'agent.open-cluster-management.io/' prefix is added to each key. + // Each annotation is added to klusterlet.spec.registrationConfiguration.clusterAnnotations on the spoke and subsequently to the ManagedCluster on the hub. + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + + // A set of comma-separated pairs of the form 'key1=value1,key2=value2' that describe feature gates for alpha/experimental features. 
+ // Options are: + // - AddonManagement (ALPHA - default=true) + // - AllAlpha (ALPHA - default=false) + // - AllBeta (BETA - default=false) + // - ClusterClaim (ALPHA - default=true) + // - ExecutorValidatingCaches (ALPHA - default=false) + // - RawFeedbackJsonString (ALPHA - default=false) + // - V1beta1CSRAPICompatibility (ALPHA - default=false) + // +kubebuilder:default:="AddonManagement=true,ClusterClaim=true" + // +optional + FeatureGates string `json:"featureGates,omitempty"` + + // Deployment mode for klusterlet + // +kubebuilder:validation:Enum=Default;Hosted + // +kubebuilder:default:="Default" + // +optional + Mode string `json:"mode,omitempty"` + + // If set, the klusterlet operator will be purged and all open-cluster-management namespaces deleted + // when the klusterlet is unjoined from its Hub cluster. + // +kubebuilder:default:=true + // +optional + PurgeOperator bool `json:"purgeOperator,omitempty"` + + // If true, the installed klusterlet agent will start the cluster registration process by looking for the + // internal endpoint from the public cluster-info in the Hub cluster instead of using hubApiServer. + // +optional + ForceInternalEndpointLookup bool `json:"forceInternalEndpointLookup,omitempty"` + + // External managed cluster kubeconfig, required if using hosted mode. + // +optional + ManagedClusterKubeconfig Kubeconfig `json:"managedClusterKubeconfig,omitzero"` + + // If true, the klusterlet accesses the managed cluster using the internal endpoint from the public + // cluster-info in the managed cluster instead of using managedClusterKubeconfig. + // +optional + ForceInternalEndpointLookupManaged bool `json:"forceInternalEndpointLookupManaged,omitempty"` - // foo is an example field of Spoke. Edit spoke_types.go to remove/update + // Resource specifications for all klusterlet-managed containers. 
+	// +kubebuilder:default:={}
 	// +optional
-	Foo *string `json:"foo,omitempty"`
+	Resources ResourceSpec `json:"resources,omitzero"`
+
+	// If true, deploy klusterlet in singleton mode, with registration and work agents running in a single pod.
+	// This is an alpha stage flag.
+	// +optional
+	Singleton bool `json:"singleton,omitempty"`
+
+	// ValuesFrom is an optional reference to a ConfigMap containing values for the klusterlet Helm chart.
+	// +optional
+	ValuesFrom *ConfigMapRef `json:"valuesFrom,omitempty"`
+
+	// Values for the klusterlet Helm chart. Values defined here override values which are defined in ValuesFrom.
+	// +optional
+	Values *KlusterletChartConfig `json:"values,omitempty"`
+}
+
+// ConfigMapRef is a reference to data inside a ConfigMap, in the same namespace as the controller pod.
+type ConfigMapRef struct {
+	// Name is the name of the ConfigMap
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	Name string `json:"name"`
+
+	// Key is the key under which the data is stored.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	Key string `json:"key"`
+}
+
+// KlusterletChartConfig is a wrapper around the external chart.KlusterletChartConfig
+// to provide the required DeepCopy methods for code generation.
+type KlusterletChartConfig struct {
+	chart.KlusterletChartConfig `json:",inline"`
+}
+
+// DeepCopy returns a deep copy of the KlusterletChartConfig.
+func (k *KlusterletChartConfig) DeepCopy() *KlusterletChartConfig {
+	if k == nil {
+		return nil
+	}
+	out := new(KlusterletChartConfig)
+	k.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto copies all properties of this object into another object of the
+// same type that is provided as a pointer.
+func (k *KlusterletChartConfig) DeepCopyInto(out *KlusterletChartConfig) { + *out = *k + + out.KlusterletChartConfig = k.KlusterletChartConfig + + if k.NodeSelector != nil { + k, out := &k.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*k)) + maps.Copy(*out, *k) + } + if k.Tolerations != nil { + k, out := &k.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*k)) + for i := range *k { + (*k)[i].DeepCopyInto(&(*out)[i]) + } + } + + k.Affinity.DeepCopyInto(&out.Affinity) + k.Resources.DeepCopyInto(&out.Resources) + k.PodSecurityContext.DeepCopyInto(&out.PodSecurityContext) + k.SecurityContext.DeepCopyInto(&out.SecurityContext) + + out.Images = k.Images + out.Klusterlet = k.Klusterlet + + if k.MultiHubBootstrapHubKubeConfigs != nil { + k, out := &k.MultiHubBootstrapHubKubeConfigs, &out.MultiHubBootstrapHubKubeConfigs + *out = make([]chart.BootStrapKubeConfig, len(*k)) + copy(*out, *k) + } +} + +// IsEmpty checks if the KlusterletChartConfig is empty/default/zero-valued +func (k *KlusterletChartConfig) IsEmpty() bool { + return reflect.DeepEqual(*k, KlusterletChartConfig{}) +} + +// AddOn enables add-on installation on the cluster. +type AddOn struct { + // The name of the add-on being enabled. Must match one of the AddOnConfigs or HubAddOns names. + // +required + ConfigName string `json:"configName"` + + // The namespace to install the add-on in. If left empty, installs into the "open-cluster-management-addon" namespace. + // +optional + InstallNamespace string `json:"installNamespace,omitempty"` + + // Annotations to apply to the add-on. + // +optional + Annotations map[string]string `json:"annotations,omitempty"` } // SpokeStatus defines the observed state of Spoke. type SpokeStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Phase is the current phase of the Spoke reconcile. 
+ Phase string `json:"phase,omitempty"` + + // Conditions are the current conditions of the Spoke. + Conditions []Condition `json:"conditions,omitempty"` + + // EnabledAddons is the list of addons that are currently enabled on the Spoke. + // +kubebuilder:default:={} + // +optional + EnabledAddons []string `json:"enabledAddons,omitempty"` + + // KlusterletHash is a hash of the Spoke's .spec.klusterlet.values. + // +kubebuilder:default:="" + // +optional + KlusterletHash string `json:"klusterletHash,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:path=spokes,scope=Cluster +// +kubebuilder:resource:path=spokes +// +kubebuilder:printcolumn:name="PHASE",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp" // Spoke is the Schema for the spokes API type Spoke struct { @@ -71,6 +298,60 @@ type SpokeList struct { Items []Spoke `json:"items"` } +// BaseArgs returns the base arguments for all clusteradm commands. +func (s *Spoke) BaseArgs() []string { + return []string{ + fmt.Sprintf("--timeout=%d", s.Spec.Timeout), + fmt.Sprintf("--v=%d", s.Spec.LogVerbosity), + } +} + +// GetCondition returns the condition with the supplied type, if it exists. +func (s *SpokeStatus) GetCondition(cType string) *Condition { + for _, c := range s.Conditions { + if c.Type == cType { + return &c + } + } + return nil +} + +// SetConditions sets the supplied conditions, adding net-new conditions and +// replacing any existing conditions of the same type. This is a no-op if all +// supplied conditions are identical (ignoring the last transition time) to +// those already set. If cover is false, existing conditions are not replaced. 
+func (s *SpokeStatus) SetConditions(cover bool, c ...Condition) { + for _, new := range c { + exists := false + for i, existing := range s.Conditions { + if existing.Type != new.Type { + continue + } + if existing.Equal(new) { + exists = true + continue + } + exists = true + if cover { + s.Conditions[i] = new + } + } + if !exists { + s.Conditions = append(s.Conditions, new) + } + } +} + +// GetCondition gets the condition with the supplied type, if it exists. +func (s *Spoke) GetCondition(cType string) *Condition { + return s.Status.GetCondition(cType) +} + +// SetConditions sets the supplied conditions on a Spoke, replacing any existing conditions. +func (s *Spoke) SetConditions(cover bool, c ...Condition) { + s.Status.SetConditions(cover, c...) +} + func init() { SchemeBuilder.Register(&Spoke{}, &SpokeList{}) } diff --git a/fleetconfig-controller/api/v1beta1/zz_generated.deepcopy.go b/fleetconfig-controller/api/v1beta1/zz_generated.deepcopy.go index 3f7c92e6..50eda6ef 100644 --- a/fleetconfig-controller/api/v1beta1/zz_generated.deepcopy.go +++ b/fleetconfig-controller/api/v1beta1/zz_generated.deepcopy.go @@ -24,13 +24,133 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddOn) DeepCopyInto(out *AddOn) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOn. +func (in *AddOn) DeepCopy() *AddOn { + if in == nil { + return nil + } + out := new(AddOn) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AddOnConfig) DeepCopyInto(out *AddOnConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnConfig. +func (in *AddOnConfig) DeepCopy() *AddOnConfig { + if in == nil { + return nil + } + out := new(AddOnConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterManager) DeepCopyInto(out *ClusterManager) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + out.Source = in.Source +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterManager. +func (in *ClusterManager) DeepCopy() *ClusterManager { + if in == nil { + return nil + } + out := new(ClusterManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.Condition.DeepCopyInto(&out.Condition) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapRef) DeepCopyInto(out *ConfigMapRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapRef. +func (in *ConfigMapRef) DeepCopy() *ConfigMapRef { + if in == nil { + return nil + } + out := new(ConfigMapRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Helm) DeepCopyInto(out *Helm) { + *out = *in + if in.Set != nil { + in, out := &in.Set, &out.Set + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SetJSON != nil { + in, out := &in.SetJSON, &out.SetJSON + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SetLiteral != nil { + in, out := &in.SetLiteral, &out.SetLiteral + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SetString != nil { + in, out := &in.SetString, &out.SetString + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Helm. +func (in *Helm) DeepCopy() *Helm { + if in == nil { + return nil + } + out := new(Helm) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Hub) DeepCopyInto(out *Hub) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hub. @@ -51,6 +171,21 @@ func (in *Hub) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubAddOn) DeepCopyInto(out *HubAddOn) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubAddOn. +func (in *HubAddOn) DeepCopy() *HubAddOn { + if in == nil { + return nil + } + out := new(HubAddOn) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HubList) DeepCopyInto(out *HubList) { *out = *in @@ -83,13 +218,45 @@ func (in *HubList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubRef) DeepCopyInto(out *HubRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubRef. +func (in *HubRef) DeepCopy() *HubRef { + if in == nil { + return nil + } + out := new(HubRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HubSpec) DeepCopyInto(out *HubSpec) { *out = *in - if in.Foo != nil { - in, out := &in.Foo, &out.Foo - *out = new(string) - **out = **in + if in.ClusterManager != nil { + in, out := &in.ClusterManager, &out.ClusterManager + *out = new(ClusterManager) + (*in).DeepCopyInto(*out) + } + in.Kubeconfig.DeepCopyInto(&out.Kubeconfig) + if in.SingletonControlPlane != nil { + in, out := &in.SingletonControlPlane, &out.SingletonControlPlane + *out = new(SingletonControlPlane) + (*in).DeepCopyInto(*out) + } + in.RegistrationAuth.DeepCopyInto(&out.RegistrationAuth) + if in.AddOnConfigs != nil { + in, out := &in.AddOnConfigs, &out.AddOnConfigs + *out = make([]AddOnConfig, len(*in)) + copy(*out, *in) + } + if in.HubAddOns != nil { + in, out := &in.HubAddOns, &out.HubAddOns + *out = make([]HubAddOn, len(*in)) + copy(*out, *in) } } @@ -106,6 +273,18 @@ func (in *HubSpec) DeepCopy() *HubSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HubStatus) DeepCopyInto(out *HubStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstalledHubAddOns != nil { + in, out := &in.InstalledHubAddOns, &out.InstalledHubAddOns + *out = make([]InstalledHubAddOn, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubStatus. @@ -118,13 +297,191 @@ func (in *HubStatus) DeepCopy() *HubStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstalledHubAddOn) DeepCopyInto(out *InstalledHubAddOn) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstalledHubAddOn. +func (in *InstalledHubAddOn) DeepCopy() *InstalledHubAddOn { + if in == nil { + return nil + } + out := new(InstalledHubAddOn) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Klusterlet) DeepCopyInto(out *Klusterlet) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.ManagedClusterKubeconfig.DeepCopyInto(&out.ManagedClusterKubeconfig) + in.Resources.DeepCopyInto(&out.Resources) + if in.ValuesFrom != nil { + in, out := &in.ValuesFrom, &out.ValuesFrom + *out = new(ConfigMapRef) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Klusterlet. 
+func (in *Klusterlet) DeepCopy() *Klusterlet { + if in == nil { + return nil + } + out := new(Klusterlet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Kubeconfig) DeepCopyInto(out *Kubeconfig) { + *out = *in + if in.SecretReference != nil { + in, out := &in.SecretReference, &out.SecretReference + *out = new(SecretReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kubeconfig. +func (in *Kubeconfig) DeepCopy() *Kubeconfig { + if in == nil { + return nil + } + out := new(Kubeconfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCMSource) DeepCopyInto(out *OCMSource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCMSource. +func (in *OCMSource) DeepCopy() *OCMSource { + if in == nil { + return nil + } + out := new(OCMSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistrationAuth) DeepCopyInto(out *RegistrationAuth) { + *out = *in + if in.AutoApprovedARNPatterns != nil { + in, out := &in.AutoApprovedARNPatterns, &out.AutoApprovedARNPatterns + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrationAuth. +func (in *RegistrationAuth) DeepCopy() *RegistrationAuth { + if in == nil { + return nil + } + out := new(RegistrationAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(ResourceValues) + **out = **in + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(ResourceValues) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. +func (in *ResourceSpec) DeepCopy() *ResourceSpec { + if in == nil { + return nil + } + out := new(ResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceValues) DeepCopyInto(out *ResourceValues) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceValues. +func (in *ResourceValues) DeepCopy() *ResourceValues { + if in == nil { + return nil + } + out := new(ResourceValues) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretReference) DeepCopyInto(out *SecretReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. +func (in *SecretReference) DeepCopy() *SecretReference { + if in == nil { + return nil + } + out := new(SecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingletonControlPlane) DeepCopyInto(out *SingletonControlPlane) { + *out = *in + if in.Helm != nil { + in, out := &in.Helm, &out.Helm + *out = new(Helm) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingletonControlPlane. 
+func (in *SingletonControlPlane) DeepCopy() *SingletonControlPlane { + if in == nil { + return nil + } + out := new(SingletonControlPlane) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Spoke) DeepCopyInto(out *Spoke) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spoke. @@ -180,10 +537,15 @@ func (in *SpokeList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SpokeSpec) DeepCopyInto(out *SpokeSpec) { *out = *in - if in.Foo != nil { - in, out := &in.Foo, &out.Foo - *out = new(string) - **out = **in + out.HubRef = in.HubRef + in.Kubeconfig.DeepCopyInto(&out.Kubeconfig) + in.Klusterlet.DeepCopyInto(&out.Klusterlet) + if in.AddOns != nil { + in, out := &in.AddOns, &out.AddOns + *out = make([]AddOn, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } @@ -200,6 +562,18 @@ func (in *SpokeSpec) DeepCopy() *SpokeSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SpokeStatus) DeepCopyInto(out *SpokeStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnabledAddons != nil { + in, out := &in.EnabledAddons, &out.EnabledAddons + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpokeStatus. 
diff --git a/fleetconfig-controller/charts/fleetconfig-controller/README.md b/fleetconfig-controller/charts/fleetconfig-controller/README.md index ec9d8510..58993875 100644 --- a/fleetconfig-controller/charts/fleetconfig-controller/README.md +++ b/fleetconfig-controller/charts/fleetconfig-controller/README.md @@ -16,7 +16,7 @@ helm install fleetconfig-controller ocm/fleetconfig-controller -n fleetconfig-sy ### FleetConfig Configuration -Configuration for the FleetConfig resource created on the Hub. By default, bootstraps the Hub cluster in hub-as-spoke mode. +Configuration for the FleetConfig resources (Hub and Spoke) created on the Hub. By default, bootstraps the Hub cluster in hub-as-spoke mode. ### Spoke Feature Gates Uncomment and configure `fleetConfig.spokeFeatureGates` to enable feature gates for the Klusterlet on each Spoke. Do not disable the feature gates that are enabled by default. @@ -69,7 +69,7 @@ Resource specifications for all klusterlet-managed containers. | Name | Description | Value | | ----------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------- | -| `fleetConfig.enabled` | Whether to create a FleetConfig resource. | `true` | +| `fleetConfig.enabled` | Whether to create a FleetConfig resources. | `true` | | `fleetConfig.timeout` | Timeout in seconds for all clusteradm operations, including init, accept, join, upgrade, etc. | `300` | | `fleetConfig.logVerbosity` | Log verbosity. Valid values: 0-10, 0 is the least verbose, 10 is the most verbose. 
| `0` | | `fleetConfig.spokeAnnotations` | Global annotations to apply to all spoke clusters. If not present, the 'agent.open-cluster-management.io/' prefix is added to each key. Each annotation is added to klusterlet.spec.registrationConfiguration.clusterAnnotations on every spoke and subsequently to the ManagedClusters on the hub. Per-spoke annotations take precedence over the global annotations. | `{}` | @@ -80,6 +80,7 @@ Resource specifications for all klusterlet-managed containers. | `fleetConfig.registrationAuth.driver` | The authentication driver to use (default: "csr"). Set to "awsirsa" to use AWS IAM Roles for Service Accounts (IRSA) for EKS FleetConfigs. | `csr` | | `fleetConfig.registrationAuth.hubClusterARN` | The ARN of the hub cluster. This is only required if configuring an EKS FleetConfig. Example: "arn:aws:eks:us-west-2::cluster/". | `""` | | `fleetConfig.registrationAuth.autoApprovedARNPatterns` | Optional list of spoke cluster name ARN patterns that the hub will auto-approve. | `[]` | +| `fleetConfig.hub.name` | Name of the Hub resource which will manage the spoke cluster. | `hub` | | `fleetConfig.hub.addOnConfigs` | Global add-on configuration for the hub cluster. | `[]` | | `fleetConfig.hub.hubAddOns` | Built-in add-on configuration for the hub cluster. | `[]` | | `fleetConfig.hub.clusterManager.enabled` | Whether to enable the cluster manager. Set to false if using Singleton Control Plane. | `true` | @@ -109,6 +110,8 @@ Resource specifications for all klusterlet-managed containers. | `fleetConfig.hub.singletonControlPlane.helm.setLiteral` | List of comma-separated Helm literal STRING values. | `[]` | | `fleetConfig.hub.singletonControlPlane.helm.setString` | List of comma-separated Helm STRING values, e.g., key1=val1,key2=val2. | `[]` | | `fleetConfig.spokes[0].name` | Name of the spoke cluster. | `hub-as-spoke` | +| `fleetConfig.spokes[0].hubRef.name` | Name of the Hub resource which will manage the spoke cluster. 
| `hub` | +| `fleetConfig.spokes[0].hubRef.namespace` | Namespace of the Hub resource which will manage the spoke cluster. | `""` | | `fleetConfig.spokes[0].createNamespace` | If true, create open-cluster-management namespace and agent namespace (open-cluster-management-agent for Default mode, for Hosted mode), otherwise use existing one. Do not edit this name if you are using the default hub-as-spoke mode. | `true` | | `fleetConfig.spokes[0].syncLabels` | If true, sync the labels from klusterlet to all agent resources. | `false` | | `fleetConfig.spokes[0].clusterARN` | The ARN of the spoke cluster. This is only required if configuring an EKS FleetConfig. Example: "arn:aws:eks:us-west-2::cluster/". | `""` | @@ -148,6 +151,7 @@ Resource specifications for all klusterlet-managed containers. | Name | Description | Value | | --------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------- | +| `spokeConcurrentReconciles` | Maximum number of Spoke resources that will be reconciled at the same time. | `5` | | `kubernetesProvider` | Kubernetes provider of the cluster that fleetconfig-controller will be installed on. Valid values are "Generic", "EKS", "GKE-Ingress". | `Generic` | | `replicas` | fleetconfig-controller replica count | `1` | | `imageRegistry` | Image registry | `""` | @@ -160,8 +164,8 @@ Resource specifications for all klusterlet-managed containers. 
| `containerSecurityContext.capabilities.drop` | capabilities to drop | `["ALL"]` | | `containerSecurityContext.runAsNonRoot` | runAsNonRoot | `true` | | `resources.limits.cpu` | fleetconfig controller's cpu limit | `500m` | -| `resources.limits.memory` | fleetconfig controller's memory limit | `256Mi` | -| `resources.requests.cpu` | fleetconfig controller's cpu request | `100m` | +| `resources.limits.memory` | fleetconfig controller's memory limit | `512Mi` | +| `resources.requests.cpu` | fleetconfig controller's cpu request | `200m` | | `resources.requests.memory` | fleetconfig controller's memory request | `256Mi` | | `healthCheck.port` | port the liveness & readiness probes are bound to | `9440` | | `kubernetesClusterDomain` | kubernetes cluster domain | `cluster.local` | diff --git a/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io_hubs.yaml b/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io_hubs.yaml index 2b9548a4..ec1b970c 100644 --- a/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io_hubs.yaml +++ b/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io_hubs.yaml @@ -12,9 +12,16 @@ spec: listKind: HubList plural: hubs singular: hub - scope: Cluster + scope: Namespaced versions: - - name: v1beta1 + - additionalPrinterColumns: + - jsonPath: .status.phase + name: PHASE + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 schema: openAPIV3Schema: description: Hub is the Schema for the hubs API @@ -39,13 +46,373 @@ spec: spec: description: spec defines the desired state of Hub properties: - foo: - description: foo is an example field of Hub. Edit hub_types.go to - remove/update + addOnConfigs: + items: + description: AddOnConfig is the configuration of a custom AddOn + that can be installed on a cluster. 
+ properties: + clusterRoleBinding: + description: The rolebinding to the clusterrole in the cluster + namespace for the addon agent + type: string + hubRegistration: + default: false + description: Enable the agent to register to the hub cluster. + Optional, defaults to false. + type: boolean + name: + description: The name of the add-on. + type: string + overwrite: + default: false + description: Whether to overwrite the add-on if it already exists. + Optional, defaults to false. + type: boolean + version: + default: v0.0.1 + description: The add-on version. Optional, defaults to "v0.0.1" + type: string + required: + - name + type: object + type: array + apiServer: + description: |- + APIServer is the API server URL for the Hub cluster. If provided, spokes clusters will + join the hub using this API server instead of the one in the bootstrap kubeconfig. + Spoke clusters with ForceInternalEndpointLookup set to true will ignore this field. type: string + ca: + description: Hub cluster CA certificate, optional + type: string + clusterManager: + description: ClusterManager configuration. + properties: + featureGates: + default: AddonManagement=true + description: |- + A set of comma-separated pairs of the form 'key1=value1,key2=value2' that describe feature gates for alpha/experimental features. + Options are: + - AddonManagement (ALPHA - default=true) + - AllAlpha (ALPHA - default=false) + - AllBeta (BETA - default=false) + - CloudEventsDrivers (ALPHA - default=false) + - DefaultClusterSet (ALPHA - default=false) + - ManagedClusterAutoApproval (ALPHA - default=false) + - ManifestWorkReplicaSet (ALPHA - default=false) + - NilExecutorValidating (ALPHA - default=false) + - ResourceCleanup (BETA - default=true) + - V1beta1CSRAPICompatibility (ALPHA - default=false) + type: string + purgeOperator: + default: true + description: |- + If set, the cluster manager operator will be purged and the open-cluster-management namespace deleted + when the FleetConfig CR is deleted. 
+ type: boolean + resources: + default: {} + description: Resource specifications for all clustermanager-managed + containers. + properties: + limits: + description: The resource limits of all the containers managed + by the Cluster Manager or Klusterlet operators. + properties: + cpu: + description: The number of CPU units to request, e.g., + '800m'. + type: string + memory: + description: The amount of memory to request, e.g., '8Gi'. + type: string + type: object + qosClass: + default: Default + description: |- + The resource QoS class of all the containers managed by the Cluster Manager or Klusterlet operators. + One of Default, BestEffort or ResourceRequirement. + enum: + - Default + - BestEffort + - ResourceRequirement + type: string + requests: + description: The resource requests of all the containers managed + by the Cluster Manager or Klusterlet operators. + properties: + cpu: + description: The number of CPU units to request, e.g., + '800m'. + type: string + memory: + description: The amount of memory to request, e.g., '8Gi'. + type: string + type: object + type: object + source: + default: {} + description: Version and image registry details for the cluster + manager. + properties: + bundleVersion: + default: default + description: |- + The version of predefined compatible image versions (e.g. v0.6.0). Defaults to the latest released version. + You can also set "latest" to install the latest development version. + type: string + registry: + default: quay.io/open-cluster-management + description: The name of the image registry serving OCM images, + which will be used for all OCM components." + type: string + type: object + useBootstrapToken: + description: If set, the bootstrap token will used instead of + a service account token. + type: boolean + type: object + createNamespace: + default: true + description: If true, create open-cluster-management namespace, otherwise + use existing one. 
+ type: boolean + force: + description: If set, the hub will be reinitialized. + type: boolean + hubAddOns: + items: + description: HubAddOn is the configuration for enabling a built-in + AddOn. + properties: + createNamespace: + default: false + description: Whether or not the selected namespace should be + created. If left empty, defaults to false. + type: boolean + installNamespace: + description: The namespace to install the add-on in. If left + empty, installs into the "open-cluster-management-addon" namespace. + type: string + name: + description: Name is the name of the HubAddOn. + enum: + - argocd + - governance-policy-framework + type: string + required: + - name + type: object + type: array + kubeconfig: + description: Kubeconfig details for the Hub cluster. + properties: + context: + description: The context to use in the kubeconfig file. + type: string + inCluster: + description: |- + If set, the kubeconfig will be read from the cluster. + Only applicable for same-cluster operations. + Defaults to false. + type: boolean + secretReference: + description: |- + A reference to an existing secret containing a kubeconfig. + Must be provided for remote clusters. + For same-cluster, must be provided unless InCluster is set to true. + properties: + kubeconfigKey: + default: kubeconfig + description: The map key to access the kubeconfig. Defaults + to 'kubeconfig'. + type: string + name: + description: The name of the secret. + type: string + required: + - name + type: object + type: object + logVerbosity: + default: 0 + description: LogVerbosity is the verbosity of the logs. + enum: + - 0 + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + type: integer + registrationAuth: + default: {} + description: RegistrationAuth provides specifications for registration + authentication. + properties: + autoApprovedARNPatterns: + description: |- + List of AWS EKS ARN patterns so any EKS clusters with these patterns will be auto accepted to join with hub cluster. 
+ Example pattern: "arn:aws:eks:us-west-2:123456789013:cluster/.*" + items: + type: string + type: array + driver: + default: csr + description: |- + The registration authentication driver to use. + Options are: + - csr: Use the default CSR-based registration authentication. + - awsirsa: Use AWS IAM Role for Service Accounts (IRSA) registration authentication. + The set of valid options is open for extension. + enum: + - csr + - awsirsa + type: string + hubClusterARN: + description: The Hub cluster ARN for awsirsa registration authentication. + Required when the driver is awsirsa, otherwise ignored. + type: string + type: object + singleton: + description: |- + Singleton control plane configuration. If provided, deploy a singleton control plane instead of clustermanager. + This is an alpha stage flag. + properties: + helm: + description: |- + Helm configuration for the multicluster-controlplane Helm chart. + For now https://open-cluster-management.io/helm-charts/ocm/multicluster-controlplane is always used - no private registry support. + See: https://github.com/open-cluster-management-io/multicluster-controlplane/blob/main/charts/multicluster-controlplane/values.yaml + properties: + set: + description: Comma-separated Helm values, e.g., key1=val1,key2=val2. + items: + type: string + type: array + setJson: + description: Comma-separated Helm JSON values, e.g., key1=jsonval1,key2=jsonval2. + items: + type: string + type: array + setLiteral: + description: Comma-separated Helm literal STRING values. + items: + type: string + type: array + setString: + description: Comma-separated Helm STRING values, e.g., key1=val1,key2=val2. + items: + type: string + type: array + values: + description: Raw, YAML-formatted Helm values. + type: string + type: object + name: + default: singleton-controlplane + description: The name of the singleton control plane.
+ type: string + type: object + timeout: + default: 300 + description: Timeout is the timeout in seconds for all clusteradm + operations, including init, accept, join, upgrade, etc. + type: integer + required: + - kubeconfig type: object status: description: status defines the observed state of Hub + properties: + conditions: + description: Conditions are the current conditions of the Hub. + items: + description: Condition describes the state of a FleetConfig. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + wantStatus: + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + - wantStatus + type: object + type: array + installedHubAddOns: + items: + description: InstalledHubAddOn tracks metadata for each hubAddon + that is successfully installed on the hub. + properties: + bundleVersion: + description: BundleVersion is the bundle version used when installing + the addon. + type: string + name: + description: Name is the name of the addon. + type: string + namespace: + description: Namespace is the namespace that the addon was installed + into. + type: string + required: + - bundleVersion + - name + type: object + type: array + phase: + description: Phase is the current phase of the Hub reconcile. 
+ type: string type: object required: - spec diff --git a/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io_spokes.yaml b/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io_spokes.yaml index 25b75c85..9bba4b41 100644 --- a/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io_spokes.yaml +++ b/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io_spokes.yaml @@ -12,9 +12,16 @@ spec: listKind: SpokeList plural: spokes singular: spoke - scope: Cluster + scope: Namespaced versions: - - name: v1beta1 + - additionalPrinterColumns: + - jsonPath: .status.phase + name: PHASE + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 schema: openAPIV3Schema: description: Spoke is the Schema for the spokes API @@ -39,13 +46,2331 @@ spec: spec: description: spec defines the desired state of Spoke properties: - foo: - description: foo is an example field of Spoke. Edit spoke_types.go - to remove/update + addOns: + description: AddOns are the add-ons to enable for the spoke cluster. + items: + description: AddOn enables add-on installation on the cluster. + properties: + annotations: + additionalProperties: + type: string + description: Annotations to apply to the add-on. + type: object + configName: + description: The name of the add-on being enabled. Must match + one of the AddOnConfigs or HubAddOns names. + type: string + installNamespace: + description: The namespace to install the add-on in. If left + empty, installs into the "open-cluster-management-addon" namespace. + type: string + required: + - configName + type: object + type: array + clusterARN: + description: |- + ClusterARN is the ARN of the spoke cluster. + This field is optionally used for AWS IRSA registration authentication. 
type: string + createNamespace: + default: true + description: |- + If true, create open-cluster-management namespace and agent namespace (open-cluster-management-agent for Default mode, + klusterlet-name for Hosted mode), otherwise use existing one. + type: boolean + hubRef: + description: HubRef is a reference to the Hub that this Spoke is managed + by. + properties: + name: + description: Name is the name of the Hub that this Spoke is managed + by. + type: string + namespace: + description: Namespace is the namespace of the Hub that this Spoke + is managed by. + type: string + required: + - name + - namespace + type: object + klusterlet: + default: {} + description: Klusterlet configuration. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to apply to the spoke cluster. If not present, the 'agent.open-cluster-management.io/' prefix is added to each key. + Each annotation is added to klusterlet.spec.registrationConfiguration.clusterAnnotations on the spoke and subsequently to the ManagedCluster on the hub. + type: object + featureGates: + default: AddonManagement=true,ClusterClaim=true + description: |- + A set of comma-separated pairs of the form 'key1=value1,key2=value2' that describe feature gates for alpha/experimental features. + Options are: + - AddonManagement (ALPHA - default=true) + - AllAlpha (ALPHA - default=false) + - AllBeta (BETA - default=false) + - ClusterClaim (ALPHA - default=true) + - ExecutorValidatingCaches (ALPHA - default=false) + - RawFeedbackJsonString (ALPHA - default=false) + - V1beta1CSRAPICompatibility (ALPHA - default=false) + type: string + forceInternalEndpointLookup: + description: |- + If true, the installed klusterlet agent will start the cluster registration process by looking for the + internal endpoint from the public cluster-info in the Hub cluster instead of using hubApiServer.
+ type: boolean + forceInternalEndpointLookupManaged: + description: |- + If true, the klusterlet accesses the managed cluster using the internal endpoint from the public + cluster-info in the managed cluster instead of using managedClusterKubeconfig. + type: boolean + managedClusterKubeconfig: + description: External managed cluster kubeconfig, required if + using hosted mode. + properties: + context: + description: The context to use in the kubeconfig file. + type: string + inCluster: + description: |- + If set, the kubeconfig will be read from the cluster. + Only applicable for same-cluster operations. + Defaults to false. + type: boolean + secretReference: + description: |- + A reference to an existing secret containing a kubeconfig. + Must be provided for remote clusters. + For same-cluster, must be provided unless InCluster is set to true. + properties: + kubeconfigKey: + default: kubeconfig + description: The map key to access the kubeconfig. Defaults + to 'kubeconfig'. + type: string + name: + description: The name of the secret. + type: string + required: + - name + type: object + type: object + mode: + default: Default + description: Deployment mode for klusterlet + enum: + - Default + - Hosted + type: string + purgeOperator: + default: true + description: |- + If set, the klusterlet operator will be purged and all open-cluster-management namespaces deleted + when the klusterlet is unjoined from its Hub cluster. + type: boolean + resources: + default: {} + description: Resource specifications for all klusterlet-managed + containers. + properties: + limits: + description: The resource limits of all the containers managed + by the Cluster Manager or Klusterlet operators. + properties: + cpu: + description: The number of CPU units to request, e.g., + '800m'. + type: string + memory: + description: The amount of memory to request, e.g., '8Gi'.
+ type: string + type: object + qosClass: + default: Default + description: |- + The resource QoS class of all the containers managed by the Cluster Manager or Klusterlet operators. + One of Default, BestEffort or ResourceRequirement. + enum: + - Default + - BestEffort + - ResourceRequirement + type: string + requests: + description: The resource requests of all the containers managed + by the Cluster Manager or Klusterlet operators. + properties: + cpu: + description: The number of CPU units to request, e.g., + '800m'. + type: string + memory: + description: The amount of memory to request, e.g., '8Gi'. + type: string + type: object + type: object + singleton: + description: |- + If true, deploy klusterlet in singleton mode, with registration and work agents running in a single pod. + This is an alpha stage flag. + type: boolean + values: + description: Values for the klusterlet Helm chart. Values defined + here override values which are defined in ValuesFrom. + properties: + affinity: + description: Affinity is the affinity of the operator deployment + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. 
it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. 
The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. 
+ If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + bootstrapHubKubeConfig: + description: BootstrapHubKubeConfig should be the kubeConfig + file of the hub cluster via setting --set-file= optional + type: string + createNamespace: + description: CreateNamespace is used in the render function + to append the release ns in the objects. + type: boolean + enableSyncLabels: + description: EnableSyncLabels is to enable the feature which + can sync the labels from klusterlet to all agent resources. + type: boolean + externalManagedKubeConfig: + description: |- + ExternalManagedKubeConfig should be the kubeConfig file of the managed cluster via setting --set-file= + only need to set in the hosted mode. optional + type: string + images: + description: Images is the configurations for all images used + in operator deployment and klusterlet CR. + properties: + imageCredentials: + description: |- + The image pull secret name is open-cluster-management-image-pull-credentials. + Please set the userName and password if you use a private image registry. 
+ properties: + createImageCredentials: + type: boolean + dockerConfigJson: + type: string + password: + type: string + userName: + type: string + type: object + imagePullPolicy: + description: ImagePullPolicy is the image pull policy + of operator image. Default is IfNotPresent. + type: string + overrides: + description: |- + Overrides is to override the image of the component, if this is specified, + the registry and tag will be ignored. + properties: + addOnManagerImage: + description: AddOnManagerImage is the image of the + addOnManager component + type: string + operatorImage: + description: OperatorImage is the image of the operator + component. + type: string + placementImage: + description: PlacementImage is the image of the placement + component + type: string + registrationImage: + description: RegistrationImage is the image of the + registration component. + type: string + workImage: + description: WorkImage is the image of the work component. + type: string + type: object + registry: + description: Registry is registry name must NOT contain + a trailing slash. + type: string + tag: + description: Tag is the operator image tag. + type: string + type: object + klusterlet: + description: Klusterlet is the configuration of klusterlet + CR + properties: + clusterName: + type: string + create: + description: Create determines if create the klusterlet + CR, default is true. + type: boolean + externalServerURLs: + description: |- + ExternalServerURLs represents a list of apiserver urls and ca bundles that is accessible externally + If it is set empty, managed cluster has no externally accessible url that hub cluster can visit. + items: + description: ServerURL represents the apiserver url + and ca bundle that is accessible externally + properties: + caBundle: + description: |- + CABundle is the ca bundle to connect to apiserver of the managed cluster. + System certs are used if it is not set. 
+ format: byte + type: string + url: + description: URL is the url of apiserver endpoint + of the managed cluster. + type: string + required: + - url + type: object + type: array + mode: + description: InstallMode represents the mode of deploy + klusterlet + type: string + name: + type: string + namespace: + type: string + nodePlacement: + description: NodePlacement enables explicit control over + the scheduling of the deployed pods. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines which Nodes the + Pods are scheduled on. The default is an empty list. + type: object + tolerations: + description: |- + Tolerations are attached by pods to tolerate any taint that matches + the triple using the matching operator . + The default is an empty list. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). 
Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + registrationConfiguration: + description: RegistrationConfiguration contains the configuration + of registration + properties: + bootstrapKubeConfigs: + description: |- + BootstrapKubeConfigs defines the ordered list of bootstrap kubeconfigs. The order decides which bootstrap kubeconfig to use first when rebootstrap. + + When the agent loses the connection to the current hub over HubConnectionTimeoutSeconds, or the managedcluster CR + is set `hubAcceptsClient=false` on the hub, the controller marks the related bootstrap kubeconfig as "failed". + + A failed bootstrapkubeconfig won't be used for the duration specified by SkipFailedBootstrapKubeConfigSeconds. + But if the user updates the content of a failed bootstrapkubeconfig, the "failed" mark will be cleared. + properties: + localSecretsConfig: + description: |- + LocalSecretsConfig include a list of secrets that contains the kubeconfigs for ordered bootstrap kubeconifigs. + The secrets must be in the same namespace where the agent controller runs. + properties: + hubConnectionTimeoutSeconds: + default: 600 + description: |- + HubConnectionTimeoutSeconds is used to set the timeout of connecting to the hub cluster. + When agent loses the connection to the hub over the timeout seconds, the agent do a rebootstrap. + By default is 10 mins. + format: int32 + minimum: 180 + type: integer + kubeConfigSecrets: + description: KubeConfigSecrets is a list of + secret names. The secrets are in the same + namespace where the agent controller runs. + items: + properties: + name: + description: Name is the name of the + secret. 
+ type: string + required: + - name + type: object + type: array + required: + - kubeConfigSecrets + type: object + type: + default: None + description: |- + Type specifies the type of priority bootstrap kubeconfigs. + By default, it is set to None, representing no priority bootstrap kubeconfigs are set. + enum: + - None + - LocalSecrets + type: string + required: + - type + type: object + clientCertExpirationSeconds: + description: |- + clientCertExpirationSeconds represents the seconds of a client certificate to expire. If it is not set or 0, the default + duration seconds will be set by the hub cluster. If the value is larger than the max signing duration seconds set on + the hub cluster, the max signing duration seconds will be set. + format: int32 + type: integer + clusterAnnotations: + additionalProperties: + type: string + description: |- + ClusterAnnotations is annotations with the reserve prefix "agent.open-cluster-management.io" set on + ManagedCluster when creating only, other actors can update it afterwards. + type: object + clusterClaimConfiguration: + description: |- + ClusterClaimConfiguration represents the configuration of ClusterClaim + Effective only when the `ClusterClaim` feature gate is enabled. + properties: + maxCustomClusterClaims: + default: 20 + description: Maximum number of custom ClusterClaims + allowed. + format: int32 + type: integer + reservedClusterClaimSuffixes: + description: Custom suffixes for reserved ClusterClaims. + items: + maxLength: 64 + minLength: 1 + type: string + maxItems: 10 + type: array + required: + - maxCustomClusterClaims + type: object + featureGates: + description: "FeatureGates represents the list of + feature gates for registration\nIf it is set empty, + default feature gates will be used.\nIf it is set, + featuregate/Foo is an example of one item in FeatureGates:\n + \ 1. If featuregate/Foo does not exist, registration-operator + will discard it\n 2. If featuregate/Foo exists + and is false by default. 
It is now possible to set + featuregate/Foo=[false|true]\n 3. If featuregate/Foo + exists and is true by default. If a cluster-admin + upgrading from 1 to 2 wants to continue having featuregate/Foo=false,\n + \the can set featuregate/Foo=false before upgrading. + Let's say the cluster-admin wants featuregate/Foo=false." + items: + properties: + feature: + description: Feature is the key of feature gate. + e.g. featuregate/Foo. + type: string + mode: + default: Disable + description: |- + Mode is either Enable, Disable, "" where "" is Disable by default. + In Enable mode, a valid feature gate `featuregate/Foo` will be set to "--featuregate/Foo=true". + In Disable mode, a valid feature gate `featuregate/Foo` will be set to "--featuregate/Foo=false". + enum: + - Enable + - Disable + type: string + required: + - feature + type: object + type: array + kubeAPIBurst: + default: 100 + description: |- + KubeAPIBurst indicates the maximum burst of the throttle while talking with apiserver on the spoke cluster. + If it is set empty, use the default value: 100 + format: int32 + type: integer + kubeAPIQPS: + default: 50 + description: |- + KubeAPIQPS indicates the maximum QPS while talking with apiserver on the spoke cluster. + If it is set empty, use the default value: 50 + format: int32 + type: integer + registrationDriver: + description: This provides driver details required + to register with hub + properties: + authType: + default: csr + description: Type of the authentication used by + managedcluster to register as well as pull work + from hub. Possible values are csr and awsirsa. + enum: + - csr + - awsirsa + type: string + awsIrsa: + description: |- + Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account. + This is required only when the authType is awsirsa. + properties: + hubClusterArn: + description: |- + The arn of the hub cluster (ie: an EKS cluster). 
This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet. + Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1. + minLength: 1 + pattern: ^arn:aws:eks:([a-zA-Z0-9-]+):(\d{12}):cluster/([a-zA-Z0-9-]+)$ + type: string + managedClusterArn: + description: |- + The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub + as well as used by kluslerlet-agent, to assume role suffixed with the md5hash, on startup. + Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1. + minLength: 1 + pattern: ^arn:aws:eks:([a-zA-Z0-9-]+):(\d{12}):cluster/([a-zA-Z0-9-]+)$ + type: string + required: + - hubClusterArn + - managedClusterArn + type: object + required: + - authType + type: object + type: object + resourceRequirement: + description: |- + ResourceRequirement specify QoS classes of deployments managed by clustermanager. + It applies to all the containers in the deployments. + properties: + resourceRequirements: + description: ResourceRequirements defines resource + requests and limits when Type is ResourceQosClassResourceRequirement + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + type: + default: Default + enum: + - Default + - BestEffort + - ResourceRequirement + type: string + type: object + workConfiguration: + description: WorkConfiguration contains the configuration + of work + properties: + appliedManifestWorkEvictionGracePeriod: + description: |- + AppliedManifestWorkEvictionGracePeriod is the eviction grace period the work agent will wait before + evicting the AppliedManifestWorks, whose corresponding ManifestWorks are missing on the hub cluster, from + the managed cluster. If not present, the default value of the work agent will be used. 
+ pattern: ^([0-9]+(s|m|h))+$ + type: string + featureGates: + description: "FeatureGates represents the list of + feature gates for work\nIf it is set empty, default + feature gates will be used.\nIf it is set, featuregate/Foo + is an example of one item in FeatureGates:\n 1. + If featuregate/Foo does not exist, registration-operator + will discard it\n 2. If featuregate/Foo exists + and is false by default. It is now possible to set + featuregate/Foo=[false|true]\n 3. If featuregate/Foo + exists and is true by default. If a cluster-admin + upgrading from 1 to 2 wants to continue having featuregate/Foo=false,\n + \the can set featuregate/Foo=false before upgrading. + Let's say the cluster-admin wants featuregate/Foo=false." + items: + properties: + feature: + description: Feature is the key of feature gate. + e.g. featuregate/Foo. + type: string + mode: + default: Disable + description: |- + Mode is either Enable, Disable, "" where "" is Disable by default. + In Enable mode, a valid feature gate `featuregate/Foo` will be set to "--featuregate/Foo=true". + In Disable mode, a valid feature gate `featuregate/Foo` will be set to "--featuregate/Foo=false". + enum: + - Enable + - Disable + type: string + required: + - feature + type: object + type: array + hubKubeAPIBurst: + default: 100 + description: |- + HubKubeAPIBurst indicates the maximum burst of the throttle while talking with apiserver on the hub cluster. + If it is set empty, use the default value: 100 + format: int32 + type: integer + hubKubeAPIQPS: + default: 50 + description: |- + HubKubeAPIQPS indicates the maximum QPS while talking with apiserver on the hub cluster. + If it is set empty, use the default value: 50 + format: int32 + type: integer + kubeAPIBurst: + default: 100 + description: |- + KubeAPIBurst indicates the maximum burst of the throttle while talking with apiserver on the spoke cluster. 
+ If it is set empty, use the default value: 100 + format: int32 + type: integer + kubeAPIQPS: + default: 50 + description: |- + KubeAPIQPS indicates the maximum QPS while talking with apiserver on the spoke cluster. + If it is set empty, use the default value: 50 + format: int32 + type: integer + statusSyncInterval: + description: |- + StatusSyncInterval is the interval for the work agent to check the status of ManifestWorks. + Larger value means less frequent status sync and less api calls to the managed cluster, vice versa. + The value(x) should be: 5s <= x <= 1h. + pattern: ^([0-9]+(s|m|h))+$ + type: string + type: object + type: object + multiHubBootstrapHubKubeConfigs: + description: when MultipleHubs feature gate in klusterlet.registrationConfiguration + is enabled, need to set multiple bootstrap hub kubeConfigs + here. + items: + properties: + kubeConfig: + description: the kubeConfig file of the hub cluster + type: string + name: + description: the boostStrap secret name + type: string + type: object + type: array + noOperator: + description: NoOperator is to only deploy the klusterlet CR + if set true. + type: boolean + nodeSelector: + additionalProperties: + type: string + description: NodeSelector is the nodeSelector of the operator + deployment + type: object + podSecurityContext: + description: PodSecurityContext is the pod SecurityContext + in the operator deployment + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. 
+ Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. 
+ If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. 
+ + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. 
+ RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. 
+ If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + priorityClassName: + description: PriorityClassName is the name of the PriorityClass + that will be used by the deployed klusterlet agent and operator. + type: string + replicaCount: + description: ReplicaCount is the replicas for the klusterlet + operator deployment. + type: integer + resources: + description: Resources is the resource requirements of the + operator deployment + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + securityContext: + description: SecurityContext is the container SecurityContext + in operator deployment + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". 
Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: string + type: object + type: object + tolerations: + description: Tolerations is the tolerations of the operator + deployment + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + valuesFrom: + description: |- + ValuesFrom is an optional reference to a ConfigMap containing values for the klusterlet Helm chart. + optional + properties: + key: + description: Key is the key under which the data is stored. 
+ minLength: 1 + type: string + name: + description: Name is the name of the ConfigMap + minLength: 1 + type: string + required: + - key + - name + type: object + type: object + kubeconfig: + description: Kubeconfig details for the Spoke cluster. + properties: + context: + description: The context to use in the kubeconfig file. + type: string + inCluster: + description: |- + If set, the kubeconfig will be read from the cluster. + Only applicable for same-cluster operations. + Defaults to false. + type: boolean + secretReference: + description: |- + A reference to an existing secret containing a kubeconfig. + Must be provided for remote clusters. + For same-cluster, must be provided unless InCluster is set to true. + properties: + kubeconfigKey: + default: kubeconfig + description: The map key to access the kubeconfig. Defaults + to 'kubeconfig'. + type: string + name: + description: The name of the secret. + type: string + required: + - name + type: object + type: object + logVerbosity: + default: 0 + description: |- + LogVerbosity is the verbosity of the logs. + If not set, defaults to the Hub's logVerbosity. + enum: + - 0 + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + type: integer + proxyCa: + description: Proxy CA certificate, optional + type: string + proxyUrl: + description: URL of a forward proxy server used by agents to connect + to the Hub cluster. + type: string + syncLabels: + description: If true, sync the labels from klusterlet to all agent + resources. + type: boolean + timeout: + default: 300 + description: |- + Timeout is the timeout in seconds for all clusteradm operations, including init, accept, join, upgrade, etc. + If not set, defaults to the Hub's timeout. + type: integer + required: + - hubRef + - kubeconfig type: object status: description: status defines the observed state of Spoke + properties: + conditions: + description: Conditions are the current conditions of the Spoke. 
+ items: + description: Condition describes the state of a FleetConfig. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + wantStatus: + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + - wantStatus + type: object + type: array + enabledAddons: + default: [] + description: EnabledAddons is the list of addons that are currently + enabled on the Spoke. + items: + type: string + type: array + klusterletHash: + default: "" + description: KlusterletHash is a hash of the Spoke's .spec.klusterlet.values. + type: string + phase: + description: Phase is the current phase of the Spoke reconcile. + type: string type: object required: - spec diff --git a/fleetconfig-controller/charts/fleetconfig-controller/templates/admission-webhooks/validating-webhook-configuration.yaml b/fleetconfig-controller/charts/fleetconfig-controller/templates/admission-webhooks/validating-webhook-configuration.yaml index ab4d2e3d..16f6b9a7 100644 --- a/fleetconfig-controller/charts/fleetconfig-controller/templates/admission-webhooks/validating-webhook-configuration.yaml +++ b/fleetconfig-controller/charts/fleetconfig-controller/templates/admission-webhooks/validating-webhook-configuration.yaml @@ -36,4 +36,56 @@ webhooks: {{- else }} timeoutSeconds: 30 {{- end }} +- admissionReviewVersions: + - v1 + clientConfig: + caBundle: Cg== + service: + name: {{ template "chart.fullname" . 
}}-webhook + namespace: {{ .Release.Namespace }} + path: /validate-fleetconfig-open-cluster-management-io-v1beta1-hub + failurePolicy: {{ .Values.admissionWebhooks.failurePolicy }} + name: vhub-v1beta1.kb.io + rules: + - apiGroups: + - fleetconfig.open-cluster-management.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - hubs + sideEffects: None + {{- if not .Values.devspaceEnabled }} + timeoutSeconds: 5 + {{- else }} + timeoutSeconds: 30 + {{- end }} +- admissionReviewVersions: + - v1 + clientConfig: + caBundle: Cg== + service: + name: {{ template "chart.fullname" . }}-webhook + namespace: {{ .Release.Namespace }} + path: /validate-fleetconfig-open-cluster-management-io-v1beta1-spoke + failurePolicy: {{ .Values.admissionWebhooks.failurePolicy }} + name: vspoke-v1beta1.kb.io + rules: + - apiGroups: + - fleetconfig.open-cluster-management.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - spokes + sideEffects: None + {{- if not .Values.devspaceEnabled }} + timeoutSeconds: 5 + {{- else }} + timeoutSeconds: 30 + {{- end }} {{- end }} \ No newline at end of file diff --git a/fleetconfig-controller/charts/fleetconfig-controller/templates/deployment.yaml b/fleetconfig-controller/charts/fleetconfig-controller/templates/deployment.yaml index f4b2bf4e..9a661072 100644 --- a/fleetconfig-controller/charts/fleetconfig-controller/templates/deployment.yaml +++ b/fleetconfig-controller/charts/fleetconfig-controller/templates/deployment.yaml @@ -36,6 +36,7 @@ spec: - "--use-webhook=true" - "--webhook-port={{ .Values.webhookService.port }}" - "--webhook-cert-dir={{ .Values.admissionWebhooks.certificate.mountPath }}" + - "--spoke-concurrent-reconciles={{ .Values.spokeConcurrentReconciles }}" {{ end }} command: - /manager diff --git a/fleetconfig-controller/charts/fleetconfig-controller/templates/fleetconfig.yaml b/fleetconfig-controller/charts/fleetconfig-controller/templates/fleetconfig.yaml index 35a69d5c..95bb8c1b 
100644 --- a/fleetconfig-controller/charts/fleetconfig-controller/templates/fleetconfig.yaml +++ b/fleetconfig-controller/charts/fleetconfig-controller/templates/fleetconfig.yaml @@ -1,11 +1,13 @@ {{- if index .Values "fleetConfig" "enabled" }} -apiVersion: fleetconfig.open-cluster-management.io/v1alpha1 -kind: FleetConfig +{{- $releaseNamespace := .Release.Namespace }} +{{- $spokeFeatureGates := .Values.fleetConfig.spokeFeatureGates }} +apiVersion: fleetconfig.open-cluster-management.io/v1beta1 +kind: Hub metadata: annotations: helm.sh/resource-policy: keep - name: fleetconfig - namespace: {{ .Release.Namespace }} + name: {{ .Values.fleetConfig.hub.name }} + namespace: {{ $releaseNamespace }} spec: timeout: {{ .Values.fleetConfig.timeout }} logVerbosity: {{ .Values.fleetConfig.logVerbosity }} @@ -22,86 +24,32 @@ spec: {{- end }} {{- end }} {{- end }} - hub: - {{- if (.Values.fleetConfig.hub.clusterManager | default dict).enabled }} - clusterManager: - featureGates: {{ include "featureGates" (dict "dict" .Values.fleetConfig.hub.clusterManager.featureGates) | quote }} - purgeOperator: {{ .Values.fleetConfig.hub.clusterManager.purgeOperator }} - resources: {{- include "deepClean" .Values.fleetConfig.hub.clusterManager.resources | nindent 8 }} - source: - bundleVersion: {{ .Values.fleetConfig.source.bundleVersion }} - registry: {{ .Values.fleetConfig.source.registry }} - {{- end }} - {{- if (.Values.fleetConfig.hub.singletonControlPlane | default dict).enabled }} - {{- $scp := omit .Values.fleetConfig.hub.singletonControlPlane "enabled" -}} - singleton: {{- toYaml $scp | nindent 6 }} - {{- end }} - createNamespace: {{ .Values.fleetConfig.hub.createNamespace }} - force: {{ .Values.fleetConfig.hub.force }} - {{- with .Values.fleetConfig.hub.kubeconfig }} - kubeconfig: - context: {{ .context | quote }} - inCluster: {{ .inCluster }} - {{- if and .secretReference (not (empty .secretReference.name)) }} - secretReference: {{ toYaml .secretReference | nindent 8 }} - {{- end 
}} - {{- end }} - apiServer: {{ .Values.fleetConfig.hub.apiServer | quote }} - ca: {{ .Values.fleetConfig.hub.ca | quote }} - spokes: - {{- $spokeFeatureGates := .Values.fleetConfig.spokeFeatureGates }} - {{- range .Values.fleetConfig.spokes }} - - name: {{ .name }} - createNamespace: {{ .createNamespace }} - syncLabels: {{ .syncLabels }} - {{- with .kubeconfig }} - kubeconfig: - context: {{ .context | quote }} - inCluster: {{ .inCluster }} - {{- if and .secretReference (not (empty .secretReference.name)) }} - secretReference: {{ toYaml .secretReference | nindent 10 }} - {{- end }} - {{- end }} - proxyCa: {{ .proxyCa | quote }} - proxyUrl: {{ .proxyUrl | quote }} - {{- if .clusterARN }} - clusterARN: {{ .clusterARN | quote }} - {{- end }} - klusterlet: - {{- if and .klusterlet.annotations $.Values.fleetConfig.spokeAnnotations }} - annotations: {{- toYaml (merge .klusterlet.annotations $.Values.fleetConfig.spokeAnnotations) | nindent 10 }} - {{- else if .klusterlet.annotations }} - annotations: {{- toYaml .klusterlet.annotations | nindent 10 }} - {{- else if $.Values.fleetConfig.spokeAnnotations }} - annotations: {{- toYaml $.Values.fleetConfig.spokeAnnotations | nindent 10 }} - {{- end }} - mode: {{ .klusterlet.mode | quote }} - purgeOperator: {{ .klusterlet.purgeOperator }} - featureGates: {{ include "featureGates" (dict "dict" $spokeFeatureGates) | quote }} - forceInternalEndpointLookup: {{ .klusterlet.forceInternalEndpointLookup }} - forceInternalEndpointLookupManaged: {{ .klusterlet.forceInternalEndpointLookupManaged }} - singleton: {{ .klusterlet.singleton }} - {{- $mck := .klusterlet.managedClusterKubeconfig -}} - {{- if or $mck.context $mck.inCluster (and $mck.secretReference (not (empty $mck.secretReference.name))) }} - managedClusterKubeconfig: - context: {{ $mck.context | quote }} - inCluster: {{ $mck.inCluster }} - {{- if and $mck.secretReference (not (empty $mck.secretReference.name)) }} - secretReference: {{ toYaml $mck.secretReference | nindent 12 }} 
- {{- end }} - {{- end }} - resources: {{- include "deepClean" .klusterlet.resources | nindent 10 }} - source: - bundleVersion: {{ $.Values.fleetConfig.source.bundleVersion }} - registry: {{ $.Values.fleetConfig.source.registry }} - {{- if .klusterlet.values }} - values: {{- toYaml .klusterlet.values | nindent 10 }} - {{- end }} - {{- if .addOns }} - addOns: {{- toYaml .addOns | nindent 8 }} - {{- end }} + {{- if (.Values.fleetConfig.hub.clusterManager | default dict).enabled }} + clusterManager: + featureGates: {{ include "featureGates" (dict "dict" .Values.fleetConfig.hub.clusterManager.featureGates) | quote }} + purgeOperator: {{ .Values.fleetConfig.hub.clusterManager.purgeOperator }} + resources: {{- include "deepClean" .Values.fleetConfig.hub.clusterManager.resources | nindent 6 }} + source: + bundleVersion: {{ .Values.fleetConfig.source.bundleVersion }} + registry: {{ .Values.fleetConfig.source.registry }} + {{- end }} + {{- if (.Values.fleetConfig.hub.singletonControlPlane | default dict).enabled }} + {{- $scp := omit .Values.fleetConfig.hub.singletonControlPlane "enabled" -}} + singleton: {{- toYaml $scp | nindent 6 }} + {{- end }} + createNamespace: {{ .Values.fleetConfig.hub.createNamespace }} + force: {{ .Values.fleetConfig.hub.force }} + {{- with .Values.fleetConfig.hub.kubeconfig }} + kubeconfig: + context: {{ .context | quote }} + inCluster: {{ .inCluster }} + {{- if and .secretReference (not (empty .secretReference.name)) }} + secretReference: {{ toYaml .secretReference | nindent 6 }} {{- end }} - {{- if .Values.fleetConfig.hub.addOnConfigs }} + {{- end }} + apiServer: {{ .Values.fleetConfig.hub.apiServer | quote }} + ca: {{ .Values.fleetConfig.hub.ca | quote }} + {{- if .Values.fleetConfig.hub.addOnConfigs }} addOnConfigs: {{- range .Values.fleetConfig.hub.addOnConfigs }} - name: {{ .name }} @@ -112,4 +60,66 @@ spec: {{- end }} {{- end }} hubAddOns: {{- toYaml .Values.fleetConfig.hub.hubAddOns | nindent 4 }} +{{- range .Values.fleetConfig.spokes }} 
+--- +apiVersion: fleetconfig.open-cluster-management.io/v1beta1 +kind: Spoke +metadata: + annotations: + helm.sh/resource-policy: keep + name: {{ .name }} + namespace: {{ $releaseNamespace }} +spec: + hubRef: + name: {{ .hubRef.name }} + namespace: {{ $releaseNamespace }} + createNamespace: {{ .createNamespace }} + syncLabels: {{ .syncLabels }} + {{- with .kubeconfig }} + kubeconfig: + context: {{ .context | quote }} + inCluster: {{ .inCluster }} + {{- if and .secretReference (not (empty .secretReference.name)) }} + secretReference: {{ toYaml .secretReference | nindent 6 }} + {{- end }} + {{- end }} + proxyCa: {{ .proxyCa | quote }} + proxyUrl: {{ .proxyUrl | quote }} + {{- if .clusterARN }} + clusterARN: {{ .clusterARN | quote }} + {{- end }} + klusterlet: + {{- if and .klusterlet.annotations $.Values.fleetConfig.spokeAnnotations }} + annotations: {{- toYaml (merge .klusterlet.annotations $.Values.fleetConfig.spokeAnnotations) | nindent 6 }} + {{- else if .klusterlet.annotations }} + annotations: {{- toYaml .klusterlet.annotations | nindent 6 }} + {{- else if $.Values.fleetConfig.spokeAnnotations }} + annotations: {{- toYaml $.Values.fleetConfig.spokeAnnotations | nindent 6 }} + {{- end }} + mode: {{ .klusterlet.mode | quote }} + purgeOperator: {{ .klusterlet.purgeOperator }} + featureGates: {{ include "featureGates" (dict "dict" $spokeFeatureGates) | quote }} + forceInternalEndpointLookup: {{ .klusterlet.forceInternalEndpointLookup }} + forceInternalEndpointLookupManaged: {{ .klusterlet.forceInternalEndpointLookupManaged }} + singleton: {{ .klusterlet.singleton }} + {{- $mck := .klusterlet.managedClusterKubeconfig -}} + {{- if or $mck.context $mck.inCluster (and $mck.secretReference (not (empty $mck.secretReference.name))) }} + managedClusterKubeconfig: + context: {{ $mck.context | quote }} + inCluster: {{ $mck.inCluster }} + {{- if and $mck.secretReference (not (empty $mck.secretReference.name)) }} + secretReference: {{ toYaml $mck.secretReference | nindent 8 
}} + {{- end }} + {{- end }} + resources: {{- include "deepClean" .klusterlet.resources | nindent 6 }} + {{- if .klusterlet.values }} + values: {{- toYaml .klusterlet.values | nindent 6 }} + {{- end }} + {{- if .klusterlet.valuesFrom }} + valuesFrom: {{- toYaml .klusterlet.valuesFrom | nindent 6 }} + {{- end }} + {{- if .addOns }} + addOns: {{- toYaml .addOns | nindent 4 }} + {{- end }} +{{- end }} {{- end }} diff --git a/fleetconfig-controller/charts/fleetconfig-controller/values.yaml b/fleetconfig-controller/charts/fleetconfig-controller/values.yaml index 871d2855..617c13b0 100644 --- a/fleetconfig-controller/charts/fleetconfig-controller/values.yaml +++ b/fleetconfig-controller/charts/fleetconfig-controller/values.yaml @@ -5,10 +5,10 @@ ## @section FleetConfig Configuration ## @descriptionStart -## Configuration for the FleetConfig resource created on the Hub. By default, bootstraps the Hub cluster in hub-as-spoke mode. +## Configuration for the FleetConfig resources (Hub and Spoke) created on the Hub. By default, bootstraps the Hub cluster in hub-as-spoke mode. ## @descriptionEnd fleetConfig: - ## @param fleetConfig.enabled Whether to create a FleetConfig resource. + ## @param fleetConfig.enabled Whether to create FleetConfig resources. enabled: true ## @param fleetConfig.timeout Timeout in seconds for all clusteradm operations, including init, accept, join, upgrade, etc. timeout: 300 @@ -60,6 +60,8 @@ fleetConfig: autoApprovedARNPatterns: [] ## Configuration for the Hub cluster. hub: + ## @param fleetConfig.hub.name Name of the Hub resource which will manage the spoke cluster. + name: hub ## @param fleetConfig.hub.addOnConfigs Global add-on configuration for the hub cluster. addOnConfigs: [] # - name: "" # Name of the add-on. @@ -176,6 +178,8 @@ fleetConfig: setString: [] ## Configuration for Spoke clusters. ## @param fleetConfig.spokes[0].name Name of the spoke cluster. 
+ ## @param fleetConfig.spokes[0].hubRef.name Name of the Hub resource which will manage the spoke cluster. + ## @param fleetConfig.spokes[0].hubRef.namespace Namespace of the Hub resource which will manage the spoke cluster. ## @param fleetConfig.spokes[0].createNamespace If true, create open-cluster-management namespace and agent namespace (open-cluster-management-agent for Default mode, for Hosted mode), otherwise use existing one. Do not edit this name if you are using the default hub-as-spoke mode. ## @param fleetConfig.spokes[0].syncLabels If true, sync the labels from klusterlet to all agent resources. ## @param fleetConfig.spokes[0].clusterARN The ARN of the spoke cluster. This is only required if configuring an EKS FleetConfig. Example: "arn:aws:eks:us-west-2::cluster/". @@ -204,6 +208,9 @@ fleetConfig: ## @param fleetConfig.spokes[0].klusterlet.singleton If true, deploy klusterlet in singleton mode, with registration and work agents running in a single pod. This is an alpha stage flag. spokes: - name: hub-as-spoke + hubRef: + name: hub + namespace: "" createNamespace: true syncLabels: false clusterARN: "" @@ -263,6 +270,9 @@ topologyResources: ## @section fleetconfig-controller parameters +## @param spokeConcurrentReconciles Maximum number of Spoke resources that will be reconciled at the same time. +spokeConcurrentReconciles: 5 + ## @param kubernetesProvider Kubernetes provider of the cluster that fleetconfig-controller will be installed on. Valid values are "Generic", "EKS", "GKE-Ingress". 
kubernetesProvider: "Generic" @@ -310,9 +320,9 @@ containerSecurityContext: resources: limits: cpu: 500m - memory: 256Mi + memory: 512Mi requests: - cpu: 100m + cpu: 200m memory: 256Mi ## @param healthCheck.port port the liveness & readiness probes are bound to diff --git a/fleetconfig-controller/cmd/main.go b/fleetconfig-controller/cmd/main.go index 4c362db0..efb90066 100644 --- a/fleetconfig-controller/cmd/main.go +++ b/fleetconfig-controller/cmd/main.go @@ -20,6 +20,7 @@ package main import ( "crypto/tls" "flag" + "fmt" "os" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) @@ -58,14 +59,15 @@ func init() { func main() { var ( - metricsAddr string - enableLeaderElection bool - probeAddr string - secureMetrics bool - enableHTTP2 bool - useWebhook bool - certDir string - webhookPort int + metricsAddr string + enableLeaderElection bool + probeAddr string + secureMetrics bool + enableHTTP2 bool + useWebhook bool + certDir string + webhookPort int + spokeConcurrentReconciles int ) flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metric endpoint binds to. Use the port :8080. If not set, it will be 0 to disable the metrics server.") @@ -78,6 +80,8 @@ func main() { flag.StringVar(&certDir, "webhook-cert-dir", certDir, "Admission webhook cert/key dir") flag.IntVar(&webhookPort, "webhook-port", webhookPort, "Admission webhook port") + flag.IntVar(&spokeConcurrentReconciles, "spoke-concurrent-reconciles", apiv1beta1.SpokeDefaultMaxConcurrentReconciles, fmt.Sprintf("Maximum number of Spoke resources that may be reconciled in parallel. 
Defaults to %d.", apiv1beta1.SpokeDefaultMaxConcurrentReconciles)) + opts := zap.Options{ Development: true, } @@ -147,14 +151,18 @@ func main() { if err := (&controllerv1beta1.HubReconciler{ Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Hub"), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Hub") os.Exit(1) } + if err := (&controllerv1beta1.SpokeReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Spoke"), + ConcurrentReconciles: spokeConcurrentReconciles, + Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Spoke") os.Exit(1) diff --git a/fleetconfig-controller/config/webhook/manifests.yaml b/fleetconfig-controller/config/webhook/manifests.yaml index c9715870..6ae9d891 100644 --- a/fleetconfig-controller/config/webhook/manifests.yaml +++ b/fleetconfig-controller/config/webhook/manifests.yaml @@ -24,46 +24,6 @@ webhooks: resources: - fleetconfigs sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /mutate-fleetconfig-open-cluster-management-io-v1beta1-hub - failurePolicy: Fail - name: mhub-v1beta1.kb.io - rules: - - apiGroups: - - fleetconfig.open-cluster-management.io - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - hubs - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /mutate-fleetconfig-open-cluster-management-io-v1beta1-spoke - failurePolicy: Fail - name: mspoke-v1beta1.kb.io - rules: - - apiGroups: - - fleetconfig.open-cluster-management.io - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - spokes - sideEffects: None --- apiVersion: admissionregistration.k8s.io/v1 
kind: ValidatingWebhookConfiguration diff --git a/fleetconfig-controller/devspace.yaml b/fleetconfig-controller/devspace.yaml index 5b94f381..12fc7839 100644 --- a/fleetconfig-controller/devspace.yaml +++ b/fleetconfig-controller/devspace.yaml @@ -14,6 +14,21 @@ vars: PROVIDER: value: "production" # production (generic), eks, gke DEVSPACE_ENV_FILE: './hack/.versions.env' + FLEETCONFIG_ENABLED: + value: true + +# profiles are used to set the fleetconfig-controller values for the different versions of the fleetconfig-controller API +profiles: + - name: v1alpha1 + patches: + - path: vars.FLEETCONFIG_ENABLED.value + op: replace + value: false + - name: v1beta1 + patches: + - path: vars.FLEETCONFIG_ENABLED.value + op: replace + value: true pipelines: dev: |- @@ -96,6 +111,8 @@ deployments: chart: name: ${CONTEXT}/charts/fleetconfig-controller values: + fleetConfig: + enabled: ${FLEETCONFIG_ENABLED} image: repository: ${IMAGE_REPOSITORY}-local tag: local diff --git a/fleetconfig-controller/go.mod b/fleetconfig-controller/go.mod index 593f3f49..dcd598f2 100644 --- a/fleetconfig-controller/go.mod +++ b/fleetconfig-controller/go.mod @@ -3,6 +3,7 @@ module github.com/open-cluster-management-io/lab/fleetconfig-controller go 1.24.4 require ( + dario.cat/mergo v1.0.2 github.com/Masterminds/semver v1.5.0 github.com/go-logr/logr v1.4.3 github.com/mitchellh/hashstructure/v2 v2.0.2 @@ -24,7 +25,6 @@ require ( ) require ( - dario.cat/mergo v1.0.2 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/BurntSushi/toml v1.5.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect diff --git a/fleetconfig-controller/hack/support-bundle/serve.sh b/fleetconfig-controller/hack/support-bundle/serve.sh index 328690dc..ed20a0ca 100755 --- a/fleetconfig-controller/hack/support-bundle/serve.sh +++ b/fleetconfig-controller/hack/support-bundle/serve.sh @@ -141,7 +141,7 @@ if [[ -n "$local_bundle" ]]; then # Bundle is exactly the 
fleetconfig-support-bundle directory echo "Bundle is already in the expected location" else - # Bundle is outside mural-support-bundle, create symlink + # Bundle is outside fleetconfig-support-bundle, create symlink if [[ -d "fleetconfig-support-bundle" ]]; then rm -rf fleetconfig-support-bundle fi diff --git a/fleetconfig-controller/internal/args/args.go b/fleetconfig-controller/internal/args/args.go new file mode 100644 index 00000000..803208a6 --- /dev/null +++ b/fleetconfig-controller/internal/args/args.go @@ -0,0 +1,67 @@ +// Package args provides helpers for formatting clusteradm args. +package args + +import ( + "context" + "reflect" + + "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/file" +) + +// PrepareKubeconfig parses a kubeconfig spec and returns updated clusteradm args. +// The '--kubeconfig' flag is added and a cleanup function is returned to remove the temp kubeconfig file. +func PrepareKubeconfig(ctx context.Context, rawKubeconfig []byte, context string, args []string) ([]string, func(), error) { + logger := log.FromContext(ctx) + + kubeconfigPath, cleanup, err := file.TmpFile(rawKubeconfig, "kubeconfig") + if err != nil { + return args, cleanup, err + } + if context != "" { + args = append(args, "--context", context) + } + + logger.V(1).Info("Using kubeconfig", "path", kubeconfigPath) + args = append(args, "--kubeconfig", kubeconfigPath) + return args, cleanup, nil +} + +// PrepareResources returns resource-related flags +func PrepareResources(resources ResourceSpec) []string { + qos := resources.GetQosClass() + if qos == "" { + qos = "Default" + } + flags := []string{"--resource-qos-class", qos} + if req := resources.GetRequests(); req != nil { + rv := reflect.ValueOf(req) + if rv.Kind() != reflect.Ptr || !rv.IsNil() { + if s := req.String(); s != "" { + flags = append(flags, "--resource-requests", s) + } + } + } + if lim := resources.GetLimits(); lim != nil { + rv := 
reflect.ValueOf(lim) + if rv.Kind() != reflect.Ptr || !rv.IsNil() { + if s := lim.String(); s != "" { + flags = append(flags, "--resource-limits", s) + } + } + } + return flags +} + +// ResourceSpec is an interface implemented by any API version's ResourceSpec +type ResourceSpec interface { + GetRequests() ResourceValues + GetLimits() ResourceValues + GetQosClass() string +} + +// ResourceValues is an interface implemented by any API version's ResourceValues +type ResourceValues interface { + String() string +} diff --git a/fleetconfig-controller/internal/args/args_test.go b/fleetconfig-controller/internal/args/args_test.go new file mode 100644 index 00000000..ffc0a2a5 --- /dev/null +++ b/fleetconfig-controller/internal/args/args_test.go @@ -0,0 +1,220 @@ +package args + +import ( + "context" + "os" + "reflect" + "slices" + "testing" +) + +// Mock implementations for testing +type mockResourceValues struct { + cpu string + memory string +} + +func (m *mockResourceValues) String() string { + if m.cpu != "" && m.memory != "" { + return "cpu=" + m.cpu + ",memory=" + m.memory + } else if m.cpu != "" { + return "cpu=" + m.cpu + } else if m.memory != "" { + return "memory=" + m.memory + } + return "" +} + +type mockResourceSpec struct { + requests *mockResourceValues + limits *mockResourceValues + qosClass string +} + +func (m *mockResourceSpec) GetRequests() ResourceValues { + if m.requests == nil { + return &mockResourceValues{} + } + return m.requests +} + +func (m *mockResourceSpec) GetLimits() ResourceValues { + if m.limits == nil { + return &mockResourceValues{} + } + return m.limits +} + +func (m *mockResourceSpec) GetQosClass() string { + return m.qosClass +} + +func TestPrepareKubeconfig(t *testing.T) { + ctx := context.Background() + kubeconfig := []byte(`apiVersion: v1 +kind: Config +clusters: +- cluster: + server: https://test-server:6443 + name: test-cluster +contexts: +- context: + cluster: test-cluster + user: test-user + name: test-context 
+current-context: test-context +users: +- name: test-user + user: + token: test-token`) + + args := []string{"init", "--hub-name", "test-hub"} + + t.Run("with context", func(t *testing.T) { + resultArgs, cleanup, err := PrepareKubeconfig(ctx, kubeconfig, "test-context", args) + defer cleanup() + + if err != nil { + t.Errorf("PrepareKubeconfig() error = %v", err) + } + + // Check that kubeconfig flag is added + if !slices.Contains(resultArgs, "--kubeconfig") { + t.Error("PrepareKubeconfig() should add --kubeconfig flag") + } + + // Check that context flag is added + if !slices.Contains(resultArgs, "--context") { + t.Error("PrepareKubeconfig() should add --context flag") + } + + // Check that the kubeconfig file exists + kubeconfigIndex := slices.Index(resultArgs, "--kubeconfig") + if kubeconfigIndex == -1 || kubeconfigIndex+1 >= len(resultArgs) { + t.Fatal("PrepareKubeconfig() should add kubeconfig path") + } + + kubeconfigPath := resultArgs[kubeconfigIndex+1] + if _, err := os.Stat(kubeconfigPath); os.IsNotExist(err) { + t.Errorf("PrepareKubeconfig() kubeconfig file should exist at %s", kubeconfigPath) + } + }) + + t.Run("without context", func(t *testing.T) { + resultArgs, cleanup, err := PrepareKubeconfig(ctx, kubeconfig, "", args) + defer cleanup() + + if err != nil { + t.Errorf("PrepareKubeconfig() error = %v", err) + } + + // Check that kubeconfig flag is added but context is not + if !slices.Contains(resultArgs, "--kubeconfig") { + t.Error("PrepareKubeconfig() should add --kubeconfig flag") + } + + if slices.Contains(resultArgs, "--context") { + t.Error("PrepareKubeconfig() should not add --context flag when context is empty") + } + }) +} + +func TestPrepareResources(t *testing.T) { + tests := []struct { + name string + spec ResourceSpec + expected []string + }{ + { + name: "with requests and limits", + spec: &mockResourceSpec{ + requests: &mockResourceValues{cpu: "100m", memory: "128Mi"}, + limits: &mockResourceValues{cpu: "500m", memory: "512Mi"}, + 
qosClass: "BestEffort", + }, + expected: []string{ + "--resource-qos-class", "BestEffort", + "--resource-requests", "cpu=100m,memory=128Mi", + "--resource-limits", "cpu=500m,memory=512Mi", + }, + }, + { + name: "with only requests", + spec: &mockResourceSpec{ + requests: &mockResourceValues{cpu: "200m"}, + qosClass: "Default", + }, + expected: []string{ + "--resource-qos-class", "Default", + "--resource-requests", "cpu=200m", + }, + }, + { + name: "with only limits", + spec: &mockResourceSpec{ + limits: &mockResourceValues{memory: "1Gi"}, + qosClass: "ResourceRequirement", + }, + expected: []string{ + "--resource-qos-class", "ResourceRequirement", + "--resource-limits", "memory=1Gi", + }, + }, + { + name: "with empty resources", + spec: &mockResourceSpec{ + qosClass: "Default", + }, + expected: []string{ + "--resource-qos-class", "Default", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := PrepareResources(tt.spec) + if !reflect.DeepEqual(result, tt.expected) { + t.Errorf("PrepareResources() = %v, want %v", result, tt.expected) + } + }) + } +} + +func TestMockResourceValues_String(t *testing.T) { + tests := []struct { + name string + values *mockResourceValues + expected string + }{ + { + name: "both cpu and memory", + values: &mockResourceValues{cpu: "100m", memory: "128Mi"}, + expected: "cpu=100m,memory=128Mi", + }, + { + name: "only cpu", + values: &mockResourceValues{cpu: "200m"}, + expected: "cpu=200m", + }, + { + name: "only memory", + values: &mockResourceValues{memory: "256Mi"}, + expected: "memory=256Mi", + }, + { + name: "empty values", + values: &mockResourceValues{}, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.values.String() + if result != tt.expected { + t.Errorf("mockResourceValues.String() = %v, want %v", result, tt.expected) + } + }) + } +} diff --git a/fleetconfig-controller/internal/controller/v1alpha1/fleetconfig_controller.go 
b/fleetconfig-controller/internal/controller/v1alpha1/fleetconfig_controller.go index 357a7c7a..fd68ef7c 100644 --- a/fleetconfig-controller/internal/controller/v1alpha1/fleetconfig_controller.go +++ b/fleetconfig-controller/internal/controller/v1alpha1/fleetconfig_controller.go @@ -184,7 +184,7 @@ func ret(ctx context.Context, res ctrl.Result, err error) (ctrl.Result, error) { // cleanup cleans up a FleetConfig and its associated resources. func (r *FleetConfigReconciler) cleanup(ctx context.Context, fc *v1alpha1.FleetConfig) error { - hubKubeconfig, err := kube.KubeconfigFromSecretOrCluster(ctx, r.Client, fc.Spec.Hub.Kubeconfig) + hubKubeconfig, err := kube.KubeconfigFromNamespacedSecretOrCluster(ctx, r.Client, fc.Spec.Hub.Kubeconfig) if err != nil { return err } diff --git a/fleetconfig-controller/internal/controller/v1alpha1/hub.go b/fleetconfig-controller/internal/controller/v1alpha1/hub.go index 8284bea5..6680c5a2 100644 --- a/fleetconfig-controller/internal/controller/v1alpha1/hub.go +++ b/fleetconfig-controller/internal/controller/v1alpha1/hub.go @@ -21,6 +21,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1alpha1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/args" exec_utils "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/exec" "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/file" "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/kube" @@ -34,7 +35,7 @@ func handleHub(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetCon logger.V(0).Info("handleHub", "fleetconfig", fc.Name) // check if the hub is already initialized - hubKubeconfig, err := kube.KubeconfigFromSecretOrCluster(ctx, kClient, fc.Spec.Hub.Kubeconfig) + hubKubeconfig, err := kube.KubeconfigFromNamespacedSecretOrCluster(ctx, kClient, fc.Spec.Hub.Kubeconfig) if err != nil { return err } @@ -77,7 +78,7 
@@ func handleHub(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetCon return errors.New(msg) } } else { - if err := initializeHub(ctx, kClient, fc); err != nil { + if err := initializeHub(ctx, fc, hubKubeconfig); err != nil { return err } } @@ -122,7 +123,7 @@ func handleHub(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetCon } // initializeHub initializes the Hub cluster via 'clusteradm init' -func initializeHub(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetConfig) error { +func initializeHub(ctx context.Context, fc *v1alpha1.FleetConfig, hubKubeconfig []byte) error { logger := log.FromContext(ctx) logger.V(0).Info("initHub", "fleetconfig", fc.Name) @@ -181,13 +182,13 @@ func initializeHub(ctx context.Context, kClient client.Client, fc *v1alpha1.Flee initArgs = append(initArgs, "--bundle-version", fc.Spec.Hub.ClusterManager.Source.BundleVersion) initArgs = append(initArgs, "--image-registry", fc.Spec.Hub.ClusterManager.Source.Registry) // resources args - initArgs = append(initArgs, common.PrepareResources(fc.Spec.Hub.ClusterManager.Resources)...) + initArgs = append(initArgs, args.PrepareResources(fc.Spec.Hub.ClusterManager.Resources)...) 
} else { // one of clusterManager or singletonControlPlane must be specified, per validating webhook, but handle the edge case anyway return fmt.Errorf("unknown hub type, must specify either hub.clusterManager or hub.singletonControlPlane") } - initArgs, cleanupKcfg, err := common.PrepareKubeconfig(ctx, kClient, fc.Spec.Hub.Kubeconfig, initArgs) + initArgs, cleanupKcfg, err := args.PrepareKubeconfig(ctx, hubKubeconfig, fc.Spec.Hub.Kubeconfig.Context, initArgs) if cleanupKcfg != nil { defer cleanupKcfg() } diff --git a/fleetconfig-controller/internal/controller/v1alpha1/spoke.go b/fleetconfig-controller/internal/controller/v1alpha1/spoke.go index 55df0dbb..acf09d9f 100644 --- a/fleetconfig-controller/internal/controller/v1alpha1/spoke.go +++ b/fleetconfig-controller/internal/controller/v1alpha1/spoke.go @@ -25,6 +25,7 @@ import ( "sigs.k8s.io/yaml" "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1alpha1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/args" exec_utils "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/exec" "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/file" "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/hash" @@ -47,7 +48,7 @@ func handleSpokes(ctx context.Context, kClient client.Client, fc *v1alpha1.Fleet logger := log.FromContext(ctx) logger.V(0).Info("handleSpokes", "fleetconfig", fc.Name) - hubKubeconfig, err := kube.KubeconfigFromSecretOrCluster(ctx, kClient, fc.Spec.Hub.Kubeconfig) + hubKubeconfig, err := kube.KubeconfigFromNamespacedSecretOrCluster(ctx, kClient, fc.Spec.Hub.Kubeconfig) if err != nil { return err } @@ -87,7 +88,7 @@ func handleSpokes(ctx context.Context, kClient client.Client, fc *v1alpha1.Fleet // attempt to join the spoke cluster if it hasn't already been joined if managedCluster == nil { - tokenMeta, err := getToken(ctx, kClient, fc) + tokenMeta, err := getToken(ctx, fc, hubKubeconfig) if err 
!= nil { return fmt.Errorf("failed to get join token: %w", err) } @@ -266,7 +267,7 @@ type tokenMeta struct { } // getToken gets a join token from the Hub cluster via 'clusteradm get token' -func getToken(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetConfig) (*tokenMeta, error) { +func getToken(ctx context.Context, fc *v1alpha1.FleetConfig, hubKubeconfig []byte) (*tokenMeta, error) { logger := log.FromContext(ctx) logger.V(0).Info("getToken") @@ -277,7 +278,7 @@ func getToken(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetConf if fc.Spec.Hub.ClusterManager != nil { tokenArgs = append(tokenArgs, fmt.Sprintf("--use-bootstrap-token=%t", fc.Spec.Hub.ClusterManager.UseBootstrapToken)) } - tokenArgs, cleanupKcfg, err := common.PrepareKubeconfig(ctx, kClient, fc.Spec.Hub.Kubeconfig, tokenArgs) + tokenArgs, cleanupKcfg, err := args.PrepareKubeconfig(ctx, hubKubeconfig, fc.Spec.Hub.Kubeconfig.Context, tokenArgs) if cleanupKcfg != nil { defer cleanupKcfg() } @@ -329,7 +330,7 @@ func joinSpoke(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetCon } // resources args - joinArgs = append(joinArgs, common.PrepareResources(spoke.Klusterlet.Resources)...) + joinArgs = append(joinArgs, args.PrepareResources(spoke.Klusterlet.Resources)...) 
// Use hub API server from spec if provided and not forced to use internal endpoint, // otherwise fall back to the hub API server from the tokenMeta @@ -368,7 +369,7 @@ func joinSpoke(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetCon joinArgs = append(joinArgs, fmt.Sprintf("--force-internal-endpoint-lookup-managed=%t", spoke.Klusterlet.ForceInternalEndpointLookupManaged), ) - raw, err := kube.KubeconfigFromSecretOrCluster(ctx, kClient, spoke.Klusterlet.ManagedClusterKubeconfig) + raw, err := kube.KubeconfigFromNamespacedSecretOrCluster(ctx, kClient, spoke.Klusterlet.ManagedClusterKubeconfig) if err != nil { return err } @@ -405,7 +406,11 @@ func joinSpoke(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetCon } joinArgs = append(joinArgs, valuesArgs...) - joinArgs, cleanupKcfg, err := common.PrepareKubeconfig(ctx, kClient, spoke.Kubeconfig, joinArgs) + kubeconfig, err := kube.KubeconfigFromNamespacedSecretOrCluster(ctx, kClient, spoke.Kubeconfig) + if err != nil { + return err + } + joinArgs, cleanupKcfg, err := args.PrepareKubeconfig(ctx, kubeconfig, spoke.Kubeconfig.Context, joinArgs) if cleanupKcfg != nil { defer cleanupKcfg() } @@ -456,7 +461,7 @@ func spokeNeedsUpgrade(ctx context.Context, kClient client.Client, spoke v1alpha return true, nil } - kubeconfig, err := kube.KubeconfigFromSecretOrCluster(ctx, kClient, spoke.Kubeconfig) + kubeconfig, err := kube.KubeconfigFromNamespacedSecretOrCluster(ctx, kClient, spoke.Kubeconfig) if err != nil { return false, err } @@ -518,7 +523,11 @@ func upgradeSpoke(ctx context.Context, kClient client.Client, fc *v1alpha1.Fleet } upgradeArgs = append(upgradeArgs, valuesArgs...) 
- upgradeArgs, cleanupKcfg, err := common.PrepareKubeconfig(ctx, kClient, spoke.Kubeconfig, upgradeArgs) + kubeconfig, err := kube.KubeconfigFromNamespacedSecretOrCluster(ctx, kClient, spoke.Kubeconfig) + if err != nil { + return err + } + upgradeArgs, cleanupKcfg, err := args.PrepareKubeconfig(ctx, kubeconfig, spoke.Kubeconfig.Context, upgradeArgs) if cleanupKcfg != nil { defer cleanupKcfg() } @@ -574,7 +583,11 @@ func unjoinSpoke(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetC fmt.Sprintf("--purge-operator=%t", spoke.GetPurgeKlusterletOperator()), }, fc.BaseArgs()...) - unjoinArgs, cleanupKcfg, err := common.PrepareKubeconfig(ctx, kClient, spoke.GetKubeconfig(), unjoinArgs) + kubeconfig, err := kube.KubeconfigFromNamespacedSecretOrCluster(ctx, kClient, spoke.GetKubeconfig()) + if err != nil { + return err + } + unjoinArgs, cleanupKcfg, err := args.PrepareKubeconfig(ctx, kubeconfig, spoke.GetKubeconfig().Context, unjoinArgs) if cleanupKcfg != nil { defer cleanupKcfg() } diff --git a/fleetconfig-controller/internal/controller/v1beta1/addon.go b/fleetconfig-controller/internal/controller/v1beta1/addon.go new file mode 100644 index 00000000..ada0fef0 --- /dev/null +++ b/fleetconfig-controller/internal/controller/v1beta1/addon.go @@ -0,0 +1,687 @@ +package v1beta1 + +import ( + "context" + "fmt" + "net/url" + "os/exec" + "slices" + "strings" + "time" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + kerrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + + "k8s.io/apimachinery/pkg/types" + addonapi "open-cluster-management.io/api/client/addon/clientset/versioned" + workapi "open-cluster-management.io/api/client/work/clientset/versioned" + workv1 "open-cluster-management.io/api/work/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" + exec_utils 
"github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/exec" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/file" +) + +const ( + // commands + addon = "addon" + create = "create" + enable = "enable" + disable = "disable" + + install = "install" + uninstall = "uninstall" + hubAddon = "hub-addon" + + addonArgoCD = "argocd" + addonGPF = "governance-policy-framework" + + managedClusterAddOn = "ManagedClusterAddOn" +) + +var supportedHubAddons = []string{ + addonArgoCD, + addonGPF, +} + +// getManagedClusterAddOns returns the list of ManagedClusterAddOns currently installed on a spoke cluster +func getManagedClusterAddOns(ctx context.Context, addonC *addonapi.Clientset, spokeName string) ([]string, error) { + managedClusterAddOns, err := addonC.AddonV1alpha1().ManagedClusterAddOns(spokeName).List(ctx, metav1.ListOptions{ + LabelSelector: v1beta1.ManagedBySelector.String(), + }) + if err != nil { + return nil, fmt.Errorf("failed to list ManagedClusterAddOns for spoke %s: %w", spokeName, err) + } + + addons := make([]string, len(managedClusterAddOns.Items)) + for i, addon := range managedClusterAddOns.Items { + addons[i] = addon.Name + } + return addons, nil +} + +// getHubAddOns returns the list of hub addons (ClusterManagementAddOns without managed-by label) +func getHubAddOns(ctx context.Context, addonC *addonapi.Clientset) ([]string, error) { + // Hub addons are ClusterManagementAddOns that don't have the managed-by label + allClusterManagementAddOns, err := addonC.AddonV1alpha1().ClusterManagementAddOns().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to list all ClusterManagementAddOns: %w", err) + } + + var hubAddons []string + for _, addon := range allClusterManagementAddOns.Items { + if slices.Contains(supportedHubAddons, addon.Name) && addon.Labels[v1beta1.LabelAddOnManagedBy] == "" { + hubAddons = append(hubAddons, addon.Name) + } + } + return hubAddons, nil +} + +func 
handleAddonConfig(ctx context.Context, kClient client.Client, addonC *addonapi.Clientset, hub *v1beta1.Hub) (bool, error) { + logger := log.FromContext(ctx) + logger.V(0).Info("handleAddOnConfig") + + requestedAddOns := hub.Spec.AddOnConfigs + + // get existing addon templates from cluster + createdAddOns, err := addonC.AddonV1alpha1().AddOnTemplates().List(ctx, metav1.ListOptions{LabelSelector: v1beta1.ManagedBySelector.String()}) + if err != nil { + logger.V(1).Info("failed to list AddOnTemplates, ensure CRDs are installed.", "error", err) + return len(requestedAddOns) > 0, err + } + + // nothing to do + if len(requestedAddOns) == 0 && len(createdAddOns.Items) == 0 { + logger.V(5).Info("no addons to reconcile") + return false, nil + } + + // compare existing to requested + createdVersionedNames := make([]string, len(createdAddOns.Items)) + for i, ca := range createdAddOns.Items { + createdVersionedNames[i] = ca.Name + } + + requestedVersionedNames := make([]string, len(requestedAddOns)) + for i, ra := range requestedAddOns { + requestedVersionedNames[i] = fmt.Sprintf("%s-%s", ra.Name, ra.Version) + } + + // Find addons that need to be created (present in requested, missing from created) + addonsToCreate := make([]v1beta1.AddOnConfig, 0) + for i, requestedName := range requestedVersionedNames { + if !slices.Contains(createdVersionedNames, requestedName) { + addonsToCreate = append(addonsToCreate, requestedAddOns[i]) + } + } + + // Find addons that need to be deleted (present in created, missing from requested) + addonsToDelete := make([]string, 0) + for _, createdName := range createdVersionedNames { + if !slices.Contains(requestedVersionedNames, createdName) { + addonsToDelete = append(addonsToDelete, createdName) + } + } + + // do deletes first, then creates. 
+ err = handleAddonDelete(ctx, addonC, addonsToDelete) + if err != nil { + return true, err + } + + err = handleAddonCreate(ctx, kClient, hub, addonsToCreate) + if err != nil { + return true, err + } + + return true, nil +} + +func handleAddonCreate(ctx context.Context, kClient client.Client, hub *v1beta1.Hub, addons []v1beta1.AddOnConfig) error { + if len(addons) == 0 { + return nil + } + + logger := log.FromContext(ctx) + logger.V(0).Info("createAddOns") + + // set up array of clusteradm addon create commands + for _, a := range addons { + // look up manifests CM for the addon + cm := corev1.ConfigMap{} + cmName := fmt.Sprintf("%s-%s-%s", v1beta1.AddonConfigMapNamePrefix, a.Name, a.Version) + err := kClient.Get(ctx, types.NamespacedName{Name: cmName, Namespace: hub.Namespace}, &cm) + if err != nil { + return errors.Wrapf(err, "could not load configuration for add-on %s version %s", a.Name, a.Version) + } + + args := append([]string{ + addon, + create, + a.Name, + fmt.Sprintf("--version=%s", a.Version), + fmt.Sprintf("--labels=%v", v1beta1.ManagedBySelector.String()), + }, hub.BaseArgs()...) + + // Extract manifest configuration from ConfigMap + // validation was already done by the webhook, so simply check if raw manifests are provided and if not, use the URL. 
+ manifestsRaw, ok := cm.Data[v1beta1.AddonConfigMapManifestRawKey] + if ok { + // Write raw manifests to temporary file + filename, cleanup, err := file.TmpFile([]byte(manifestsRaw), "yaml") + if cleanup != nil { + defer cleanup() + } + if err != nil { + return err + } + args = append(args, fmt.Sprintf("--filename=%s", filename)) + } else { + manifestsURL := cm.Data[v1beta1.AddonConfigMapManifestURLKey] + url, err := url.Parse(manifestsURL) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to create addon %s version %s", a.Name, a.Version)) + } + switch url.Scheme { + case "http", "https": + // pass URL directly + args = append(args, fmt.Sprintf("--filename=%s", manifestsURL)) + default: + return fmt.Errorf("unsupported URL scheme %s for addon %s version %s. Must be one of %v", url.Scheme, a.Name, a.Version, v1beta1.AllowedAddonURLSchemes) + } + } + + if a.HubRegistration { + args = append(args, "--hub-registration") + } + if a.Overwrite { + args = append(args, "--overwrite") + } + if a.ClusterRoleBinding != "" { + args = append(args, fmt.Sprintf("--cluster-role-bind=%s", a.ClusterRoleBinding)) + } + + logger.V(7).Info("running", "command", clusteradm, "args", args) + cmd := exec.Command(clusteradm, args...) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm addon create' to complete...") + if err != nil { + out := append(stdout, stderr...) 
+ return fmt.Errorf("failed to create addon: %v, output: %s", err, string(out)) + } + logger.V(0).Info("created addon", "AddOnTemplate", a.Name, "output", string(stdout)) + } + return nil +} + +func handleAddonDelete(ctx context.Context, addonC *addonapi.Clientset, addons []string) error { + if len(addons) == 0 { + return nil + } + + logger := log.FromContext(ctx) + logger.V(0).Info("deleteAddOns") + + // a list of addons which may or may not need to be purged at the end (ClusterManagementAddOns needs to be deleted) + purgeList := make([]string, 0) + errs := make([]error, 0) + for _, addonName := range addons { + // get the addon template, so we can extract spec.addonName + addon, err := addonC.AddonV1alpha1().AddOnTemplates().Get(ctx, addonName, metav1.GetOptions{}) + if err != nil && !kerrs.IsNotFound(err) { + errs = append(errs, fmt.Errorf("failed to delete addon %s: %v", addonName, err)) + continue + } + + // delete the addon template + if addon == nil { + logger.V(0).Info("addon not found, nothing to do", "AddOnTemplate", addonName) + continue + } + + err = addonC.AddonV1alpha1().AddOnTemplates().Delete(ctx, addonName, metav1.DeleteOptions{}) + if err != nil && !kerrs.IsNotFound(err) { + errs = append(errs, fmt.Errorf("failed to delete addon %s: %v", addonName, err)) + continue + } + baseAddonName := addon.Spec.AddonName + // get the addon name without a version suffix, add it to purge list + purgeList = append(purgeList, baseAddonName) + + logger.V(0).Info("deleted addon", "AddOnTemplate", addonName) + } + + // check if there are any remaining addon templates for the same addon names as what was just deleted (different versions of the same addon) + // dont use a label selector here - in case an addon with the same name was created out of band, and it is the last remaining version, we dont want + // to delete its ClusterManagementAddOn + allAddons, err := addonC.AddonV1alpha1().AddOnTemplates().List(ctx, metav1.ListOptions{}) + if err != nil && 
!kerrs.IsNotFound(err) { + return fmt.Errorf("failed to clean up addons %v: %v", purgeList, err) + } + for _, a := range allAddons.Items { + // if other versions of the same addon remain, remove it from the purge list + purgeList = slices.DeleteFunc(purgeList, func(name string) bool { + return name == a.Spec.AddonName + }) + } + // if list is empty, nothing else to do + if len(purgeList) == 0 { + return nil + } + + // delete the ClusterManagementAddOn for any addon which has no active templates left + for _, name := range purgeList { + err = addonC.AddonV1alpha1().ClusterManagementAddOns().Delete(ctx, name, metav1.DeleteOptions{}) + if err != nil && !kerrs.IsNotFound(err) { + return fmt.Errorf("failed to purge addon %s: %v", name, err) + } + logger.V(0).Info("purged addon", "ClusterManagementAddOn", name) + } + + // only return aggregated errs after trying to delete ClusterManagementAddOns. + // this way, we dont accidentally leave any orphaned resources for addons which were successfully deleted. 
+ if len(errs) > 0 { + return fmt.Errorf("one or more addons were not deleted: %v", errs) + } + + return nil +} + +func handleSpokeAddons(ctx context.Context, addonC *addonapi.Clientset, spoke *v1beta1.Spoke) ([]string, error) { + logger := log.FromContext(ctx) + addons := spoke.Spec.AddOns + + // Get actual enabled addons from cluster instead of status + enabledAddons, err := getManagedClusterAddOns(ctx, addonC, spoke.Name) + if err != nil { + logger.V(1).Info("failed to get ManagedClusterAddOns, assuming none enabled", "error", err, "spokeName", spoke.Name) + enabledAddons = []string{} + } + + if len(addons) == 0 && len(enabledAddons) == 0 { + // nothing to do + return enabledAddons, nil + } + + // compare existing to requested + requestedAddonNames := make([]string, len(addons)) + for i, addon := range addons { + requestedAddonNames[i] = addon.ConfigName + } + + // Find addons that need to be enabled (present in requested, missing from enabledAddons) + addonsToEnable := make([]v1beta1.AddOn, 0) + for i, requestedName := range requestedAddonNames { + if !slices.Contains(enabledAddons, requestedName) { + addonsToEnable = append(addonsToEnable, addons[i]) + } + } + + // Find addons that need to be disabled (present in enabledAddons, missing from requested) + addonsToDisable := make([]string, 0) + for _, enabledAddon := range enabledAddons { + if !slices.Contains(requestedAddonNames, enabledAddon) { + addonsToDisable = append(addonsToDisable, enabledAddon) + } + } + + // do disables first, then enables/updates + err = handleAddonDisable(ctx, spoke, addonsToDisable) + if err != nil { + spoke.SetConditions(true, v1beta1.NewCondition( + err.Error(), v1beta1.AddonsConfigured, metav1.ConditionFalse, metav1.ConditionTrue, + )) + return enabledAddons, err + } + + // Remove disabled addons from enabledAddons + for _, disabledAddon := range addonsToDisable { + enabledAddons = slices.DeleteFunc(enabledAddons, func(ea string) bool { + return ea == disabledAddon + }) + } + + // 
Enable new addons and updated addons + newEnabledAddons, err := handleAddonEnable(ctx, spoke, addonsToEnable) + // even if an error is returned, any addon which was successfully enabled is tracked, so append before returning + enabledAddons = append(enabledAddons, newEnabledAddons...) + if err != nil { + spoke.SetConditions(true, v1beta1.NewCondition( + err.Error(), v1beta1.AddonsConfigured, metav1.ConditionFalse, metav1.ConditionTrue, + )) + return enabledAddons, err + } + spoke.SetConditions(true, v1beta1.NewCondition( + v1beta1.AddonsConfigured, v1beta1.AddonsConfigured, metav1.ConditionTrue, metav1.ConditionTrue, + )) + + return enabledAddons, nil +} + +func handleAddonEnable(ctx context.Context, spoke *v1beta1.Spoke, addons []v1beta1.AddOn) ([]string, error) { + if len(addons) == 0 { + return nil, nil + } + + logger := log.FromContext(ctx) + logger.V(0).Info("enableAddOns", "managedcluster", spoke.Name) + + baseArgs := append([]string{ + addon, + enable, + fmt.Sprintf("--cluster=%s", spoke.Name), + fmt.Sprintf("--labels=%v", v1beta1.ManagedBySelector.String()), + }, spoke.BaseArgs()...) + + var enableErrs []error + enabledAddons := make([]string, 0) + for _, a := range addons { + args := []string{ + fmt.Sprintf("--names=%s", a.ConfigName), + } + if a.InstallNamespace != "" { + args = append(args, fmt.Sprintf("--namespace=%s", a.InstallNamespace)) + } + var annots []string + for k, v := range a.Annotations { + annots = append(annots, fmt.Sprintf("%s=%s", k, v)) + } + annot := strings.Join(annots, ",") + if annot != "" { + args = append(args, fmt.Sprintf("--annotate=%s", annot)) + } + + args = append(baseArgs, args...) + logger.V(7).Info("running", "command", clusteradm, "args", args) + cmd := exec.Command(clusteradm, args...) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm addon enable' to complete...") + if err != nil { + out := append(stdout, stderr...) 
+ enableErrs = append(enableErrs, fmt.Errorf("failed to enable addon: %v, output: %s", err, string(out))) + continue + } + enabledAddons = append(enabledAddons, a.ConfigName) + logger.V(1).Info("enabled addon", "managedcluster", spoke.Name, "addon", a.ConfigName, "output", string(stdout)) + } + + if len(enableErrs) > 0 { + return enabledAddons, fmt.Errorf("one or more addons were not enabled: %v", enableErrs) + } + return enabledAddons, nil +} + +func handleAddonDisable(ctx context.Context, spoke *v1beta1.Spoke, enabledAddons []string) error { + if len(enabledAddons) == 0 { + return nil + } + + logger := log.FromContext(ctx) + logger.V(0).Info("disableAddOns", "managedcluster", spoke.Name) + + args := append([]string{ + addon, + disable, + fmt.Sprintf("--names=%s", strings.Join(enabledAddons, ",")), + fmt.Sprintf("--clusters=%s", spoke.Name), + }, spoke.BaseArgs()...) + + logger.V(7).Info("running", "command", clusteradm, "args", args) + cmd := exec.Command(clusteradm, args...) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm addon disable' to complete...") + if err != nil { + out := append(stdout, stderr...) 
+ outStr := string(out) + + // Check if the error is due to addon not being found or cluster not found - these are success cases + if strings.Contains(outStr, "add-on not found") { + logger.V(5).Info("addon already disabled (not found)", "managedcluster", spoke.Name, "addons", enabledAddons, "output", outStr) + return nil + } + if strings.Contains(outStr, "managedclusters.cluster.open-cluster-management.io") && strings.Contains(outStr, "not found") { + logger.V(5).Info("addon disable skipped (cluster not found)", "managedcluster", spoke.Name, "addons", enabledAddons, "output", outStr) + return nil + } + + return fmt.Errorf("failed to disable addons: %v, output: %s", err, outStr) + } + logger.V(1).Info("disabled addons", "managedcluster", spoke.Name, "addons", enabledAddons, "output", string(stdout)) + return nil +} + +// isHubAddOnMatching checks if an installed addon matches a desired addon spec +func isHubAddOnMatching(installed v1beta1.InstalledHubAddOn, desired v1beta1.HubAddOn, bundleVersion string) bool { + return installed.Name == desired.Name && + installed.Namespace == desired.InstallNamespace && + installed.BundleVersion == bundleVersion +} + +func handleHubAddons(ctx context.Context, addonC *addonapi.Clientset, hub *v1beta1.Hub) (bool, error) { + logger := log.FromContext(ctx) + logger.V(0).Info("handleHubAddons", "fleetconfig", hub.Name) + + desiredAddOns := hub.Spec.HubAddOns + bundleVersion := hub.Spec.ClusterManager.Source.BundleVersion + + hubAddons, err := getHubAddOns(ctx, addonC) + if err != nil { + logger.V(1).Info("failed to get hub addons, assuming none installed", "error", err) + hubAddons = []string{} + } + + // use status as the source of truth for detailed addon information (namespace, version) + // but cross-reference with actual cluster state to handle discrepancies + installedAddOns := hub.Status.DeepCopy().InstalledHubAddOns + + // reconcile status with actual cluster state - remove from status any addons not found in cluster + 
reconciledInstalledAddOns := make([]v1beta1.InstalledHubAddOn, 0) + for _, installed := range installedAddOns { + if slices.Contains(hubAddons, installed.Name) { + reconciledInstalledAddOns = append(reconciledInstalledAddOns, installed) + } else { + logger.V(1).Info("addon in status but not found in cluster, removing from status", "addon", installed.Name) + } + } + + // nothing to do + if len(desiredAddOns) == 0 && len(reconciledInstalledAddOns) == 0 { + logger.V(5).Info("no hub addons to reconcile") + return false, nil + } + + // find addons that need to be uninstalled (present in installed, missing from desired or version mismatch) + addonsToUninstall := make([]v1beta1.InstalledHubAddOn, 0) + for _, installed := range reconciledInstalledAddOns { + found := slices.ContainsFunc(desiredAddOns, func(desired v1beta1.HubAddOn) bool { + return isHubAddOnMatching(installed, desired, bundleVersion) + }) + if !found { + addonsToUninstall = append(addonsToUninstall, installed) + } + } + + // find addons that need to be installed (present in desired, missing from installed or version upgrade) + addonsToInstall := make([]v1beta1.HubAddOn, 0) + for _, desired := range desiredAddOns { + found := slices.ContainsFunc(reconciledInstalledAddOns, func(installed v1beta1.InstalledHubAddOn) bool { + return isHubAddOnMatching(installed, desired, bundleVersion) + }) + if !found { + addonsToInstall = append(addonsToInstall, desired) + } + } + + // do uninstalls first, then installs + err = handleHubAddonUninstall(ctx, addonsToUninstall, hub) + if err != nil { + return true, err + } + + err = handleHubAddonInstall(ctx, addonC, addonsToInstall, bundleVersion, hub) + if err != nil { + return true, err + } + + // Update status to reflect desired state - build the new installed addons list + newInstalledAddOns := make([]v1beta1.InstalledHubAddOn, 0, len(desiredAddOns)) + for _, d := range desiredAddOns { + newInstalledAddOns = append(newInstalledAddOns, v1beta1.InstalledHubAddOn{ + Name: 
d.Name, + Namespace: d.InstallNamespace, + BundleVersion: bundleVersion, + }) + } + hub.Status.InstalledHubAddOns = newInstalledAddOns + return true, nil +} + +func handleHubAddonUninstall(ctx context.Context, addons []v1beta1.InstalledHubAddOn, hub *v1beta1.Hub) error { + if len(addons) == 0 { + return nil + } + + logger := log.FromContext(ctx) + logger.V(0).Info("uninstalling hub addons", "count", len(addons)) + + var errs []error + for _, addon := range addons { + args := append([]string{ + uninstall, + hubAddon, + fmt.Sprintf("--names=%s", addon.Name), + }, hub.BaseArgs()...) + if addon.Namespace != "" { + args = append(args, fmt.Sprintf("--namespace=%s", addon.Namespace)) + } + + logger.V(7).Info("running", "command", clusteradm, "args", args) + cmd := exec.Command(clusteradm, args...) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm uninstall hub-addon' to complete...") + if err != nil { + out := append(stdout, stderr...) + outStr := string(out) + errs = append(errs, fmt.Errorf("failed to uninstall hubAddon %s: %v, output: %s", addon.Name, err, outStr)) + continue + } + logger.V(1).Info("uninstalled hub addon", "name", addon.Name, "namespace", addon.Namespace, "output", string(stdout)) + } + + if len(errs) > 0 { + return fmt.Errorf("one or more hub addons were not uninstalled: %v", errs) + } + return nil +} + +func handleHubAddonInstall(ctx context.Context, addonC *addonapi.Clientset, addons []v1beta1.HubAddOn, bundleVersion string, hub *v1beta1.Hub) error { + if len(addons) == 0 { + return nil + } + + logger := log.FromContext(ctx) + logger.V(0).Info("installing hub addons", "count", len(addons)) + + var errs []error + for _, addon := range addons { + // Check if already installed (defensive check) + installed, err := isAddonInstalled(ctx, addonC, addon.Name) + if err != nil { + errs = append(errs, fmt.Errorf("failed to check if hubAddon %s is installed: %v", addon.Name, err)) + continue + } + if installed { + 
logger.V(3).Info("hubAddon already installed, skipping", "name", addon.Name) + continue + } + + args := append([]string{ + install, + hubAddon, + fmt.Sprintf("--names=%s", addon.Name), + fmt.Sprintf("--bundle-version=%s", bundleVersion), + fmt.Sprintf("--create-namespace=%t", addon.CreateNamespace), + }, hub.BaseArgs()...) + if addon.InstallNamespace != "" { + args = append(args, fmt.Sprintf("--namespace=%s", addon.InstallNamespace)) + } + + cmd := exec.Command(clusteradm, args...) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm install hub-addon' to complete...") + if err != nil { + out := append(stdout, stderr...) + outStr := string(out) + errs = append(errs, fmt.Errorf("failed to install hubAddon %s: %v, output: %s", addon.Name, err, outStr)) + continue + } + // the argocd pull integration addon logs the entire helm template output including CRDs to stdout. + // to prevent flooding the logs, overwrite it. + if addon.Name == addonArgoCD { + stdout = []byte("ArgoCD hub addon successfully installed") + } + logger.V(1).Info("installed hubAddon", "name", addon.Name, "output", string(stdout)) + } + + if len(errs) > 0 { + return fmt.Errorf("one or more hub addons were not installed: %v", errs) + } + return nil +} + +func isAddonInstalled(ctx context.Context, addonC *addonapi.Clientset, addonName string) (bool, error) { + if _, err := addonC.AddonV1alpha1().ClusterManagementAddOns().Get(ctx, addonName, metav1.GetOptions{}); err != nil { + return false, client.IgnoreNotFound(err) + } + + // we enforce unique names between hubAddOns and addOnConfigs, + // and handle deleting addOnConfigs first + // so if the addon is found here, we can assume it was previously installed by `install hub-addon` + return true, nil +} + +// waitForAddonManifestWorksCleanup polls for addon-related manifestWorks to be removed +// after addon disable operation to avoid race conditions during spoke unjoin +func waitForAddonManifestWorksCleanup(ctx 
context.Context, workC *workapi.Clientset, spokeName string, timeout time.Duration) error { + logger := log.FromContext(ctx) + logger.V(1).Info("waiting for addon manifestWorks cleanup", "spokeName", spokeName, "timeout", timeout) + + err := wait.PollUntilContextTimeout(ctx, addonCleanupPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + manifestWorks, err := workC.WorkV1().ManifestWorks(spokeName).List(ctx, metav1.ListOptions{}) + if err != nil { + logger.V(3).Info("failed to list manifestWorks during cleanup wait", "error", err) + // Return false to continue polling on transient errors + return false, nil + } + + // Success condition: no manifestWorks remaining + if len(manifestWorks.Items) == 0 { + logger.V(1).Info("addon manifestWorks cleanup completed", "spokeName", spokeName, "remainingManifestWorks", len(manifestWorks.Items)) + return true, nil + } + + logger.V(3).Info("waiting for addon manifestWorks cleanup", + "spokeName", spokeName, + "addonManifestWorks", len(manifestWorks.Items)) + + // Continue polling + return false, nil + }) + + if err != nil { + return fmt.Errorf("timeout waiting for addon manifestWorks cleanup for spoke %s: %w", spokeName, err) + } + + return nil +} + +func allOwnersAddOns(mws []workv1.ManifestWork) bool { + for _, m := range mws { + if !slices.ContainsFunc(m.OwnerReferences, func(or metav1.OwnerReference) bool { + return or.Kind == managedClusterAddOn + }) { + return false + } + } + return true +} diff --git a/fleetconfig-controller/internal/controller/v1beta1/common.go b/fleetconfig-controller/internal/controller/v1beta1/common.go new file mode 100644 index 00000000..611396a4 --- /dev/null +++ b/fleetconfig-controller/internal/controller/v1beta1/common.go @@ -0,0 +1,53 @@ +package v1beta1 + +import ( + "context" + "fmt" + "regexp" + "time" + + kerrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + operatorapi 
"open-cluster-management.io/api/client/operator/clientset/versioned" + operatorv1 "open-cluster-management.io/api/operator/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + clusteradm = "clusteradm" + requeue = 30 * time.Second + amwExistsError = "you should manually clean them, uninstall kluster will cause those works out of control." + addonCleanupTimeout = 1 * time.Minute + addonCleanupPollInterval = 2 * time.Second +) + +var csrSuffixPattern = regexp.MustCompile(`-[a-zA-Z0-9]{5}$`) + +func ret(ctx context.Context, res ctrl.Result, err error) (ctrl.Result, error) { + logger := log.FromContext(ctx) + if err != nil { + logger.Error(err, "requeueing due to error") + return reconcile.Result{}, err + } + if res.RequeueAfter > 0 { + logger.Info("requeueing", "after", res.RequeueAfter) + } else { + logger.Info("reconciliation complete; no requeue or error") + } + return res, nil + +} + +// getClusterManager retrieves the ClusterManager resource from the Hub cluster +func getClusterManager(ctx context.Context, operatorC *operatorapi.Clientset) (*operatorv1.ClusterManager, error) { + cm, err := operatorC.OperatorV1().ClusterManagers().Get(ctx, "cluster-manager", metav1.GetOptions{}) + if err != nil { + if kerrs.IsNotFound(err) { + return nil, nil + } + return nil, fmt.Errorf("unexpected error getting cluster-manager: %w", err) + } + return cm, nil +} diff --git a/fleetconfig-controller/internal/controller/v1beta1/hub_controller.go b/fleetconfig-controller/internal/controller/v1beta1/hub_controller.go index aade9e39..fbc5a764 100644 --- a/fleetconfig-controller/internal/controller/v1beta1/hub_controller.go +++ b/fleetconfig-controller/internal/controller/v1beta1/hub_controller.go @@ -14,23 +14,47 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Package v1beta1 contains the main reconciliation logic for fleetconfig-controller's v1alpha1 resources. +// Package v1beta1 contains the main reconciliation logic for fleetconfig-controller's v1beta1 resources. package v1beta1 import ( "context" + "errors" + "fmt" + "os/exec" + "slices" + "strings" + "github.com/go-logr/logr" + kerrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + operatorapi "open-cluster-management.io/api/client/operator/clientset/versioned" + operatorv1 "open-cluster-management.io/api/operator/v1" + "sigs.k8s.io/cluster-api/util/patch" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" - v1beta1 "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/args" + exec_utils "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/exec" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/file" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/kube" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/version" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/pkg/common" ) // HubReconciler reconciles a Hub object type HubReconciler struct { client.Client + Log logr.Logger Scheme *runtime.Scheme } @@ -38,27 +62,495 @@ type HubReconciler struct { // 
+kubebuilder:rbac:groups=fleetconfig.open-cluster-management.io,resources=hubs/status,verbs=get;update;patch // +kubebuilder:rbac:groups=fleetconfig.open-cluster-management.io,resources=hubs/finalizers,verbs=update -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the Hub object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/reconcile -func (r *HubReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) { - _ = logf.FromContext(ctx) - - // TODO(user): your logic here - - return ctrl.Result{}, nil +// Reconcile is the main reconcile loop for the Hub resource. +func (r *HubReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := r.Log.WithValues("request", req) + ctx = log.IntoContext(ctx, logger) + + // Fetch the Hub instance + hub := &v1beta1.Hub{} + err := r.Get(ctx, req.NamespacedName, hub) + if err != nil { + if !kerrs.IsNotFound(err) { + logger.Error(err, "failed to fetch Hub", "key", req) + } + return ret(ctx, ctrl.Result{}, client.IgnoreNotFound(err)) + } + ctx = withOriginalHub(ctx, hub) + + // Create a patch helper for this reconciliation + patchHelper, err := patch.NewHelper(hub, r.Client) + if err != nil { + return ret(ctx, ctrl.Result{}, err) + } + + // Ensure patch is applied at the end + defer func() { + if err := patchHelper.Patch(ctx, hub); err != nil && !kerrs.IsNotFound(err) { + logger.Error(err, "failed to patch Hub") + } + }() + + // Add a finalizer and requeue if not already present + if !slices.Contains(hub.Finalizers, v1beta1.HubCleanupFinalizer) { + hub.Finalizers = 
append(hub.Finalizers, v1beta1.HubCleanupFinalizer) + return ret(ctx, ctrl.Result{RequeueAfter: requeue}, nil) + } + + hubKubeconfig, err := kube.KubeconfigFromSecretOrCluster(ctx, r.Client, hub.Spec.Kubeconfig, hub.Namespace) + if err != nil { + return ret(ctx, ctrl.Result{}, err) + } + + // Handle deletion logic with finalizer + if !hub.DeletionTimestamp.IsZero() { + if hub.Status.Phase != v1beta1.Deleting { + hub.Status.Phase = v1beta1.Deleting + return ret(ctx, ctrl.Result{RequeueAfter: requeue}, nil) + } + + if slices.Contains(hub.Finalizers, v1beta1.HubCleanupFinalizer) { + if err := r.cleanHub(ctx, hub, hubKubeconfig); err != nil { + hub.SetConditions(true, v1beta1.NewCondition( + err.Error(), v1beta1.CleanupFailed, metav1.ConditionTrue, metav1.ConditionFalse, + )) + return ret(ctx, ctrl.Result{}, err) + } + } + hub.Finalizers = slices.DeleteFunc(hub.Finalizers, func(s string) bool { + return s == v1beta1.HubCleanupFinalizer + }) + // end reconciliation + return ret(ctx, ctrl.Result{}, nil) + } + + // Initialize phase & conditions + previousPhase := hub.Status.Phase + hub.Status.Phase = v1beta1.HubStarting + initConditions := []v1beta1.Condition{ + v1beta1.NewCondition( + v1beta1.HubInitialized, v1beta1.HubInitialized, metav1.ConditionFalse, metav1.ConditionTrue, + ), + v1beta1.NewCondition( + v1beta1.CleanupFailed, v1beta1.CleanupFailed, metav1.ConditionFalse, metav1.ConditionFalse, + ), + v1beta1.NewCondition( + v1beta1.AddonsConfigured, v1beta1.AddonsConfigured, metav1.ConditionFalse, metav1.ConditionFalse, + ), + } + hub.SetConditions(false, initConditions...) 
+ + if previousPhase == "" { + // set initial phase/conditions and requeue + return ret(ctx, ctrl.Result{RequeueAfter: requeue}, nil) + } + + // Handle Hub cluster: initialization and/or upgrade + if err := r.handleHub(ctx, hub, hubKubeconfig); err != nil { + logger.Error(err, "Failed to handle hub operations") + hub.Status.Phase = v1beta1.Unhealthy + } + hubInitializedCond := hub.GetCondition(v1beta1.HubInitialized) + if hubInitializedCond == nil || hubInitializedCond.Status == metav1.ConditionFalse { + return ret(ctx, ctrl.Result{RequeueAfter: requeue}, nil) + } + + // Finalize phase + for _, c := range hub.Status.Conditions { + if c.Status != c.WantStatus { + logger.Info("WARNING: condition does not have the desired status", "type", c.Type, "reason", c.Reason, "message", c.Message, "status", c.Status, "wantStatus", c.WantStatus) + hub.Status.Phase = v1beta1.Unhealthy + return ret(ctx, ctrl.Result{RequeueAfter: requeue}, nil) + } + } + if hub.Status.Phase == v1beta1.HubStarting { + hub.Status.Phase = v1beta1.HubRunning + } + + return ret(ctx, ctrl.Result{RequeueAfter: requeue}, nil) +} + +type contextKey int + +const ( + // originalHubKey is the key in the context that records the incoming original Hub + originalHubKey contextKey = iota +) + +func withOriginalHub(ctx context.Context, hub *v1beta1.Hub) context.Context { + return context.WithValue(ctx, originalHubKey, hub.DeepCopy()) +} + +// cleanup cleans up a Hub and its associated resources. 
+func (r *HubReconciler) cleanHub(ctx context.Context, hub *v1beta1.Hub, hubKubeconfig []byte) error { + logger := log.FromContext(ctx) + logger.V(0).Info("cleanHub", "hub", hub.Name) + + // Check if there are any Spokes that need to be deleted + spokeList := &v1beta1.SpokeList{} + err := r.List(ctx, spokeList) + if err != nil { + return err + } + + spokes := spokeList.Items + if len(spokes) > 0 { + // Mark all Spokes for deletion if they haven't been deleted yet + for i := range spokes { + spoke := &spokes[i] + if spoke.DeletionTimestamp.IsZero() { + if !spoke.IsManagedBy(hub.ObjectMeta) { + continue + } + logger.Info("Marking Spoke for deletion", "spoke", spoke.Name) + if err := r.Delete(ctx, spoke); err != nil && !kerrs.IsNotFound(err) { + return fmt.Errorf("failed to delete spoke %s: %w", spoke.Name, err) + } + } + } + + logger.V(1).Info("Waiting for all Spokes to be deleted before proceeding with Hub cleanup", + "remainingSpokes", len(spokes)) + // Return a retriable error to requeue and check again later + return fmt.Errorf("waiting for background spoke deletion. Remaining: %d spokes", len(spokes)) + } + + logger.Info("All Spokes have been deleted, proceeding with Hub cleanup") + + addonC, err := common.AddOnClient(hubKubeconfig) + if err != nil { + return fmt.Errorf("failed to create addon client for cleanup: %w", err) + } + + hubCopy := hub.DeepCopy() + hubCopy.Spec.AddOnConfigs = nil + hubCopy.Spec.HubAddOns = nil + _, err = handleAddonConfig(ctx, r.Client, addonC, hubCopy) + if err != nil { + return err + } + _, err = handleHubAddons(ctx, addonC, hubCopy) + if err != nil { + return err + } + + purgeOperator := false + if hub.Spec.ClusterManager != nil { + purgeOperator = hub.Spec.ClusterManager.PurgeOperator + } + cleanArgs := []string{ + "clean", + // name is omitted, as the default name, 'cluster-manager', is always used + fmt.Sprintf("--purge-operator=%t", purgeOperator), + } + cleanArgs = append(cleanArgs, hub.BaseArgs()...) 
+ + cmd := exec.Command(clusteradm, cleanArgs...) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm clean' to complete...") + if err != nil { + out := append(stdout, stderr...) + return fmt.Errorf("failed to clean hub cluster: %v, output: %s", err, string(out)) + } + logger.V(1).Info("hub cleaned", "output", string(stdout)) + + return nil + +} + +// handleHub manages Hub cluster init and upgrade operations +func (r *HubReconciler) handleHub(ctx context.Context, hub *v1beta1.Hub, hubKubeconfig []byte) error { + logger := log.FromContext(ctx) + logger.V(0).Info("handleHub", "hub", hub.Name) + + operatorC, err := common.OperatorClient(hubKubeconfig) + if err != nil { + return err + } + addonC, err := common.AddOnClient(hubKubeconfig) + if err != nil { + return err + } + cm, err := getClusterManager(ctx, operatorC) + if err != nil { + return err + } + + // if a clustermanager already exists, we don't need to init the hub + if cm != nil && cm.Status.Conditions != nil { + msgs := make([]string, 0) + for _, c := range cm.Status.Conditions { + if c.Type == operatorv1.ConditionProgressing && c.Status == metav1.ConditionTrue { + msgs = append(msgs, fmt.Sprintf("%s: %s", c.Type, c.Message)) + } + if c.Type == operatorv1.ConditionClusterManagerApplied && c.Status == metav1.ConditionFalse { + msgs = append(msgs, fmt.Sprintf("%s: %s", c.Type, c.Message)) + } + if c.Type == operatorv1.ConditionHubRegistrationDegraded && c.Status == metav1.ConditionTrue { + msgs = append(msgs, fmt.Sprintf("%s: %s", c.Type, c.Message)) + } + if c.Type == operatorv1.ConditionHubPlacementDegraded && c.Status == metav1.ConditionTrue { + msgs = append(msgs, fmt.Sprintf("%s: %s", c.Type, c.Message)) + } + } + if len(msgs) > 0 { + msg := strings.TrimSuffix(strings.Join(msgs, "; "), "; ") + msg = fmt.Sprintf("hub pending/degraded: %s", msg) + hub.SetConditions(true, v1beta1.NewCondition( + msg, v1beta1.HubInitialized, metav1.ConditionFalse, metav1.ConditionTrue, + )) + 
return errors.New(msg) + } + } else { + if err := r.initializeHub(ctx, hub, hubKubeconfig); err != nil { + return err + } + } + + hub.SetConditions(true, v1beta1.NewCondition( + v1beta1.HubInitialized, v1beta1.HubInitialized, metav1.ConditionTrue, metav1.ConditionTrue, + )) + + addonConfigChanged, err := handleAddonConfig(ctx, r.Client, addonC, hub) + if err != nil && addonConfigChanged { + hub.SetConditions(true, v1beta1.NewCondition( + err.Error(), v1beta1.AddonsConfigured, metav1.ConditionFalse, metav1.ConditionTrue, + )) + return err + } + + hubAddonChanged, err := handleHubAddons(ctx, addonC, hub) + if err != nil && hubAddonChanged { + hub.SetConditions(true, v1beta1.NewCondition( + err.Error(), v1beta1.AddonsConfigured, metav1.ConditionFalse, metav1.ConditionTrue, + )) + return err + } + + if addonConfigChanged || hubAddonChanged { + hub.SetConditions(true, v1beta1.NewCondition( + v1beta1.AddonsConfigured, v1beta1.AddonsConfigured, metav1.ConditionTrue, metav1.ConditionTrue, + )) + } + + // attempt an upgrade whenever the clustermanager's bundleVersion changes + if hub.Spec.ClusterManager != nil { + upgrade, err := r.hubNeedsUpgrade(ctx, hub, operatorC) + if err != nil { + return fmt.Errorf("failed to check if hub needs upgrade: %w", err) + } + if upgrade { + return r.upgradeHub(ctx, hub) + } + } + + return nil +} + +// initializeHub initializes the Hub cluster via 'clusteradm init' +func (r *HubReconciler) initializeHub(ctx context.Context, hub *v1beta1.Hub, hubKubeconfig []byte) error { + logger := log.FromContext(ctx) + logger.V(0).Info("initHub", "hub", hub.Name) + + initArgs := append([]string{ + "init", + fmt.Sprintf("--create-namespace=%t", hub.Spec.CreateNamespace), + fmt.Sprintf("--force=%t", hub.Spec.Force), + "--wait=true", + }, hub.BaseArgs()...) 
+ + if hub.Spec.RegistrationAuth.Driver == v1beta1.AWSIRSARegistrationDriver { + raArgs := []string{ + fmt.Sprintf("--registration-drivers=%s", hub.Spec.RegistrationAuth.Driver), + } + if hub.Spec.RegistrationAuth.HubClusterARN != "" { + raArgs = append(raArgs, fmt.Sprintf("--hub-cluster-arn=%s", hub.Spec.RegistrationAuth.HubClusterARN)) + } + if len(hub.Spec.RegistrationAuth.AutoApprovedARNPatterns) > 0 { + raArgs = append(raArgs, fmt.Sprintf("--auto-approved-arn-patterns=%s", strings.Join(hub.Spec.RegistrationAuth.AutoApprovedARNPatterns, ","))) + } + initArgs = append(initArgs, raArgs...) + } + + if hub.Spec.SingletonControlPlane != nil { + initArgs = append(initArgs, "--singleton=true") + initArgs = append(initArgs, "--singleton-name", hub.Spec.SingletonControlPlane.Name) + if hub.Spec.SingletonControlPlane.Helm != nil { + if hub.Spec.SingletonControlPlane.Helm.Values != "" { + values, cleanupValues, err := file.TmpFile([]byte(hub.Spec.SingletonControlPlane.Helm.Values), "values") + if cleanupValues != nil { + defer cleanupValues() + } + if err != nil { + return err + } + initArgs = append(initArgs, "--values", values) + } + for _, s := range hub.Spec.SingletonControlPlane.Helm.Set { + initArgs = append(initArgs, "--set", s) + } + for _, s := range hub.Spec.SingletonControlPlane.Helm.SetJSON { + initArgs = append(initArgs, "--set-json", s) + } + for _, s := range hub.Spec.SingletonControlPlane.Helm.SetLiteral { + initArgs = append(initArgs, "--set-literal", s) + } + for _, s := range hub.Spec.SingletonControlPlane.Helm.SetString { + initArgs = append(initArgs, "--set-string", s) + } + } + } else if hub.Spec.ClusterManager != nil { + // clustermanager args + initArgs = append(initArgs, "--feature-gates", hub.Spec.ClusterManager.FeatureGates) + initArgs = append(initArgs, fmt.Sprintf("--use-bootstrap-token=%t", hub.Spec.ClusterManager.UseBootstrapToken)) + // source args + initArgs = append(initArgs, "--bundle-version", 
hub.Spec.ClusterManager.Source.BundleVersion) + initArgs = append(initArgs, "--image-registry", hub.Spec.ClusterManager.Source.Registry) + // resources args + initArgs = append(initArgs, args.PrepareResources(hub.Spec.ClusterManager.Resources)...) + } else { + // one of clusterManager or singletonControlPlane must be specified, per validating webhook, but handle the edge case anyway + return fmt.Errorf("unknown hub type, must specify either hub.clusterManager or hub.singletonControlPlane") + } + + initArgs, cleanupKcfg, err := args.PrepareKubeconfig(ctx, hubKubeconfig, hub.Spec.Kubeconfig.Context, initArgs) + if cleanupKcfg != nil { + defer cleanupKcfg() + } + if err != nil { + return err + } + + logger.V(1).Info("clusteradm init", "args", initArgs) + + cmd := exec.Command(clusteradm, initArgs...) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm init' to complete...") + if err != nil { + out := append(stdout, stderr...) + return fmt.Errorf("failed to init hub: %v, output: %s", err, string(out)) + } + logger.V(1).Info("hub initialized", "output", string(stdout)) + + return nil +} + +// hubNeedsUpgrade checks if the clustermanager on the Hub cluster has the desired bundle version +func (r *HubReconciler) hubNeedsUpgrade(ctx context.Context, hub *v1beta1.Hub, operatorC *operatorapi.Clientset) (bool, error) { + logger := log.FromContext(ctx) + logger.V(0).Info("hubNeedsUpgrade", "hub", hub.Name) + + if hub.Spec.ClusterManager.Source.BundleVersion == "default" { + logger.V(0).Info("clustermanager bundleVersion is default, skipping upgrade") + return false, nil + } + if hub.Spec.ClusterManager.Source.BundleVersion == "latest" { + logger.V(0).Info("clustermanager bundleVersion is latest, attempting upgrade") + return true, nil + } + + cm, err := getClusterManager(ctx, operatorC) + if err != nil { + return false, err + } + + // identify lowest bundleVersion referenced in the clustermanager spec + bundleSpecs := make([]string, 0) + if 
cm.Spec.AddOnManagerImagePullSpec != "" { + bundleSpecs = append(bundleSpecs, cm.Spec.AddOnManagerImagePullSpec) + } + if cm.Spec.PlacementImagePullSpec != "" { + bundleSpecs = append(bundleSpecs, cm.Spec.PlacementImagePullSpec) + } + if cm.Spec.RegistrationImagePullSpec != "" { + bundleSpecs = append(bundleSpecs, cm.Spec.RegistrationImagePullSpec) + } + if cm.Spec.WorkImagePullSpec != "" { + bundleSpecs = append(bundleSpecs, cm.Spec.WorkImagePullSpec) + } + activeBundleVersion, err := version.LowestBundleVersion(ctx, bundleSpecs) + if err != nil { + return false, fmt.Errorf("failed to detect bundleVersion from clustermanager spec: %w", err) + } + desiredBundleVersion, err := version.Normalize(hub.Spec.ClusterManager.Source.BundleVersion) + if err != nil { + return false, err + } + + logger.V(0).Info("found clustermanager bundleVersions", + "activeBundleVersion", activeBundleVersion, + "desiredBundleVersion", desiredBundleVersion, + ) + return activeBundleVersion != desiredBundleVersion, nil +} + +// upgradeHub upgrades the Hub cluster's clustermanager to the specified version +func (r *HubReconciler) upgradeHub(ctx context.Context, hub *v1beta1.Hub) error { + logger := log.FromContext(ctx) + logger.V(0).Info("upgradeHub", "hub", hub.Name) + + upgradeArgs := append([]string{ + "upgrade", "clustermanager", + "--bundle-version", hub.Spec.ClusterManager.Source.BundleVersion, + "--image-registry", hub.Spec.ClusterManager.Source.Registry, + "--wait=true", + }, hub.BaseArgs()...) + + logger.V(1).Info("clusteradm upgrade clustermanager", "args", upgradeArgs) + + cmd := exec.Command(clusteradm, upgradeArgs...) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm upgrade clustermanager' to complete...") + if err != nil { + out := append(stdout, stderr...) 
+ return fmt.Errorf( + "failed to upgrade hub clustermanager to %s: %v, output: %s", + hub.Spec.ClusterManager.Source.BundleVersion, err, string(out), + ) + } + logger.V(1).Info("clustermanager upgraded", "output", string(stdout)) + + return nil } // SetupWithManager sets up the controller with the Manager. func (r *HubReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&v1beta1.Hub{}). + // watch for deleted Spokes to prevent idly waiting after all spokes have been GCd during Hub deletion + Watches( + &v1beta1.Spoke{}, + handler.EnqueueRequestsFromMapFunc(r.mapSpokeEventToHub), + builder.WithPredicates( + predicate.Funcs{ + DeleteFunc: func(_ event.DeleteEvent) bool { + return true + }, + CreateFunc: func(_ event.CreateEvent) bool { + return false + }, + UpdateFunc: func(_ event.UpdateEvent) bool { + return false + }, + GenericFunc: func(_ event.GenericEvent) bool { + return false + }, + }, + ), + ). Named("hub"). Complete(r) } + +func (r *HubReconciler) mapSpokeEventToHub(_ context.Context, obj client.Object) []reconcile.Request { + spoke, ok := obj.(*v1beta1.Spoke) + if !ok { + r.Log.V(1).Info("failed to enqueue hub requests", "expected", "spoke", "got", fmt.Sprintf("%T", obj)) + return nil + } + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: spoke.Spec.HubRef.Name, + Namespace: spoke.Spec.HubRef.Namespace, + }, + }, + } +} diff --git a/fleetconfig-controller/internal/controller/v1beta1/hub_controller_test.go b/fleetconfig-controller/internal/controller/v1beta1/hub_controller_test.go index c14d8615..b0f5ff0c 100644 --- a/fleetconfig-controller/internal/controller/v1beta1/hub_controller_test.go +++ b/fleetconfig-controller/internal/controller/v1beta1/hub_controller_test.go @@ -18,67 +18,121 @@ package v1beta1 import ( "context" + "fmt" + "time" + "github.com/go-logr/logr" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" + kerrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/reconcile" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1beta1 "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" ) -var _ = Describe("Hub Controller", func() { - Context("When reconciling a resource", func() { - const resourceName = "test-resource" +var ( + hub *v1beta1.Hub + hubReconciler *HubReconciler + hubNN types.NamespacedName +) +var _ = Describe("Hub Controller", Ordered, func() { + Context("When reconciling a Hub", func() { ctx := context.Background() - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed - } - hub := &v1beta1.Hub{} - - BeforeEach(func() { - By("creating the custom resource for the Kind Hub") - err := k8sClient.Get(ctx, typeNamespacedName, hub) - if err != nil && errors.IsNotFound(err) { - resource := &v1beta1.Hub{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: "default", + BeforeAll(func() { + hubNN = types.NamespacedName{ + Name: "test-hub", + Namespace: "default", + } + hubReconciler = &HubReconciler{ + Client: k8sClient, + Log: logr.Logger{}, + Scheme: k8sClient.Scheme(), + } + hub = &v1beta1.Hub{ + ObjectMeta: metav1.ObjectMeta{ + Name: hubNN.Name, + Namespace: hubNN.Namespace, + }, + Spec: v1beta1.HubSpec{ + Kubeconfig: v1beta1.Kubeconfig{ + InCluster: true, }, - // TODO(user): Specify other spec details if needed. - } - Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + CreateNamespace: true, + Timeout: 300, + LogVerbosity: 0, + }, } }) - AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. 
- resource := &v1beta1.Hub{} - err := k8sClient.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) + It("Should create a Hub", func() { + Expect(k8sClient.Create(ctx, hub)).To(Succeed()) + }) + + It("Should add a finalizer to the Hub", func() { + By("Reconciling the Hub") + Expect(reconcileHub(ctx)).To(Succeed()) - By("Cleanup the specific resource instance Hub") - Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + By("Verifying the Hub's finalizer") + Expect(k8sClient.Get(ctx, hubNN, hub)).To(Succeed()) + Expect(hub.Finalizers).To(ContainElement(v1beta1.HubCleanupFinalizer), + "Hub %s wasn't given a finalizer", hubNN.Name) }) - It("should successfully reconcile the resource", func() { - By("Reconciling the created resource") - controllerReconciler := &HubReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - } - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) - Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ It("Should initialize the Hub", func() { + By("Reconciling the Hub") + Expect(reconcileHub(ctx)).To(Succeed()) + + By("Verifying the Hub's phase and conditions") + Expect(k8sClient.Get(ctx, hubNN, hub)).To(Succeed()) + Expect(hub.Status.Phase).To(Equal(v1beta1.HubStarting), + "Hub %s is not in the Initializing phase", hubNN.Name) + Expect(assertHubConditions(hub.Status.Conditions, map[string]metav1.ConditionStatus{ + v1beta1.HubInitialized: metav1.ConditionFalse, + v1beta1.CleanupFailed: metav1.ConditionFalse, + v1beta1.AddonsConfigured: metav1.ConditionFalse, + })).To(Succeed()) + }) + + // cannot test full provisioning without an e2e test + + It("Should delete the Hub", func() { + By("Deleting the Hub") + Expect(k8sClient.Delete(ctx, hub)).To(Succeed()) + Eventually(func() error { + err := k8sClient.Get(ctx, hubNN, hub) + if kerrs.IsNotFound(err) { + return nil + } + return err + }, 5*time.Minute).Should(Succeed()) }) }) }) + +func reconcileHub(ctx context.Context) error { + _, err := hubReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: hubNN, + }) + return err +} + +// assertHubConditions asserts that two sets of conditions match. 
+func assertHubConditions(conditions []v1beta1.Condition, expected map[string]metav1.ConditionStatus) error { + if len(conditions) != len(expected) { + return fmt.Errorf("expected %d conditions, got %d", len(expected), len(conditions)) + } + for _, c := range conditions { + expectedCondition, ok := expected[c.Type] + if !ok { + return fmt.Errorf("unhandled condition %s", c.Type) + } + if c.Status != expectedCondition { + return fmt.Errorf("condition %s has status %s, expected %s", c.Type, c.Status, expectedCondition) + } + } + return nil +} diff --git a/fleetconfig-controller/internal/controller/v1beta1/spoke_controller.go b/fleetconfig-controller/internal/controller/v1beta1/spoke_controller.go index 43054799..5e580792 100644 --- a/fleetconfig-controller/internal/controller/v1beta1/spoke_controller.go +++ b/fleetconfig-controller/internal/controller/v1beta1/spoke_controller.go @@ -18,46 +18,969 @@ package v1beta1 import ( "context" + "encoding/json" + "errors" + "fmt" + "maps" + "os/exec" + "reflect" + "slices" + "strings" + "dario.cat/mergo" + certificatesv1 "k8s.io/api/certificates/v1" + corev1 "k8s.io/api/core/v1" + kerrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + clusterv1 "open-cluster-management.io/api/cluster/v1" + operatorv1 "open-cluster-management.io/api/operator/v1" + "sigs.k8s.io/cluster-api/util/patch" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" - v1beta1 
"github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" + "github.com/go-logr/logr" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1alpha1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/args" + exec_utils "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/exec" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/file" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/hash" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/kube" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/version" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/pkg/common" ) // SpokeReconciler reconciles a Spoke object type SpokeReconciler struct { client.Client - Scheme *runtime.Scheme + Log logr.Logger + Scheme *runtime.Scheme + ConcurrentReconciles int } // +kubebuilder:rbac:groups=fleetconfig.open-cluster-management.io,resources=spokes,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=fleetconfig.open-cluster-management.io,resources=spokes/status,verbs=get;update;patch // +kubebuilder:rbac:groups=fleetconfig.open-cluster-management.io,resources=spokes/finalizers,verbs=update -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the Spoke object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/reconcile -func (r *SpokeReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) { - _ = logf.FromContext(ctx) +// Reconcile is the main reconcile loop for the Spoke resource. +func (r *SpokeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := r.Log.WithValues("request", req) + ctx = log.IntoContext(ctx, logger) - // TODO(user): your logic here + // Fetch the Spoke instance + spoke := &v1beta1.Spoke{} + err := r.Get(ctx, req.NamespacedName, spoke) + if err != nil { + if !kerrs.IsNotFound(err) { + logger.Error(err, "failed to fetch Spoke", "key", req) + } + return ret(ctx, ctrl.Result{}, client.IgnoreNotFound(err)) + } + ctx = withOriginalSpoke(ctx, spoke) - return ctrl.Result{}, nil + // Create a patch helper for this reconciliation + patchHelper, err := patch.NewHelper(spoke, r.Client) + if err != nil { + return ret(ctx, ctrl.Result{}, err) + } + + // Ensure patch is applied at the end + defer func() { + if err := patchHelper.Patch(ctx, spoke); err != nil && !kerrs.IsNotFound(err) { + logger.Error(err, "failed to patch Spoke") + } + }() + + hubMeta, err := r.getHubMeta(ctx, spoke.Spec.HubRef) + if err != nil { + // notFound does not return an error + logger.V(0).Info("Failed to get latest hub metadata", "error", err) + spoke.Status.Phase = v1beta1.Unhealthy + } + + // Add a finalizer if not already present, set defaults, and requeue + if !slices.Contains(spoke.Finalizers, v1beta1.SpokeCleanupFinalizer) { + setDefaults(ctx, spoke, hubMeta) + spoke.Finalizers = append(spoke.Finalizers, v1beta1.SpokeCleanupFinalizer) + return ret(ctx, ctrl.Result{RequeueAfter: requeue}, nil) + } + + spokeKubeconfig, err := kube.KubeconfigFromSecretOrCluster(ctx, r.Client, spoke.Spec.Kubeconfig, spoke.Namespace) + if err != nil { + return ret(ctx, ctrl.Result{}, err) + } + + // Handle 
deletion logic with finalizer + if !spoke.DeletionTimestamp.IsZero() { + if spoke.Status.Phase != v1beta1.Deleting { + spoke.Status.Phase = v1beta1.Deleting + return ret(ctx, ctrl.Result{RequeueAfter: requeue}, nil) + } + + if slices.Contains(spoke.Finalizers, v1beta1.SpokeCleanupFinalizer) { + if err := r.cleanup(ctx, spoke, spokeKubeconfig, hubMeta); err != nil { + spoke.SetConditions(true, v1beta1.NewCondition( + err.Error(), v1beta1.CleanupFailed, metav1.ConditionTrue, metav1.ConditionFalse, + )) + return ret(ctx, ctrl.Result{}, err) + } + } + spoke.Finalizers = slices.DeleteFunc(spoke.Finalizers, func(s string) bool { + return s == v1beta1.SpokeCleanupFinalizer + }) + // end reconciliation + return ret(ctx, ctrl.Result{}, nil) + } + + // Initialize phase & conditions + previousPhase := spoke.Status.Phase + spoke.Status.Phase = v1beta1.SpokeJoining + initConditions := []v1beta1.Condition{ + v1beta1.NewCondition( + v1beta1.SpokeJoined, v1beta1.SpokeJoined, metav1.ConditionFalse, metav1.ConditionTrue, + ), + v1beta1.NewCondition( + v1beta1.CleanupFailed, v1beta1.CleanupFailed, metav1.ConditionFalse, metav1.ConditionFalse, + ), + v1beta1.NewCondition( + v1beta1.AddonsConfigured, v1beta1.AddonsConfigured, metav1.ConditionFalse, metav1.ConditionFalse, + ), + } + spoke.SetConditions(false, initConditions...) 
+ + if previousPhase == "" { + // set initial phase/conditions and requeue + return ret(ctx, ctrl.Result{RequeueAfter: requeue}, nil) + } + + // Handle Spoke cluster: join and/or upgrade + if err := r.handleSpoke(ctx, spoke, hubMeta, spokeKubeconfig); err != nil { + logger.Error(err, "Failed to handle spoke operations") + spoke.Status.Phase = v1beta1.Unhealthy + } + + // Finalize phase + for _, c := range spoke.Status.Conditions { + if c.Status != c.WantStatus { + logger.Info("WARNING: condition does not have the desired status", "type", c.Type, "reason", c.Reason, "message", c.Message, "status", c.Status, "wantStatus", c.WantStatus) + spoke.Status.Phase = v1beta1.Unhealthy + return ret(ctx, ctrl.Result{RequeueAfter: requeue}, nil) + } + } + if spoke.Status.Phase == v1beta1.SpokeJoining { + spoke.Status.Phase = v1beta1.SpokeRunning + } + + return ret(ctx, ctrl.Result{RequeueAfter: requeue}, nil) +} + +type spokeContextKey int + +const ( + // originalSpokeKey is the key in the context that records the incoming original Spoke + originalSpokeKey spokeContextKey = iota +) + +func withOriginalSpoke(ctx context.Context, spoke *v1beta1.Spoke) context.Context { + return context.WithValue(ctx, originalSpokeKey, spoke.DeepCopy()) +} + +func setDefaults(ctx context.Context, spoke *v1beta1.Spoke, hubMeta hubMeta) { + logger := log.FromContext(ctx) + if hubMeta.hub == nil { + logger.V(0).Info("hub not found, skip overriding default timeout and log verbosity") + return + } + if spoke.Spec.Timeout == 300 { + spoke.Spec.Timeout = hubMeta.hub.Spec.Timeout + } + if spoke.Spec.LogVerbosity == 0 { + spoke.Spec.LogVerbosity = hubMeta.hub.Spec.LogVerbosity + } +} + +// cleanup cleans up a Spoke and its associated resources. 
+func (r *SpokeReconciler) cleanup(ctx context.Context, spoke *v1beta1.Spoke, spokeKubeconfig []byte, hubMeta hubMeta) error { + logger := log.FromContext(ctx) + + clusterC, err := common.ClusterClient(hubMeta.kubeconfig) + if err != nil { + return err + } + workC, err := common.WorkClient(hubMeta.kubeconfig) + if err != nil { + return err + } + addonC, err := common.AddOnClient(hubMeta.kubeconfig) + if err != nil { + return fmt.Errorf("failed to create addon client for cleanup: %w", err) + } + + // skip clean up if the ManagedCluster resource is not found or if any manifestWorks exist + managedCluster, err := clusterC.ClusterV1().ManagedClusters().Get(ctx, spoke.Name, metav1.GetOptions{}) + if kerrs.IsNotFound(err) { + logger.Info("ManagedCluster resource not found; nothing to do") + return nil + } else if err != nil { + return fmt.Errorf("unexpected error listing managedClusters: %w", err) + } + manifestWorks, err := workC.WorkV1().ManifestWorks(managedCluster.Name).List(ctx, metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("failed to list manifestWorks for managedCluster %s: %w", managedCluster.Name, err) + } + + // check that the number of manifestWorks is the same as the number of addons enabled for that spoke + if len(manifestWorks.Items) > 0 && !allOwnersAddOns(manifestWorks.Items) { + msg := fmt.Sprintf("Found manifestWorks for ManagedCluster %s; cannot unjoin spoke cluster while it has active ManifestWorks", managedCluster.Name) + logger.Info(msg) + return errors.New(msg) + } + + // remove addons only after confirming that the cluster can be unjoined - this avoids leaving dangling resources that may rely on the addon + spokeCopy := spoke.DeepCopy() + spokeCopy.Spec.AddOns = nil + if _, err := handleSpokeAddons(ctx, addonC, spokeCopy); err != nil { + spoke.SetConditions(true, v1beta1.NewCondition( + err.Error(), v1beta1.AddonsConfigured, metav1.ConditionTrue, metav1.ConditionFalse, + )) + return err + } + + if len(spoke.Status.EnabledAddons) > 0 
{ + // Wait for addon manifestWorks to be fully cleaned up before proceeding with unjoin + if err := waitForAddonManifestWorksCleanup(ctx, workC, spoke.Name, addonCleanupTimeout); err != nil { + spoke.SetConditions(true, v1beta1.NewCondition( + err.Error(), v1beta1.AddonsConfigured, metav1.ConditionTrue, metav1.ConditionFalse, + )) + return fmt.Errorf("addon manifestWorks cleanup failed: %w", err) + } + spoke.SetConditions(true, v1beta1.NewCondition( + v1beta1.AddonsConfigured, v1beta1.AddonsConfigured, metav1.ConditionFalse, metav1.ConditionFalse, + )) + } + + if err := r.unjoinSpoke(ctx, spoke, spokeKubeconfig); err != nil { + return err + } + + // remove CSR + csrList := &certificatesv1.CertificateSigningRequestList{} + if err := r.List(ctx, csrList, client.HasLabels{"open-cluster-management.io/cluster-name"}); err != nil { + return err + } + for _, c := range csrList.Items { + trimmedName := csrSuffixPattern.ReplaceAllString(c.Name, "") + if trimmedName == spoke.Name { + if err := r.Delete(ctx, &c); err != nil { + return err + } + } + } + + // remove ManagedCluster + if err = clusterC.ClusterV1().ManagedClusters().Delete(ctx, spoke.Name, metav1.DeleteOptions{}); err != nil { + return client.IgnoreNotFound(err) + } + + // remove Namespace + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: spoke.Name}} + if err := r.Delete(ctx, ns); err != nil { + return client.IgnoreNotFound(err) + } + + return nil +} + +// handleSpoke manages Spoke cluster join and upgrade operations +func (r *SpokeReconciler) handleSpoke(ctx context.Context, spoke *v1beta1.Spoke, hubMeta hubMeta, spokeKubeconfig []byte) error { + logger := log.FromContext(ctx) + logger.V(0).Info("handleSpoke", "spoke", spoke.Name) + + hub := hubMeta.hub + hubKubeconfig := hubMeta.kubeconfig + + clusterClient, err := common.ClusterClient(hubKubeconfig) + if err != nil { + return err + } + addonC, err := common.AddOnClient(hubKubeconfig) + if err != nil { + return fmt.Errorf("failed to create addon 
client: %w", err) + } + + // check if the spoke has already been joined to the hub + managedCluster, err := common.GetManagedCluster(ctx, clusterClient, spoke.Name) + if err != nil { + logger.Error(err, "failed to get managedCluster", "spoke", spoke.Name) + return err + } + + klusterletValues, err := r.mergeKlusterletValues(ctx, spoke) + if err != nil { + return err + } + + // attempt to join the spoke cluster if it hasn't already been joined + if managedCluster == nil { + if err := r.joinSpoke(ctx, spoke, hubMeta, klusterletValues, spokeKubeconfig); err != nil { + spoke.SetConditions(true, v1beta1.NewCondition( + err.Error(), v1beta1.SpokeJoined, metav1.ConditionFalse, metav1.ConditionTrue, + )) + return err + } + + // Accept the cluster join request + if err := acceptCluster(ctx, spoke, false); err != nil { + spoke.SetConditions(true, v1beta1.NewCondition( + err.Error(), v1beta1.SpokeJoined, metav1.ConditionFalse, metav1.ConditionTrue, + )) + return err + } + + managedCluster, err = common.GetManagedCluster(ctx, clusterClient, spoke.Name) + if err != nil { + logger.Error(err, "failed to get managedCluster after join", "spoke", spoke.Name) + return err + } + } + + // check managed clusters joined condition + jc := r.getJoinedCondition(managedCluster) + if jc == nil { + logger.V(0).Info("waiting for spoke cluster to join", "name", spoke.Name) + msg := fmt.Sprintf("ManagedClusterJoined condition not found in ManagedCluster for spoke cluster %s", spoke.Name) + spoke.SetConditions(true, v1beta1.NewCondition( + msg, v1beta1.SpokeJoined, metav1.ConditionFalse, metav1.ConditionTrue, + )) + // Re-accept all join requests for the spoke cluster + if err := acceptCluster(ctx, spoke, true); err != nil { + logger.Error(err, "failed to accept spoke cluster join request(s)", "spoke", spoke.Name) + } + return nil + } + + logger.V(0).Info("found join condition", "reason", jc.Reason, "status", jc.Status, "message", jc.Message) + if jc.Status != metav1.ConditionTrue { + msg := 
fmt.Sprintf("failed to join spoke cluster %s: %s", spoke.Name, jc.Message) + spoke.SetConditions(true, v1beta1.NewCondition( + msg, v1beta1.SpokeJoined, metav1.ConditionFalse, metav1.ConditionTrue, + )) + return errors.New(msg) + } + + // spoke cluster has joined successfully + spoke.SetConditions(true, v1beta1.NewCondition( + "Joined", v1beta1.SpokeJoined, metav1.ConditionTrue, metav1.ConditionTrue, + )) + + // Label the spoke ManagedCluster if in hub-as-spoke mode. + // This allows the 'spoke' ManagedClusterSet to omit the hub-as-spoke cluster from its list + // of spoke clusters. + if managedCluster != nil && spoke.Spec.Kubeconfig.InCluster { + if managedCluster.Labels == nil { + managedCluster.Labels = make(map[string]string) + } + managedCluster.Labels[v1beta1.LabelManagedClusterType] = v1beta1.ManagedClusterTypeHubAsSpoke + if err := common.UpdateManagedCluster(ctx, clusterClient, managedCluster); err != nil { + return err + } + logger.V(0).Info("labeled ManagedCluster as hub-as-spoke", "name", spoke.Name) + } + + // attempt an upgrade whenever the klusterlet's bundleVersion or values change + currKlusterletHash, err := hash.ComputeHash(klusterletValues) + if err != nil { + return fmt.Errorf("failed to compute hash of spoke %s klusterlet values: %w", spoke.Name, err) + } + if hub != nil && hub.Spec.ClusterManager.Source.BundleVersion != "" { + upgrade, err := r.spokeNeedsUpgrade(ctx, spoke, currKlusterletHash, hub.Spec.ClusterManager.Source, spokeKubeconfig) + if err != nil { + return fmt.Errorf("failed to check if spoke cluster needs upgrade: %w", err) + } + + if upgrade { + if err := r.upgradeSpoke(ctx, spoke, klusterletValues, hub.Spec.ClusterManager.Source, spokeKubeconfig); err != nil { + return fmt.Errorf("failed to upgrade spoke cluster %s: %w", spoke.Name, err) + } + } + } + + enabledAddons, err := handleSpokeAddons(ctx, addonC, spoke) + if err != nil { + msg := fmt.Sprintf("failed to enable addons for spoke cluster %s: %s", spoke.Name, err.Error()) + 
spoke.SetConditions(true, v1beta1.NewCondition( + msg, v1beta1.AddonsConfigured, metav1.ConditionFalse, metav1.ConditionTrue, + )) + return err + } + + // Update status with enabled addons and klusterlet hash + spoke.Status.EnabledAddons = enabledAddons + spoke.Status.KlusterletHash = currKlusterletHash + + return nil +} + +type tokenMeta struct { + Token string `json:"hub-token"` + HubAPIServer string `json:"hub-apiserver"` +} + +type hubMeta struct { + hub *v1beta1.Hub + kubeconfig []byte +} + +// joinSpoke joins a Spoke cluster to the Hub cluster +func (r *SpokeReconciler) joinSpoke(ctx context.Context, spoke *v1beta1.Spoke, hubMeta hubMeta, klusterletValues *v1beta1.KlusterletChartConfig, spokeKubeconfig []byte) error { + logger := log.FromContext(ctx) + logger.V(0).Info("joinSpoke", "spoke", spoke.Name) + + hub := hubMeta.hub + + if hub == nil { + return errors.New("hub not found") + } + // dont start join until the hub is ready + hubInitCond := hubMeta.hub.GetCondition(v1beta1.HubInitialized) + if hubInitCond == nil || hubInitCond.Status != metav1.ConditionTrue { + return errors.New("hub does not have initialized condition") + } + + tokenMeta, err := getToken(ctx, hubMeta) + if err != nil { + return fmt.Errorf("failed to get join token: %w", err) + } + + joinArgs := append([]string{ + "join", + "--cluster-name", spoke.Name, + fmt.Sprintf("--create-namespace=%t", spoke.Spec.CreateNamespace), + fmt.Sprintf("--enable-sync-labels=%t", spoke.Spec.SyncLabels), + "--hub-token", tokenMeta.Token, + "--wait=true", + // klusterlet args + "--mode", spoke.Spec.Klusterlet.Mode, + "--feature-gates", spoke.Spec.Klusterlet.FeatureGates, + fmt.Sprintf("--force-internal-endpoint-lookup=%t", spoke.Spec.Klusterlet.ForceInternalEndpointLookup), + fmt.Sprintf("--singleton=%t", spoke.Spec.Klusterlet.Singleton), + // source args + "--bundle-version", hub.Spec.ClusterManager.Source.BundleVersion, + "--image-registry", hub.Spec.ClusterManager.Source.Registry, + }, spoke.BaseArgs()...) 
+ + for k, v := range spoke.Spec.Klusterlet.Annotations { + joinArgs = append(joinArgs, fmt.Sprintf("--klusterlet-annotation=%s=%s", k, v)) + } + + // resources args + joinArgs = append(joinArgs, args.PrepareResources(spoke.Spec.Klusterlet.Resources)...) + + // Use hub API server from spec if provided and not forced to use internal endpoint, + // otherwise fall back to the hub API server from the tokenMeta + if hub.Spec.APIServer != "" && !spoke.Spec.Klusterlet.ForceInternalEndpointLookup { + joinArgs = append(joinArgs, "--hub-apiserver", hub.Spec.APIServer) + } else if tokenMeta.HubAPIServer != "" { + joinArgs = append(joinArgs, "--hub-apiserver", tokenMeta.HubAPIServer) + } + + if hub.Spec.Ca != "" { + caFile, caCleanup, err := file.TmpFile([]byte(hub.Spec.Ca), "ca") + if caCleanup != nil { + defer caCleanup() + } + if err != nil { + return fmt.Errorf("failed to write hub CA to disk: %w", err) + } + joinArgs = append([]string{fmt.Sprintf("--ca-file=%s", caFile)}, joinArgs...) + } + + ra := hub.Spec.RegistrationAuth + if ra.Driver == v1alpha1.AWSIRSARegistrationDriver { + raArgs := []string{ + fmt.Sprintf("--registration-auth=%s", ra.Driver), + } + if ra.HubClusterARN != "" { + raArgs = append(raArgs, fmt.Sprintf("--hub-cluster-arn=%s", ra.HubClusterARN)) + } + if spoke.Spec.ClusterARN != "" { + raArgs = append(raArgs, fmt.Sprintf("--managed-cluster-arn=%s", spoke.Spec.ClusterARN)) + } + + joinArgs = append(joinArgs, raArgs...) 
+ } + + if spoke.Spec.Klusterlet.Mode == string(operatorv1.InstallModeHosted) { + joinArgs = append(joinArgs, + fmt.Sprintf("--force-internal-endpoint-lookup-managed=%t", spoke.Spec.Klusterlet.ForceInternalEndpointLookupManaged), + ) + raw, err := kube.KubeconfigFromSecretOrCluster(ctx, r.Client, spoke.Spec.Klusterlet.ManagedClusterKubeconfig, spoke.Namespace) + if err != nil { + return err + } + mgdKcfg, mgdKcfgCleanup, err := file.TmpFile(raw, "kubeconfig") + if mgdKcfgCleanup != nil { + defer mgdKcfgCleanup() + } + if err != nil { + return fmt.Errorf("failed to write managedClusterKubeconfig to disk: %w", err) + } + joinArgs = append(joinArgs, "--managed-cluster-kubeconfig", mgdKcfg) + } + + if spoke.Spec.ProxyCa != "" { + proxyCaFile, proxyCaCleanup, err := file.TmpFile([]byte(spoke.Spec.ProxyCa), "proxy-ca") + if proxyCaCleanup != nil { + defer proxyCaCleanup() + } + if err != nil { + return fmt.Errorf("failed to write proxy CA to disk: %w", err) + } + joinArgs = append(joinArgs, fmt.Sprintf("--proxy-ca-file=%s", proxyCaFile)) + } + if spoke.Spec.ProxyURL != "" { + joinArgs = append(joinArgs, fmt.Sprintf("--proxy-url=%s", spoke.Spec.ProxyURL)) + } + + valuesArgs, valuesCleanup, err := prepareKlusterletValuesFile(klusterletValues) + if valuesCleanup != nil { + defer valuesCleanup() + } + if err != nil { + return err + } + joinArgs = append(joinArgs, valuesArgs...) + + joinArgs, cleanupKcfg, err := args.PrepareKubeconfig(ctx, spokeKubeconfig, spoke.Spec.Kubeconfig.Context, joinArgs) + if cleanupKcfg != nil { + defer cleanupKcfg() + } + if err != nil { + return err + } + + logger.V(1).Info("clusteradm join", "args", joinArgs) + + cmd := exec.Command(clusteradm, joinArgs...) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, fmt.Sprintf("waiting for 'clusteradm join' to complete for spoke %s...", spoke.Name)) + if err != nil { + out := append(stdout, stderr...) 
+ return fmt.Errorf("clusteradm join command failed for spoke %s: %v, output: %s", spoke.Name, err, string(out)) + } + logger.V(1).Info("successfully requested spoke cluster join", "output", string(stdout)) + + return nil +} + +// acceptCluster accepts a Spoke cluster's join request +func acceptCluster(ctx context.Context, spoke *v1beta1.Spoke, skipApproveCheck bool) error { + logger := log.FromContext(ctx) + logger.V(0).Info("acceptCluster", "spoke", spoke.Name) + + acceptArgs := append([]string{ + "accept", "--cluster", spoke.Name, + }, spoke.BaseArgs()...) + + logger.V(1).Info("clusteradm accept", "args", acceptArgs) + + // TODO: handle other args: + // --requesters=[]: + // Common Names of agents to be approved. + + if skipApproveCheck { + acceptArgs = append(acceptArgs, "--skip-approve-check") + } + + cmd := exec.Command(clusteradm, acceptArgs...) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, fmt.Sprintf("waiting for 'clusteradm accept' to complete for spoke %s...", spoke.Name)) + if err != nil { + out := append(stdout, stderr...) 
+ return fmt.Errorf("failed to accept spoke cluster join request: %v, output: %s", err, string(out)) + } + logger.V(1).Info("spoke cluster join request accepted", "output", string(stdout)) + + return nil +} + +// getJoinedCondition gets the joined condition from a managed cluster +func (r *SpokeReconciler) getJoinedCondition(managedCluster *clusterv1.ManagedCluster) *metav1.Condition { + if managedCluster == nil || managedCluster.Status.Conditions == nil { + return nil + } + + for _, c := range managedCluster.Status.Conditions { + if c.Type == "ManagedClusterJoined" { + return &c + } + } + + return nil +} + +// spokeNeedsUpgrade checks if the klusterlet on a Spoke cluster requires an upgrade +func (r *SpokeReconciler) spokeNeedsUpgrade(ctx context.Context, spoke *v1beta1.Spoke, currKlusterletHash string, source v1beta1.OCMSource, spokeKubeconfig []byte) (bool, error) { + logger := log.FromContext(ctx) + logger.V(0).Info("spokeNeedsUpgrade", "spokeClusterName", spoke.Name) + + hashChanged := spoke.Status.KlusterletHash != currKlusterletHash + logger.V(2).Info("comparing klusterlet values hash", + "spoke", spoke.Name, + "prevHash", spoke.Status.KlusterletHash, + "currHash", currKlusterletHash, + ) + if hashChanged { + return true, nil + } + + if source.BundleVersion == "default" { + logger.V(0).Info("klusterlet bundleVersion is default, skipping upgrade") + return false, nil + } + if source.BundleVersion == "latest" { + logger.V(0).Info("klusterlet bundleVersion is latest, attempting upgrade") + return true, nil + } + + operatorC, err := common.OperatorClient(spokeKubeconfig) + if err != nil { + return false, err + } + + k, err := operatorC.OperatorV1().Klusterlets().Get(ctx, "klusterlet", metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("failed to get klusterlet: %w", err) + } + + // identify lowest bundleVersion referenced in the klusterlet spec + bundleSpecs := make([]string, 0) + if k.Spec.ImagePullSpec != "" { + bundleSpecs = append(bundleSpecs, 
k.Spec.ImagePullSpec) + } + if k.Spec.RegistrationImagePullSpec != "" { + bundleSpecs = append(bundleSpecs, k.Spec.RegistrationImagePullSpec) + } + if k.Spec.WorkImagePullSpec != "" { + bundleSpecs = append(bundleSpecs, k.Spec.WorkImagePullSpec) + } + activeBundleVersion, err := version.LowestBundleVersion(ctx, bundleSpecs) + if err != nil { + return false, fmt.Errorf("failed to detect bundleVersion from klusterlet spec: %w", err) + } + desiredBundleVersion, err := version.Normalize(source.BundleVersion) + if err != nil { + return false, err + } + + logger.V(0).Info("found klusterlet bundleVersions", + "activeBundleVersion", activeBundleVersion, + "desiredBundleVersion", desiredBundleVersion, + ) + return activeBundleVersion != desiredBundleVersion, nil +} + +// upgradeSpoke upgrades the Spoke cluster's klusterlet +func (r *SpokeReconciler) upgradeSpoke(ctx context.Context, spoke *v1beta1.Spoke, klusterletValues *v1beta1.KlusterletChartConfig, source v1beta1.OCMSource, spokeKubeconfig []byte) error { + logger := log.FromContext(ctx) + logger.V(0).Info("upgradeSpoke", "spoke", spoke.Name) + + upgradeArgs := append([]string{ + "upgrade", "klusterlet", + "--bundle-version", source.BundleVersion, + "--image-registry", source.Registry, + "--wait=true", + }, spoke.BaseArgs()...) + + valuesArgs, valuesCleanup, err := prepareKlusterletValuesFile(klusterletValues) + if valuesCleanup != nil { + defer valuesCleanup() + } + if err != nil { + return err + } + upgradeArgs = append(upgradeArgs, valuesArgs...) + + upgradeArgs, cleanupKcfg, err := args.PrepareKubeconfig(ctx, spokeKubeconfig, spoke.Spec.Kubeconfig.Context, upgradeArgs) + if cleanupKcfg != nil { + defer cleanupKcfg() + } + if err != nil { + return err + } + + logger.V(1).Info("clusteradm upgrade klusterlet", "args", upgradeArgs) + + cmd := exec.Command(clusteradm, upgradeArgs...) 
+ stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, fmt.Sprintf("waiting for 'clusteradm upgrade klusterlet' to complete for spoke %s...", spoke.Name)) + if err != nil { + out := append(stdout, stderr...) + return fmt.Errorf( + "failed to upgrade klusterlet on spoke cluster %s to %s: %v, output: %s", + spoke.Name, source.BundleVersion, err, string(out), + ) + } + logger.V(1).Info("klusterlet upgraded", "output", string(stdout)) + + return nil +} + +// unjoinSpoke unjoins a spoke from the hub +func (r *SpokeReconciler) unjoinSpoke(ctx context.Context, spoke *v1beta1.Spoke, spokeKubeconfig []byte) error { + logger := log.FromContext(ctx) + logger.V(0).Info("unjoinSpoke", "spoke", spoke.Name) + + unjoinArgs := append([]string{ + "unjoin", + "--cluster-name", spoke.GetName(), + fmt.Sprintf("--purge-operator=%t", spoke.Spec.Klusterlet.PurgeOperator), + }, spoke.BaseArgs()...) + + unjoinArgs, cleanupKcfg, err := args.PrepareKubeconfig(ctx, spokeKubeconfig, spoke.Spec.Kubeconfig.Context, unjoinArgs) + if cleanupKcfg != nil { + defer cleanupKcfg() + } + if err != nil { + return fmt.Errorf("failed to unjoin spoke cluster %s: %w", spoke.GetName(), err) + } + + logger.V(1).Info("clusteradm unjoin", "args", unjoinArgs) + + cmd := exec.Command(clusteradm, unjoinArgs...) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, fmt.Sprintf("waiting for 'clusteradm unjoin' to complete for spoke %s...", spoke.GetName())) + out := append(stdout, stderr...) 
+ if err != nil || strings.Contains(string(out), amwExistsError) { + return fmt.Errorf("failed to unjoin spoke cluster %s: %v, output: %s", spoke.GetName(), err, string(out)) + } + logger.V(1).Info("spoke cluster unjoined", "output", string(stdout)) + + return nil +} + +// getToken gets a join token from the Hub cluster via 'clusteradm get token' +func getToken(ctx context.Context, hubMeta hubMeta) (*tokenMeta, error) { + logger := log.FromContext(ctx) + logger.V(0).Info("getToken") + + tokenArgs := append([]string{ + "get", "token", "--output=json", + }, hubMeta.hub.BaseArgs()...) + + if hubMeta.hub.Spec.ClusterManager != nil { + tokenArgs = append(tokenArgs, fmt.Sprintf("--use-bootstrap-token=%t", hubMeta.hub.Spec.ClusterManager.UseBootstrapToken)) + } + tokenArgs, cleanupKcfg, err := args.PrepareKubeconfig(ctx, hubMeta.kubeconfig, hubMeta.hub.Spec.Kubeconfig.Context, tokenArgs) + if cleanupKcfg != nil { + defer cleanupKcfg() + } + if err != nil { + return nil, fmt.Errorf("failed to prepare kubeconfig: %w", err) + } + + logger.V(1).Info("clusteradm get token", "args", tokenArgs) + + cmd := exec.Command(clusteradm, tokenArgs...) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm get token' to complete...") + if err != nil { + out := append(stdout, stderr...) 
+ return nil, fmt.Errorf("failed to get join token: %v, output: %s", err, string(out)) + } + logger.V(1).Info("got join token", "output", string(stdout)) + + tokenMeta := &tokenMeta{} + if err := json.Unmarshal(stdout, &tokenMeta); err != nil { + return nil, fmt.Errorf("failed to unmarshal join token: %w", err) + } + return tokenMeta, nil +} + +func (r *SpokeReconciler) getHubMeta(ctx context.Context, hubRef v1beta1.HubRef) (hubMeta, error) { + hub := &v1beta1.Hub{} + hubMeta := hubMeta{} + nn := types.NamespacedName{Name: hubRef.Name, Namespace: hubRef.Namespace} + + // get Hub using local client + err := r.Get(ctx, nn, hub) + if err != nil { + return hubMeta, client.IgnoreNotFound(err) + } + hubMeta.hub = hub + // if found, load the hub's kubeconfig + hubKubeconfig, err := kube.KubeconfigFromSecretOrCluster(ctx, r.Client, hub.Spec.Kubeconfig, hub.Namespace) + if err != nil { + return hubMeta, err + } + hubMeta.kubeconfig = hubKubeconfig + return hubMeta, nil +} + +func (r *SpokeReconciler) mergeKlusterletValues(ctx context.Context, spoke *v1beta1.Spoke) (*v1beta1.KlusterletChartConfig, error) { + logger := log.FromContext(ctx) + + if spoke.Spec.Klusterlet.ValuesFrom == nil && spoke.Spec.Klusterlet.Values == nil { + logger.V(3).Info("no values or valuesFrom provided. 
Using default klusterlet chart values", "spoke", spoke.Name)
+		return nil, nil
+	}
+
+	var fromInterface = map[string]any{}
+	var specInterface = map[string]any{}
+
+	if spoke.Spec.Klusterlet.ValuesFrom != nil {
+		cm := &corev1.ConfigMap{}
+		nn := types.NamespacedName{Name: spoke.Spec.Klusterlet.ValuesFrom.Name, Namespace: spoke.Namespace}
+		err := r.Get(ctx, nn, cm)
+		if err != nil {
+			if kerrs.IsNotFound(err) {
+				// cm not found, return spec's values
+				logger.V(1).Info("warning: Klusterlet values ConfigMap not found", "spoke", spoke.Name, "configMap", nn)
+				return spoke.Spec.Klusterlet.Values, nil
+			}
+			return nil, fmt.Errorf("failed to retrieve Klusterlet values ConfigMap %s: %w", nn, err)
+		}
+		fromValues, ok := cm.Data[spoke.Spec.Klusterlet.ValuesFrom.Key]
+		if !ok {
+			logger.V(1).Info("warning: key not found in Klusterlet values ConfigMap", "spoke", spoke.Name, "configMap", nn, "key", spoke.Spec.Klusterlet.ValuesFrom.Key)
+			return spoke.Spec.Klusterlet.Values, nil
+		}
+		fromBytes := []byte(fromValues)
+		err = yaml.Unmarshal(fromBytes, &fromInterface)
+		if err != nil {
+			return nil, fmt.Errorf("failed to unmarshal YAML values from ConfigMap %s key %s: %w", nn, spoke.Spec.Klusterlet.ValuesFrom.Key, err)
+		}
+	}
+
+	if spoke.Spec.Klusterlet.Values != nil {
+		specBytes, err := yaml.Marshal(spoke.Spec.Klusterlet.Values)
+		if err != nil {
+			return nil, fmt.Errorf("failed to marshal Klusterlet values from spoke spec for spoke %s: %w", spoke.Name, err)
+		}
+		err = yaml.Unmarshal(specBytes, &specInterface)
+		if err != nil {
+			return nil, fmt.Errorf("failed to unmarshal Klusterlet values from spoke spec for spoke %s: %w", spoke.Name, err)
+		}
+	}
+
+	mergedMap := map[string]any{}
+	maps.Copy(mergedMap, fromInterface)
+
+	// Merge spec on top but ignore zero-values from spec
+	if err := mergo.Map(&mergedMap, specInterface, mergo.WithOverride); err != nil {
+		return nil, fmt.Errorf("merge failed for spoke %s: %w", spoke.Name, err)
+	}
+
+	mergedBytes, err :=
yaml.Marshal(mergedMap) + if err != nil { + return nil, fmt.Errorf("failed to marshal merged Klusterlet values for spoke %s: %w", spoke.Name, err) + } + + merged := &v1beta1.KlusterletChartConfig{} + err = yaml.Unmarshal(mergedBytes, merged) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal merged values into KlusterletChartConfig for spoke %s: %w", spoke.Name, err) + } + + return merged, nil + +} + +// prepareKlusterletValuesFile creates a temporary file with klusterlet values and returns +// args to append and a cleanup function. Returns empty slice if values are empty. +func prepareKlusterletValuesFile(values *v1beta1.KlusterletChartConfig) ([]string, func(), error) { + if values == nil { + return nil, nil, nil + } + + if values.IsEmpty() { + return nil, nil, nil + } + valuesYAML, err := yaml.Marshal(values) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal klusterlet values to YAML: %w", err) + } + valuesFile, valuesCleanup, err := file.TmpFile(valuesYAML, "klusterlet-values") + if err != nil { + return nil, nil, fmt.Errorf("failed to write klusterlet values to disk: %w", err) + } + return []string{"--klusterlet-values-file", valuesFile}, valuesCleanup, nil } // SetupWithManager sets up the controller with the Manager. func (r *SpokeReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&v1beta1.Spoke{}). + WithOptions(controller.Options{ + MaxConcurrentReconciles: r.ConcurrentReconciles, + }). 
+ // watch for Hub updates, to immediately propagate any updates to RegistrationAuth, OCMSource + Watches( + &v1beta1.Hub{}, + handler.EnqueueRequestsFromMapFunc(r.mapHubEventToSpoke), + builder.WithPredicates(predicate.Funcs{ + DeleteFunc: func(_ event.DeleteEvent) bool { + return false + }, + CreateFunc: func(_ event.CreateEvent) bool { + return false + }, + // only return true if old and new hub specs shared fields are different + UpdateFunc: func(e event.UpdateEvent) bool { + oldHub, ok := e.ObjectOld.(*v1beta1.Hub) + if !ok { + return false + } + newHub, ok := e.ObjectNew.(*v1beta1.Hub) + if !ok { + return false + } + return sharedFieldsChanged(oldHub.Spec.DeepCopy(), newHub.Spec.DeepCopy()) + }, + GenericFunc: func(_ event.GenericEvent) bool { + return false + }, + }), + ). Named("spoke"). Complete(r) } + +// sharedFieldsChanged checks whether the spec fields that are shared between Hub and Spokes were updated, +// to prevent unnecessary reconciles of Spokes +func sharedFieldsChanged(oldSpec, newSpec *v1beta1.HubSpec) bool { + return !reflect.DeepEqual(oldSpec.RegistrationAuth, newSpec.RegistrationAuth) || + !reflect.DeepEqual(oldSpec.ClusterManager.Source, newSpec.ClusterManager.Source) +} + +func (r *SpokeReconciler) mapHubEventToSpoke(ctx context.Context, obj client.Object) []reconcile.Request { + hub, ok := obj.(*v1beta1.Hub) + if !ok { + r.Log.V(1).Info("failed to enqueue spoke requests", "expected", "hub", "got", fmt.Sprintf("%T", obj)) + return nil + } + spokeList := &v1beta1.SpokeList{} + err := r.List(ctx, spokeList) + if err != nil { + r.Log.Error(err, "failed to List spokes") + return nil + } + req := make([]reconcile.Request, 0) + for _, s := range spokeList.Items { + if !s.IsManagedBy(hub.ObjectMeta) { + continue + } + req = append(req, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: s.Name, + Namespace: s.Namespace, + }, + }) + } + return req +} diff --git 
a/fleetconfig-controller/internal/controller/v1beta1/spoke_controller_test.go b/fleetconfig-controller/internal/controller/v1beta1/spoke_controller_test.go index c5ca6675..3daf7e68 100644 --- a/fleetconfig-controller/internal/controller/v1beta1/spoke_controller_test.go +++ b/fleetconfig-controller/internal/controller/v1beta1/spoke_controller_test.go @@ -18,67 +18,158 @@ package v1beta1 import ( "context" + "fmt" + "time" + "github.com/go-logr/logr" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" + kerrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/reconcile" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1beta1 "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" ) -var _ = Describe("Spoke Controller", func() { - Context("When reconciling a resource", func() { - const resourceName = "test-resource" +var ( + spoke *v1beta1.Spoke + spokeReconciler *SpokeReconciler + spokeNN types.NamespacedName + testHub *v1beta1.Hub + testHubNN types.NamespacedName +) +var _ = Describe("Spoke Controller", Ordered, func() { + Context("When reconciling a Spoke", func() { ctx := context.Background() - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed - } - spoke := &v1beta1.Spoke{} - - BeforeEach(func() { - By("creating the custom resource for the Kind Spoke") - err := k8sClient.Get(ctx, typeNamespacedName, spoke) - if err != nil && errors.IsNotFound(err) { - resource := &v1beta1.Spoke{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: "default", + BeforeAll(func() { + // Create a test Hub first since Spoke references it + testHubNN = types.NamespacedName{ + Name: "test-hub-2", + Namespace: "default", + } + testHub = &v1beta1.Hub{ + ObjectMeta: metav1.ObjectMeta{ + Name: testHubNN.Name, + Namespace: 
testHubNN.Namespace, + }, + Spec: v1beta1.HubSpec{ + Kubeconfig: v1beta1.Kubeconfig{ + InCluster: true, }, - // TODO(user): Specify other spec details if needed. - } - Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + CreateNamespace: true, + Timeout: 300, + LogVerbosity: 0, + }, } - }) - - AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. - resource := &v1beta1.Spoke{} - err := k8sClient.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient.Create(ctx, testHub)).To(Succeed()) - By("Cleanup the specific resource instance Spoke") - Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) - }) - It("should successfully reconcile the resource", func() { - By("Reconciling the created resource") - controllerReconciler := &SpokeReconciler{ + spokeNN = types.NamespacedName{ + Name: "hub-as-spoke", + Namespace: "default", + } + spokeReconciler = &SpokeReconciler{ Client: k8sClient, + Log: logr.Logger{}, Scheme: k8sClient.Scheme(), } + spoke = &v1beta1.Spoke{ + ObjectMeta: metav1.ObjectMeta{ + Name: spokeNN.Name, + Namespace: spokeNN.Namespace, + }, + Spec: v1beta1.SpokeSpec{ + HubRef: v1beta1.HubRef{ + Name: testHubNN.Name, + Namespace: testHubNN.Namespace, + }, + Kubeconfig: v1beta1.Kubeconfig{ + InCluster: true, + }, + CreateNamespace: true, + SyncLabels: false, + Timeout: 300, + LogVerbosity: 0, + }, + } + }) - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) - Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ AfterAll(func() { + // Clean up test Hub + By("Cleaning up test Hub") + err := k8sClient.Delete(ctx, testHub) + if err != nil && !kerrs.IsNotFound(err) { + Expect(err).NotTo(HaveOccurred()) + } + }) + + It("Should create a Spoke", func() { + Expect(k8sClient.Create(ctx, spoke)).To(Succeed()) + }) + + It("Should add a finalizer to the Spoke", func() { + By("Reconciling the Spoke") + Expect(reconcileSpoke(ctx)).To(Succeed()) + + By("Verifying the Spoke's finalizer") + Expect(k8sClient.Get(ctx, spokeNN, spoke)).To(Succeed()) + Expect(spoke.Finalizers).To(ContainElement(v1beta1.SpokeCleanupFinalizer), + "Spoke %s wasn't given a finalizer", spokeNN.Name) + }) + + It("Should initialize the Spoke", func() { + By("Reconciling the Spoke") + Expect(reconcileSpoke(ctx)).To(Succeed()) + + By("Verifying the Spoke's phase and conditions") + Expect(k8sClient.Get(ctx, spokeNN, spoke)).To(Succeed()) + Expect(spoke.Status.Phase).To(Equal(v1beta1.SpokeJoining), + "Spoke %s is not in the Joining phase", spokeNN.Name) + Expect(assertSpokeConditions(spoke.Status.Conditions, map[string]metav1.ConditionStatus{ + v1beta1.SpokeJoined: metav1.ConditionFalse, + v1beta1.CleanupFailed: metav1.ConditionFalse, + v1beta1.AddonsConfigured: metav1.ConditionFalse, + })).To(Succeed()) + }) + + // cannot test full provisioning without an e2e test + + It("Should delete the Spoke", func() { + By("Deleting the Spoke") + Expect(k8sClient.Delete(ctx, spoke)).To(Succeed()) + Eventually(func() error { + err := k8sClient.Get(ctx, spokeNN, spoke) + if kerrs.IsNotFound(err) { + return nil + } + return err + }, 5*time.Minute).Should(Succeed()) }) }) }) + +func reconcileSpoke(ctx context.Context) error { + _, err := spokeReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: spokeNN, + }) + return err +} + +// assertSpokeConditions asserts that two sets of conditions match. 
+func assertSpokeConditions(conditions []v1beta1.Condition, expected map[string]metav1.ConditionStatus) error { + if len(conditions) != len(expected) { + return fmt.Errorf("expected %d conditions, got %d", len(expected), len(conditions)) + } + for _, c := range conditions { + expectedCondition, ok := expected[c.Type] + if !ok { + return fmt.Errorf("unhandled condition %s", c.Type) + } + if c.Status != expectedCondition { + return fmt.Errorf("condition %s has status %s, expected %s", c.Type, c.Status, expectedCondition) + } + } + return nil +} diff --git a/fleetconfig-controller/internal/controller/v1beta1/suite_test.go b/fleetconfig-controller/internal/controller/v1beta1/suite_test.go index 176976c3..b83ed84d 100644 --- a/fleetconfig-controller/internal/controller/v1beta1/suite_test.go +++ b/fleetconfig-controller/internal/controller/v1beta1/suite_test.go @@ -33,6 +33,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" v1beta1 "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/file" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/kube" "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/test" // +kubebuilder:scaffold:imports ) @@ -41,16 +43,24 @@ import ( // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
var ( - ctx context.Context - cancel context.CancelFunc - testEnv *envtest.Environment - cfg *rest.Config - k8sClient client.Client + ctx context.Context + cancel context.CancelFunc + testEnv *envtest.Environment + cfg *rest.Config + k8sClient client.Client + testConfig *test.Config + err error + kubeconfigCleanup func() ) func TestControllers(t *testing.T) { RegisterFailHandler(Fail) + testConfig, err = test.LoadConfig() + if err != nil { + panic(err) + } + RunSpecs(t, "Controller Suite") } @@ -75,8 +85,9 @@ var _ = BeforeSuite(func() { } // Retrieve the first found binary directory to allow running tests from IDEs - if getFirstFoundEnvTestBinaryDir() != "" { - testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir() + envTestBinaryDir := test.FindEnvTestBinaryDir(testConfig) + if envTestBinaryDir != "" { + testEnv.BinaryAssetsDirectory = envTestBinaryDir } // cfg is defined in this file globally. @@ -87,6 +98,16 @@ var _ = BeforeSuite(func() { k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) + + // Generate, save, and configure kubeconfig so in-cluster client lookups succeed + var kubeconfigPath string + raw, err := kube.RawFromRestConfig(cfg) + Expect(err).ShouldNot(HaveOccurred()) + kubeconfigPath, kubeconfigCleanup, err = file.TmpFile(raw, "kubeconfig") + Expect(err).ShouldNot(HaveOccurred()) + + Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed()) + logf.Log.Info("Kubeconfig", "path", kubeconfigPath) }) var _ = AfterSuite(func() { @@ -94,27 +115,5 @@ var _ = AfterSuite(func() { cancel() err := testEnv.Stop() Expect(err).NotTo(HaveOccurred()) + kubeconfigCleanup() }) - -// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path. -// ENVTEST-based tests depend on specific binaries, usually located in paths set by -// controller-runtime. 
When running tests directly (e.g., via an IDE) without using -// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. -// -// This function streamlines the process by finding the required binaries, similar to -// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are -// properly set up, run 'make setup-envtest' beforehand. -func getFirstFoundEnvTestBinaryDir() string { - basePath := filepath.Join("..", "..", "bin", "k8s") - entries, err := os.ReadDir(basePath) - if err != nil { - logf.Log.Error(err, "Failed to read directory", "path", basePath) - return "" - } - for _, entry := range entries { - if entry.IsDir() { - return filepath.Join(basePath, entry.Name()) - } - } - return "" -} diff --git a/fleetconfig-controller/internal/kube/kube.go b/fleetconfig-controller/internal/kube/kube.go index cffa70de..8fcf3fa7 100644 --- a/fleetconfig-controller/internal/kube/kube.go +++ b/fleetconfig-controller/internal/kube/kube.go @@ -14,10 +14,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1alpha1" -) - -var ( - defaultKubeconfigKey = "kubeconfig" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" ) // RestConfigFromKubeconfig either creates a rest.Config from a v1alpha1.Kubeconfig or @@ -83,34 +80,59 @@ func RawFromInClusterRestConfig() ([]byte, error) { return RawFromRestConfig(rc) } +// KubeconfigFromNamespacedSecretOrCluster loads a kubeconfig from a cross-namespace secret or generates one from inCluster +func KubeconfigFromNamespacedSecretOrCluster(ctx context.Context, kClient client.Client, kubeconfig v1alpha1.Kubeconfig) (raw []byte, err error) { + // exactly 1 of these 2 cases is always true + if kubeconfig.InCluster { + return RawFromInClusterRestConfig() + } + return KubeconfigFromNamespacedSecret(ctx, kClient, kubeconfig) +} + +// KubeconfigFromNamespacedSecret loads a kubeconfig from a cross-namespace 
secret in the cluster +func KubeconfigFromNamespacedSecret(ctx context.Context, kClient client.Client, kubeconfig v1alpha1.Kubeconfig) ([]byte, error) { + secretRef := kubeconfig.SecretReference + secret := corev1.Secret{} + nn := types.NamespacedName{ + Name: secretRef.Name, + Namespace: secretRef.Namespace, + } + if err := kClient.Get(ctx, nn, &secret); err != nil { + return nil, err + } + + raw, ok := secret.Data[secretRef.KubeconfigKey] + if !ok { + return nil, fmt.Errorf("kubeconfig key '%s' not found in %v secret", secretRef.KubeconfigKey, nn) + } + + return raw, nil +} + // KubeconfigFromSecretOrCluster loads a kubeconfig from a secret or generates one from inCluster -func KubeconfigFromSecretOrCluster(ctx context.Context, kClient client.Client, kubeconfig v1alpha1.Kubeconfig) (raw []byte, err error) { +func KubeconfigFromSecretOrCluster(ctx context.Context, kClient client.Client, kubeconfig v1beta1.Kubeconfig, namespace string) (raw []byte, err error) { // exactly 1 of these 2 cases is always true - switch { - case kubeconfig.InCluster: - raw, err = RawFromInClusterRestConfig() - case kubeconfig.SecretReference != nil: - raw, err = KubeconfigFromSecret(ctx, kClient, kubeconfig) + if kubeconfig.InCluster { + return RawFromInClusterRestConfig() } - return raw, err + return KubeconfigFromSecret(ctx, kClient, kubeconfig, namespace) } // KubeconfigFromSecret loads a kubeconfig from a secret in the cluster -func KubeconfigFromSecret(ctx context.Context, kClient client.Client, kubeconfig v1alpha1.Kubeconfig) ([]byte, error) { +func KubeconfigFromSecret(ctx context.Context, kClient client.Client, kubeconfig v1beta1.Kubeconfig, namespace string) ([]byte, error) { secretRef := kubeconfig.SecretReference secret := corev1.Secret{} - nn := types.NamespacedName{Name: secretRef.Name, Namespace: secretRef.Namespace} + nn := types.NamespacedName{ + Name: secretRef.Name, + Namespace: namespace, + } if err := kClient.Get(ctx, nn, &secret); err != nil { return nil, err } - 
kubeconfigKey := defaultKubeconfigKey - if secretRef.KubeconfigKey != "" { - kubeconfigKey = secretRef.KubeconfigKey - } - raw, ok := secret.Data[kubeconfigKey] + raw, ok := secret.Data[secretRef.KubeconfigKey] if !ok { - return nil, fmt.Errorf("kubeconfig key '%s' not found in %s/%s secret", kubeconfigKey, secretRef.Namespace, secretRef.Name) + return nil, fmt.Errorf("kubeconfig key '%s' not found in %v secret", secretRef.KubeconfigKey, nn) } return raw, nil diff --git a/fleetconfig-controller/internal/webhook/v1beta1/hub_webhook.go b/fleetconfig-controller/internal/webhook/v1beta1/hub_webhook.go index c2df38cf..099919fc 100644 --- a/fleetconfig-controller/internal/webhook/v1beta1/hub_webhook.go +++ b/fleetconfig-controller/internal/webhook/v1beta1/hub_webhook.go @@ -14,22 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. */ -// TODO - remove once hub webhooks are implemented. -// -//nolint:all // Required because of `dupl` between this file and spoke_webhook.go package v1beta1 import ( "context" "fmt" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "open-cluster-management.io/api/client/addon/clientset/versioned" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - fleetconfigopenclustermanagementiov1beta1 "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/kube" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/pkg/common" ) // nolint:unused @@ -38,39 +41,17 @@ var hublog = logf.Log.WithName("hub-resource") // SetupHubWebhookWithManager registers the 
webhook for Hub in the manager. func SetupHubWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr).For(&fleetconfigopenclustermanagementiov1beta1.Hub{}). - WithValidator(&HubCustomValidator{}). - WithDefaulter(&HubCustomDefaulter{}). - Complete() -} - -// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! - -// +kubebuilder:webhook:path=/mutate-fleetconfig-open-cluster-management-io-v1beta1-hub,mutating=true,failurePolicy=fail,sideEffects=None,groups=fleetconfig.open-cluster-management.io,resources=hubs,verbs=create;update,versions=v1beta1,name=mhub-v1beta1.kb.io,admissionReviewVersions=v1 - -// HubCustomDefaulter struct is responsible for setting default values on the custom resource of the -// Kind Hub when those are created or updated. -// -// NOTE: The +kubebuilder:object:generate=false marker prevents controller-gen from generating DeepCopy methods, -// as it is used only for temporary operations and does not need to be deeply copied. -type HubCustomDefaulter struct { - // TODO(user): Add more fields as needed for defaulting -} - -var _ webhook.CustomDefaulter = &HubCustomDefaulter{} - -// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Hub. -func (d *HubCustomDefaulter) Default(_ context.Context, obj runtime.Object) error { - hub, ok := obj.(*fleetconfigopenclustermanagementiov1beta1.Hub) - - if !ok { - return fmt.Errorf("expected an Hub object but got %T", obj) + kubeconfig, err := kube.RawFromInClusterRestConfig() + if err != nil { + return err } - hublog.Info("Defaulting for Hub", "name", hub.GetName()) - - // TODO(user): fill in your defaulting logic. - - return nil + addonC, err := common.AddOnClient(kubeconfig) + if err != nil { + return err + } + return ctrl.NewWebhookManagedBy(mgr).For(&v1beta1.Hub{}). + WithValidator(&HubCustomValidator{client: mgr.GetClient(), addonC: addonC}). 
+ Complete() } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. @@ -84,40 +65,81 @@ func (d *HubCustomDefaulter) Default(_ context.Context, obj runtime.Object) erro // NOTE: The +kubebuilder:object:generate=false marker prevents controller-gen from generating DeepCopy methods, // as this struct is used only for temporary operations and does not need to be deeply copied. type HubCustomValidator struct { - // TODO(user): Add more fields as needed for validation + client client.Client + addonC *versioned.Clientset } var _ webhook.CustomValidator = &HubCustomValidator{} // ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Hub. -func (v *HubCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { - hub, ok := obj.(*fleetconfigopenclustermanagementiov1beta1.Hub) +func (v *HubCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + hub, ok := obj.(*v1beta1.Hub) if !ok { return nil, fmt.Errorf("expected a Hub object but got %T", obj) } hublog.Info("Validation for Hub upon creation", "name", hub.GetName()) - // TODO(user): fill in your validation logic upon object creation. 
+ var allErrs field.ErrorList + + if valid, msg := isKubeconfigValid(hub.Spec.Kubeconfig); !valid { + allErrs = append(allErrs, field.Invalid( + field.NewPath("hub"), hub.Spec.Kubeconfig, msg), + ) + } + if hub.Spec.ClusterManager == nil && hub.Spec.SingletonControlPlane == nil { + allErrs = append(allErrs, field.Invalid( + field.NewPath("hub"), hub.Spec, "either hub.clusterManager or hub.singletonControlPlane must be specified"), + ) + } + + if hub.Spec.ClusterManager != nil && hub.Spec.SingletonControlPlane != nil { + allErrs = append(allErrs, field.Invalid( + field.NewPath("hub"), hub.Spec, "only one of hub.clusterManager or hub.singletonControlPlane may be specified"), + ) + } + allErrs = append(allErrs, validateHubAddons(ctx, v.client, nil, hub, v.addonC)...) + if len(allErrs) > 0 { + return nil, errors.NewInvalid(v1beta1.HubGroupKind, hub.Name, allErrs) + } return nil, nil } // ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Hub. -func (v *HubCustomValidator) ValidateUpdate(_ context.Context, _, newObj runtime.Object) (admission.Warnings, error) { - hub, ok := newObj.(*fleetconfigopenclustermanagementiov1beta1.Hub) +func (v *HubCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + hub, ok := newObj.(*v1beta1.Hub) if !ok { return nil, fmt.Errorf("expected a Hub object for the newObj but got %T", newObj) } + oldHub, ok := oldObj.(*v1beta1.Hub) + if !ok { + return nil, fmt.Errorf("expected a Hub object for the oldObj but got %T", oldObj) + } hublog.Info("Validation for Hub upon update", "name", hub.GetName()) - // TODO(user): fill in your validation logic upon object update. 
+ var allErrs field.ErrorList + + err := allowHubUpdate(oldHub, hub) + if err != nil { + return nil, err + } + if valid, msg := isKubeconfigValid(hub.Spec.Kubeconfig); !valid { + allErrs = append(allErrs, field.Invalid( + field.NewPath("hub"), hub.Spec.Kubeconfig, msg), + ) + } + allErrs = append(allErrs, validateHubAddons(ctx, v.client, oldHub, hub, v.addonC)...) + + if len(allErrs) > 0 { + return nil, errors.NewInvalid(v1beta1.HubGroupKind, hub.Name, allErrs) + } return nil, nil } // ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Hub. func (v *HubCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) { - hub, ok := obj.(*fleetconfigopenclustermanagementiov1beta1.Hub) + hub, ok := obj.(*v1beta1.Hub) if !ok { return nil, fmt.Errorf("expected a Hub object but got %T", obj) } diff --git a/fleetconfig-controller/internal/webhook/v1beta1/hub_webhook_test.go b/fleetconfig-controller/internal/webhook/v1beta1/hub_webhook_test.go index 62999319..3cc18a42 100644 --- a/fleetconfig-controller/internal/webhook/v1beta1/hub_webhook_test.go +++ b/fleetconfig-controller/internal/webhook/v1beta1/hub_webhook_test.go @@ -21,68 +21,101 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - fleetconfigopenclustermanagementiov1beta1 "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" - // TODO (user): Add any additional imports if needed + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" ) var _ = Describe("Hub Webhook", func() { var ( - obj *fleetconfigopenclustermanagementiov1beta1.Hub - oldObj *fleetconfigopenclustermanagementiov1beta1.Hub + obj *v1beta1.Hub + oldObj *v1beta1.Hub validator HubCustomValidator - defaulter HubCustomDefaulter ) BeforeEach(func() { - obj = &fleetconfigopenclustermanagementiov1beta1.Hub{} - oldObj = &fleetconfigopenclustermanagementiov1beta1.Hub{} - validator = HubCustomValidator{} + obj = &v1beta1.Hub{} + oldObj = &v1beta1.Hub{} + validator = HubCustomValidator{client: k8sClient} Expect(validator).NotTo(BeNil(), "Expected validator to be initialized") - defaulter = HubCustomDefaulter{} - Expect(defaulter).NotTo(BeNil(), "Expected defaulter to be initialized") Expect(oldObj).NotTo(BeNil(), "Expected oldObj to be initialized") Expect(obj).NotTo(BeNil(), "Expected obj to be initialized") - // TODO (user): Add any setup logic common to all tests }) - AfterEach(func() { - // TODO (user): Add any teardown logic common to all tests - }) + Context("When creating Hub under Validating Webhook", func() { + It("Should allow creation with valid configuration", func() { + By("setting up a valid Hub resource") + obj.ObjectMeta.Name = "hub" + obj.ObjectMeta.Namespace = "default" + obj.Spec.Kubeconfig = v1beta1.Kubeconfig{ + InCluster: true, + } + obj.Spec.ClusterManager = &v1beta1.ClusterManager{ + FeatureGates: "AddonManagement=true", + } + + By("validating the creation") + _, err := validator.ValidateCreate(ctx, obj) + Expect(err).NotTo(HaveOccurred()) + }) - Context("When creating Hub under Defaulting Webhook", func() { - // TODO (user): Add logic for defaulting webhooks - // Example: - // It("Should apply defaults when a required field is empty", 
func() { - // By("simulating a scenario where defaults should be applied") - // obj.SomeFieldWithDefault = "" - // By("calling the Default method to apply defaults") - // defaulter.Default(ctx, obj) - // By("checking that the default values are set") - // Expect(obj.SomeFieldWithDefault).To(Equal("default_value")) - // }) + It("Should deny creation when neither ClusterManager nor SingletonControlPlane is specified", func() { + By("setting up a Hub without ClusterManager or SingletonControlPlane") + obj.ObjectMeta.Name = "hub" + obj.ObjectMeta.Namespace = "default" + obj.Spec.Kubeconfig = v1beta1.Kubeconfig{ + InCluster: true, + } + // Both ClusterManager and SingletonControlPlane are nil + + By("validating the creation should fail") + _, err := validator.ValidateCreate(ctx, obj) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("either hub.clusterManager or hub.singletonControlPlane must be specified")) + }) }) - Context("When creating or updating Hub under Validating Webhook", func() { - // TODO (user): Add logic for validating webhooks - // Example: - // It("Should deny creation if a required field is missing", func() { - // By("simulating an invalid creation scenario") - // obj.SomeRequiredField = "" - // Expect(validator.ValidateCreate(ctx, obj)).Error().To(HaveOccurred()) - // }) - // - // It("Should admit creation if all required fields are present", func() { - // By("simulating an invalid creation scenario") - // obj.SomeRequiredField = "valid_value" - // Expect(validator.ValidateCreate(ctx, obj)).To(BeNil()) - // }) - // - // It("Should validate updates correctly", func() { - // By("simulating a valid update scenario") - // oldObj.SomeRequiredField = "updated_value" - // obj.SomeRequiredField = "updated_value" - // Expect(validator.ValidateUpdate(ctx, oldObj, obj)).To(BeNil()) - // }) + Context("When updating Hub under Validating Webhook", func() { + BeforeEach(func() { + oldObj.ObjectMeta.Name = "hub" + oldObj.ObjectMeta.Namespace 
= "default" + oldObj.Spec.Kubeconfig = v1beta1.Kubeconfig{ + InCluster: true, + } + oldObj.Spec.ClusterManager = &v1beta1.ClusterManager{ + FeatureGates: "AddonManagement=true", + } + + obj.ObjectMeta.Name = "hub" + obj.ObjectMeta.Namespace = "default" + obj.Spec.Kubeconfig = v1beta1.Kubeconfig{ + InCluster: true, + } + obj.Spec.ClusterManager = &v1beta1.ClusterManager{ + FeatureGates: "AddonManagement=true", + } + }) + + It("Should allow valid updates", func() { + By("updating Hub with valid changes - only allowed fields") + // Update timeout (which is allowed) + obj.Spec.Timeout = 600 + + By("validating the update") + _, err := validator.ValidateUpdate(ctx, oldObj, obj) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Should deny updates with invalid kubeconfig", func() { + By("setting up invalid kubeconfig in the update") + obj.Spec.Kubeconfig = v1beta1.Kubeconfig{ + InCluster: false, + // Missing SecretReference when InCluster is false + } + + By("validating the update should fail") + _, err := validator.ValidateUpdate(ctx, oldObj, obj) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("either secretReference or inCluster must be specified")) + }) }) }) diff --git a/fleetconfig-controller/internal/webhook/v1beta1/spoke_webhook.go b/fleetconfig-controller/internal/webhook/v1beta1/spoke_webhook.go index c5e4141b..61360b5f 100644 --- a/fleetconfig-controller/internal/webhook/v1beta1/spoke_webhook.go +++ b/fleetconfig-controller/internal/webhook/v1beta1/spoke_webhook.go @@ -14,22 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. */ -// TODO - remove once spoke webhooks are implemented. 
-// -//nolint:all // Required because of `dupl` between this file and spoke_webhook.go package v1beta1 import ( "context" "fmt" + kerrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "open-cluster-management.io/api/client/addon/clientset/versioned" + operatorv1 "open-cluster-management.io/api/operator/v1" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - fleetconfigopenclustermanagementiov1beta1 "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/kube" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/pkg/common" ) // nolint:unused @@ -38,42 +42,19 @@ var spokelog = logf.Log.WithName("spoke-resource") // SetupSpokeWebhookWithManager registers the webhook for Spoke in the manager. func SetupSpokeWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr).For(&fleetconfigopenclustermanagementiov1beta1.Spoke{}). - WithValidator(&SpokeCustomValidator{}). - WithDefaulter(&SpokeCustomDefaulter{}). - Complete() -} - -// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! - -// +kubebuilder:webhook:path=/mutate-fleetconfig-open-cluster-management-io-v1beta1-spoke,mutating=true,failurePolicy=fail,sideEffects=None,groups=fleetconfig.open-cluster-management.io,resources=spokes,verbs=create;update,versions=v1beta1,name=mspoke-v1beta1.kb.io,admissionReviewVersions=v1 - -// SpokeCustomDefaulter struct is responsible for setting default values on the custom resource of the -// Kind Spoke when those are created or updated. 
-// -// NOTE: The +kubebuilder:object:generate=false marker prevents controller-gen from generating DeepCopy methods, -// as it is used only for temporary operations and does not need to be deeply copied. -type SpokeCustomDefaulter struct { - // TODO(user): Add more fields as needed for defaulting -} - -var _ webhook.CustomDefaulter = &SpokeCustomDefaulter{} - -// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Spoke. -func (d *SpokeCustomDefaulter) Default(_ context.Context, obj runtime.Object) error { - spoke, ok := obj.(*fleetconfigopenclustermanagementiov1beta1.Spoke) - - if !ok { - return fmt.Errorf("expected an Spoke object but got %T", obj) + kubeconfig, err := kube.RawFromInClusterRestConfig() + if err != nil { + return err } - spokelog.Info("Defaulting for Spoke", "name", spoke.GetName()) - - // TODO(user): fill in your defaulting logic. - - return nil + addonC, err := common.AddOnClient(kubeconfig) + if err != nil { + return err + } + return ctrl.NewWebhookManagedBy(mgr).For(&v1beta1.Spoke{}). + WithValidator(&SpokeCustomValidator{client: mgr.GetClient(), addonC: addonC}). + Complete() } -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. // NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. // Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. 
// +kubebuilder:webhook:path=/validate-fleetconfig-open-cluster-management-io-v1beta1-spoke,mutating=false,failurePolicy=fail,sideEffects=None,groups=fleetconfig.open-cluster-management.io,resources=spokes,verbs=create;update,versions=v1beta1,name=vspoke-v1beta1.kb.io,admissionReviewVersions=v1 @@ -84,40 +65,90 @@ func (d *SpokeCustomDefaulter) Default(_ context.Context, obj runtime.Object) er // NOTE: The +kubebuilder:object:generate=false marker prevents controller-gen from generating DeepCopy methods, // as this struct is used only for temporary operations and does not need to be deeply copied. type SpokeCustomValidator struct { - // TODO(user): Add more fields as needed for validation + client client.Client + addonC *versioned.Clientset } var _ webhook.CustomValidator = &SpokeCustomValidator{} // ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Spoke. -func (v *SpokeCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { - spoke, ok := obj.(*fleetconfigopenclustermanagementiov1beta1.Spoke) +func (v *SpokeCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + spoke, ok := obj.(*v1beta1.Spoke) if !ok { return nil, fmt.Errorf("expected a Spoke object but got %T", obj) } spokelog.Info("Validation for Spoke upon creation", "name", spoke.GetName()) - // TODO(user): fill in your validation logic upon object creation. 
+ var allErrs field.ErrorList + + if spoke.Spec.Klusterlet.Mode == string(operatorv1.InstallModeHosted) { + if spoke.Spec.Klusterlet.ManagedClusterKubeconfig.SecretReference == nil { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("klusterlet").Child("managedClusterKubeconfig").Child("secretReference"), + spoke.Name, "managedClusterKubeconfig.secretReference is required in hosted mode"), + ) + } else { + if valid, msg := isKubeconfigValid(spoke.Spec.Klusterlet.ManagedClusterKubeconfig); !valid { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("klusterlet").Child("managedClusterKubeconfig").Child("secretReference"), + spoke.Name, msg), + ) + } + } + } + if valid, msg := isKubeconfigValid(spoke.Spec.Kubeconfig); !valid { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("kubeconfig"), spoke, msg), + ) + } - return nil, nil + warn, errs := validateAddons(ctx, v.client, spoke, v.addonC) + allErrs = append(allErrs, errs...) + + if len(allErrs) > 0 { + return warn, kerrs.NewInvalid(v1beta1.SpokeGroupKind, spoke.Name, allErrs) + } + return warn, nil } // ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Spoke. -func (v *SpokeCustomValidator) ValidateUpdate(_ context.Context, _, newObj runtime.Object) (admission.Warnings, error) { - spoke, ok := newObj.(*fleetconfigopenclustermanagementiov1beta1.Spoke) +func (v *SpokeCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + spoke, ok := newObj.(*v1beta1.Spoke) if !ok { return nil, fmt.Errorf("expected a Spoke object for the newObj but got %T", newObj) } + oldSpoke, ok := oldObj.(*v1beta1.Spoke) + if !ok { + return nil, fmt.Errorf("expected a Spoke object for the oldObj but got %T", oldObj) + } spokelog.Info("Validation for Spoke upon update", "name", spoke.GetName()) - // TODO(user): fill in your validation logic upon object update. 
+ err := allowSpokeUpdate(oldSpoke, spoke) + if err != nil { + return nil, err + } - return nil, nil + var allErrs field.ErrorList + + valid, msg := isKubeconfigValid(spoke.Spec.Kubeconfig) + if !valid { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("kubeconfig"), spoke, msg), + ) + } + + warn, valErrs := validateAddons(ctx, v.client, spoke, v.addonC) + allErrs = append(allErrs, valErrs...) + + if len(allErrs) > 0 { + return warn, kerrs.NewInvalid(v1beta1.SpokeGroupKind, spoke.Name, allErrs) + } + return warn, nil } // ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Spoke. func (v *SpokeCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) { - spoke, ok := obj.(*fleetconfigopenclustermanagementiov1beta1.Spoke) + spoke, ok := obj.(*v1beta1.Spoke) if !ok { return nil, fmt.Errorf("expected a Spoke object but got %T", obj) } diff --git a/fleetconfig-controller/internal/webhook/v1beta1/spoke_webhook_test.go b/fleetconfig-controller/internal/webhook/v1beta1/spoke_webhook_test.go index 4806b6dd..8b5db801 100644 --- a/fleetconfig-controller/internal/webhook/v1beta1/spoke_webhook_test.go +++ b/fleetconfig-controller/internal/webhook/v1beta1/spoke_webhook_test.go @@ -20,68 +20,118 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - fleetconfigopenclustermanagementiov1beta1 "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" - // TODO (user): Add any additional imports if needed + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" ) var _ = Describe("Spoke Webhook", func() { var ( - obj *fleetconfigopenclustermanagementiov1beta1.Spoke - oldObj *fleetconfigopenclustermanagementiov1beta1.Spoke + obj *v1beta1.Spoke + oldObj *v1beta1.Spoke validator SpokeCustomValidator - defaulter SpokeCustomDefaulter ) BeforeEach(func() { - obj = &fleetconfigopenclustermanagementiov1beta1.Spoke{} - oldObj = &fleetconfigopenclustermanagementiov1beta1.Spoke{} - validator = SpokeCustomValidator{} + obj = &v1beta1.Spoke{} + oldObj = &v1beta1.Spoke{} + validator = SpokeCustomValidator{client: k8sClient} Expect(validator).NotTo(BeNil(), "Expected validator to be initialized") - defaulter = SpokeCustomDefaulter{} - Expect(defaulter).NotTo(BeNil(), "Expected defaulter to be initialized") Expect(oldObj).NotTo(BeNil(), "Expected oldObj to be initialized") Expect(obj).NotTo(BeNil(), "Expected obj to be initialized") - // TODO (user): Add any setup logic common to all tests }) - AfterEach(func() { - // TODO (user): Add any teardown logic common to all tests - }) + Context("When creating Spoke under Validating Webhook", func() { + It("Should allow creation with valid configuration", func() { + By("setting up a valid Spoke resource") + obj.ObjectMeta.Name = "test-spoke" + obj.ObjectMeta.Namespace = "default" + obj.Spec.HubRef = v1beta1.HubRef{ + Name: "hub", + Namespace: "default", + } + obj.Spec.Kubeconfig = v1beta1.Kubeconfig{ + InCluster: true, + } + obj.Spec.Klusterlet.Mode = "Default" + + By("validating the creation") + warnings, err := validator.ValidateCreate(ctx, obj) + // Expect warnings about Hub not found, but no errors + Expect(err).NotTo(HaveOccurred()) + if warnings != nil { + 
Expect(warnings).To(ContainElement(ContainSubstring("hub not found"))) + } + }) - Context("When creating Spoke under Defaulting Webhook", func() { - // TODO (user): Add logic for defaulting webhooks - // Example: - // It("Should apply defaults when a required field is empty", func() { - // By("simulating a scenario where defaults should be applied") - // obj.SomeFieldWithDefault = "" - // By("calling the Default method to apply defaults") - // defaulter.Default(ctx, obj) - // By("checking that the default values are set") - // Expect(obj.SomeFieldWithDefault).To(Equal("default_value")) - // }) + It("Should deny creation when hosted mode lacks managedClusterKubeconfig", func() { + By("setting up a Spoke in hosted mode without managedClusterKubeconfig") + obj.ObjectMeta.Name = "test-spoke-hosted" + obj.ObjectMeta.Namespace = "default" + obj.Spec.HubRef = v1beta1.HubRef{ + Name: "hub", + Namespace: "default", + } + obj.Spec.Kubeconfig = v1beta1.Kubeconfig{ + InCluster: true, + } + obj.Spec.Klusterlet.Mode = "Hosted" + // Missing ManagedClusterKubeconfig.SecretReference + + By("validating the creation should fail") + _, err := validator.ValidateCreate(ctx, obj) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("managedClusterKubeconfig.secretReference is required in hosted mode")) + }) }) - Context("When creating or updating Spoke under Validating Webhook", func() { - // TODO (user): Add logic for validating webhooks - // Example: - // It("Should deny creation if a required field is missing", func() { - // By("simulating an invalid creation scenario") - // obj.SomeRequiredField = "" - // Expect(validator.ValidateCreate(ctx, obj)).Error().To(HaveOccurred()) - // }) - // - // It("Should admit creation if all required fields are present", func() { - // By("simulating an invalid creation scenario") - // obj.SomeRequiredField = "valid_value" - // Expect(validator.ValidateCreate(ctx, obj)).To(BeNil()) - // }) - // - // It("Should validate updates 
correctly", func() { - // By("simulating a valid update scenario") - // oldObj.SomeRequiredField = "updated_value" - // obj.SomeRequiredField = "updated_value" - // Expect(validator.ValidateUpdate(ctx, oldObj, obj)).To(BeNil()) - // }) + Context("When updating Spoke under Validating Webhook", func() { + BeforeEach(func() { + // Set up valid old and new objects + oldObj.ObjectMeta.Name = "test-spoke" + oldObj.ObjectMeta.Namespace = "default" + oldObj.Spec.HubRef = v1beta1.HubRef{ + Name: "hub", + Namespace: "default", + } + oldObj.Spec.Kubeconfig = v1beta1.Kubeconfig{ + InCluster: true, + } + oldObj.Spec.Klusterlet.Mode = "Default" + + obj.ObjectMeta.Name = "test-spoke" + obj.ObjectMeta.Namespace = "default" + obj.Spec.HubRef = v1beta1.HubRef{ + Name: "hub", + Namespace: "default", + } + obj.Spec.Kubeconfig = v1beta1.Kubeconfig{ + InCluster: true, + } + obj.Spec.Klusterlet.Mode = "Default" + }) + + It("Should allow valid updates", func() { + By("updating Spoke with valid changes - only annotations are allowed") + obj.Spec.Klusterlet.Annotations = map[string]string{ + "cluster.open-cluster-management.io/clusterset": "default", + } + + By("validating the update") + _, err := validator.ValidateUpdate(ctx, oldObj, obj) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Should deny updates with invalid kubeconfig", func() { + By("setting up invalid kubeconfig in the update") + obj.Spec.Kubeconfig = v1beta1.Kubeconfig{ + InCluster: false, + // Missing SecretReference when InCluster is false + } + + By("validating the update should fail") + _, err := validator.ValidateUpdate(ctx, oldObj, obj) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("either secretReference or inCluster must be specified")) + }) }) }) diff --git a/fleetconfig-controller/internal/webhook/v1beta1/validation.go b/fleetconfig-controller/internal/webhook/v1beta1/validation.go new file mode 100644 index 00000000..e2f7510c --- /dev/null +++ 
b/fleetconfig-controller/internal/webhook/v1beta1/validation.go @@ -0,0 +1,391 @@ +package v1beta1 + +import ( + "context" + "errors" + "fmt" + "net/url" + "reflect" + "slices" + + corev1 "k8s.io/api/core/v1" + kerrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + "open-cluster-management.io/api/client/addon/clientset/versioned" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" +) + +const warnHubNotFound = "hub not found, cannot validate spoke addons" + +func isKubeconfigValid(kubeconfig v1beta1.Kubeconfig) (bool, string) { + if kubeconfig.SecretReference == nil && !kubeconfig.InCluster { + return false, "either secretReference or inCluster must be specified for the kubeconfig" + } + if kubeconfig.SecretReference != nil && kubeconfig.InCluster { + return false, "either secretReference or inCluster can be specified for the kubeconfig, not both" + } + return true, "" +} + +// allowHubUpdate validates that only allowed fields are changed when updating a Hub. 
+// Allowed changes include:
+// - spec.apiServer
+// - spec.clusterManager.source.*
+// - spec.hubAddOns
+// - spec.addOnConfigs
+// - spec.logVerbosity
+// - spec.timeout
+// - spec.registrationAuth
+// - spec.kubeconfig
+func allowHubUpdate(oldHub, newHub *v1beta1.Hub) error {
+	if !reflect.DeepEqual(newHub.Spec, oldHub.Spec) {
+		oldHubCopy := oldHub.Spec.DeepCopy()
+		newHubCopy := newHub.Spec.DeepCopy()
+
+		// Allow changes to ClusterManager.Source
+		if oldHubCopy.ClusterManager != nil {
+			oldHubCopy.ClusterManager.Source = (v1beta1.OCMSource{})
+		}
+		if newHubCopy.ClusterManager != nil {
+			newHubCopy.ClusterManager.Source = (v1beta1.OCMSource{})
+		}
+
+		// Allow changes to API Server
+		oldHubCopy.APIServer = ""
+		newHubCopy.APIServer = ""
+
+		// Allow changes to HubAddOns
+		oldHubCopy.HubAddOns = nil
+		newHubCopy.HubAddOns = nil
+
+		// Allow changes to AddOnConfigs
+		oldHubCopy.AddOnConfigs = nil
+		newHubCopy.AddOnConfigs = nil
+
+		// Allow changes to LogVerbosity
+		oldHubCopy.LogVerbosity = 0
+		newHubCopy.LogVerbosity = 0
+
+		// Allow changes to Timeout
+		oldHubCopy.Timeout = 0
+		newHubCopy.Timeout = 0
+
+		// Allow changes to RegistrationAuth
+		oldHubCopy.RegistrationAuth = v1beta1.RegistrationAuth{}
+		newHubCopy.RegistrationAuth = v1beta1.RegistrationAuth{}
+
+		// Allow changes to Kubeconfig
+		oldHubCopy.Kubeconfig = v1beta1.Kubeconfig{}
+		newHubCopy.Kubeconfig = v1beta1.Kubeconfig{}
+
+		if !reflect.DeepEqual(oldHubCopy, newHubCopy) {
+			return errors.New("only changes to spec.apiServer, spec.clusterManager.source.*, spec.hubAddOns, spec.addOnConfigs, spec.logVerbosity, spec.timeout, spec.registrationAuth, and spec.kubeconfig are allowed when updating the hub")
+		}
+	}
+	return nil
+}
+
+// allowSpokeUpdate validates that only allowed fields are changed when updating a Spoke.
+// Allowed changes include: +// - spec.klusterlet.annotations +// - spec.klusterlet.values +// - spec.kubeconfig +// - spec.addOns +// - spec.timeout +// - spec.logVerbosity +func allowSpokeUpdate(oldSpoke, newSpoke *v1beta1.Spoke) error { + if !reflect.DeepEqual(newSpoke.Spec, oldSpoke.Spec) { + oldSpokeCopy := oldSpoke.Spec.DeepCopy() + newSpokeCopy := newSpoke.Spec.DeepCopy() + newSpokeCopy.Klusterlet.Annotations = nil + oldSpokeCopy.Klusterlet.Annotations = nil + oldSpokeCopy.Klusterlet.Values = nil + newSpokeCopy.Klusterlet.Values = nil + oldSpokeCopy.Kubeconfig = v1beta1.Kubeconfig{} + newSpokeCopy.Kubeconfig = v1beta1.Kubeconfig{} + oldSpokeCopy.AddOns = []v1beta1.AddOn{} + newSpokeCopy.AddOns = []v1beta1.AddOn{} + oldSpokeCopy.LogVerbosity = 0 + newSpokeCopy.LogVerbosity = 0 + oldSpokeCopy.Timeout = 0 + newSpokeCopy.Timeout = 0 + + if !reflect.DeepEqual(oldSpokeCopy, newSpokeCopy) { + return errors.New("spoke contains changes which are not allowed; only changes to spec.klusterlet.annotations, spec.klusterlet.values, spec.kubeconfig, spec.addOns, spec.timeout, and spec.logVerbosity are allowed when updating a spoke") + } + } + + return nil +} + +// validateHubAddons checks that each addOnConfig specifies a valid source of manifests +// and validates uniqueness constraints between HubAddOns and AddOnConfigs +func validateHubAddons(ctx context.Context, cli client.Client, oldObject, newObject *v1beta1.Hub, addonC *versioned.Clientset) field.ErrorList { + errs := field.ErrorList{} + + // Validate uniqueness and cross-references + errs = append(errs, validateAddonUniqueness(newObject)...) + + // Validate AddOnConfig manifests + errs = append(errs, validateAddOnConfigManifests(ctx, cli, newObject)...) + + // Validate removal constraints + if oldObject != nil { + errs = append(errs, validateAddonRemovalConstraints(ctx, oldObject, newObject, addonC)...) 
+ } + + return errs +} + +// validateAddonUniqueness validates uniqueness constraints for addons +func validateAddonUniqueness(newObject *v1beta1.Hub) field.ErrorList { + errs := field.ErrorList{} + + // Validate that AddOnConfig names are unique within the AddOnConfigs list + addOnConfigNames := make(map[string]int) + for i, a := range newObject.Spec.AddOnConfigs { + key := fmt.Sprintf("%s-%s", a.Name, a.Version) + if existingIndex, found := addOnConfigNames[key]; found { + errs = append(errs, field.Invalid(field.NewPath("addOnConfigs").Index(i), key, + fmt.Sprintf("duplicate addOnConfig %s (name-version) found at indices %d and %d", key, existingIndex, i))) + } else { + addOnConfigNames[key] = i + } + } + + // Validate that HubAddOn names are unique within the HubAddOns list + hubAddOnNames := make(map[string]int) + for i, ha := range newObject.Spec.HubAddOns { + if existingIndex, found := hubAddOnNames[ha.Name]; found { + errs = append(errs, field.Invalid(field.NewPath("hubAddOns").Index(i), ha.Name, + fmt.Sprintf("duplicate hubAddOn name %s found at indices %d and %d", ha.Name, existingIndex, i))) + } else { + hubAddOnNames[ha.Name] = i + } + } + + // Validate unique names between HubAddOns and AddOnConfigs + for i, ha := range newObject.Spec.HubAddOns { + if _, found := addOnConfigNames[ha.Name]; found { + errs = append(errs, field.Invalid(field.NewPath("hubAddOns").Index(i), ha.Name, + fmt.Sprintf("hubAddOn name %s clashes with an existing addOnConfig name.", ha.Name))) + } + } + + return errs +} + +// validateAddOnConfigManifests validates that each AddOnConfig has valid manifest sources +func validateAddOnConfigManifests(ctx context.Context, cli client.Client, newObject *v1beta1.Hub) field.ErrorList { + errs := field.ErrorList{} + + for i, a := range newObject.Spec.AddOnConfigs { + cm := corev1.ConfigMap{} + cmName := fmt.Sprintf("%s-%s-%s", v1beta1.AddonConfigMapNamePrefix, a.Name, a.Version) + err := cli.Get(ctx, types.NamespacedName{Name: cmName, 
Namespace: newObject.Namespace}, &cm) + if err != nil { + errs = append(errs, field.InternalError(field.NewPath("addOnConfigs").Index(i), err)) + continue + } + + errs = append(errs, validateManifestSource(i, a, cm)...) + } + + return errs +} + +// validateManifestSource validates the manifest source configuration for an AddOnConfig +func validateManifestSource(index int, addon v1beta1.AddOnConfig, cm corev1.ConfigMap) field.ErrorList { + errs := field.ErrorList{} + + // Extract manifest configuration from ConfigMap + _, hasRaw := cm.Data[v1beta1.AddonConfigMapManifestRawKey] + manifestsURL, hasURL := cm.Data[v1beta1.AddonConfigMapManifestURLKey] + + // Validate manifest configuration + if !hasRaw && !hasURL { + errs = append(errs, field.Invalid(field.NewPath("addOnConfigs").Index(index), addon.Name, + fmt.Sprintf("no inline manifests or URL found for addon %s version %s", addon.Name, addon.Version))) + } + if hasRaw && hasURL { + errs = append(errs, field.Invalid(field.NewPath("addOnConfigs").Index(index), addon.Name, + fmt.Sprintf("only 1 of inline manifests or URL can be set for addon %s version %s", addon.Name, addon.Version))) + } + + if hasURL { + errs = append(errs, validateManifestURL(index, addon, manifestsURL)...) + } + + return errs +} + +// validateManifestURL validates the URL format and scheme for manifest sources +func validateManifestURL(index int, addon v1beta1.AddOnConfig, manifestsURL string) field.ErrorList { + errs := field.ErrorList{} + + url, err := url.Parse(manifestsURL) + if err != nil { + errs = append(errs, field.Invalid(field.NewPath("addOnConfigs").Index(index), addon.Name, + fmt.Sprintf("invalid URL '%s' for addon %s version %s. %v", manifestsURL, addon.Name, addon.Version, err.Error()))) + return errs + } + + if !slices.Contains(v1beta1.AllowedAddonURLSchemes, url.Scheme) { + errs = append(errs, field.Invalid(field.NewPath("addOnConfigs").Index(index), addon.Name, + fmt.Sprintf("unsupported URL scheme %s for addon %s version %s. 
Must be one of %v", + url.Scheme, addon.Name, addon.Version, v1beta1.AllowedAddonURLSchemes))) + } + + return errs +} + +// validateAddonRemovalConstraints validates that removed addons are not still in use +func validateAddonRemovalConstraints(ctx context.Context, oldObject, newObject *v1beta1.Hub, addonC *versioned.Clientset) field.ErrorList { + errs := field.ErrorList{} + + // Check AddOnConfigs removal constraints + removedAddOnConfigs := getRemovedAddOnConfigs(oldObject, newObject) + if len(removedAddOnConfigs) > 0 { + if removalErrs := validateAddonNotInUse(ctx, removedAddOnConfigs, "addOnConfigs", addonC); len(removalErrs) > 0 { + errs = append(errs, removalErrs...) + } + } + + // Check HubAddOns removal constraints + removedHubAddOns := getRemovedHubAddOns(oldObject, newObject) + if len(removedHubAddOns) > 0 { + if removalErrs := validateAddonNotInUse(ctx, removedHubAddOns, "hubAddOns", addonC); len(removalErrs) > 0 { + errs = append(errs, removalErrs...) + } + } + + return errs +} + +// getRemovedAddOnConfigs returns the list of AddOnConfigs that were removed +func getRemovedAddOnConfigs(oldObject, newObject *v1beta1.Hub) []string { + oldAddOnConfigs := make(map[string]struct{}) + for _, a := range oldObject.Spec.AddOnConfigs { + key := fmt.Sprintf("%s-%s", a.Name, a.Version) + oldAddOnConfigs[key] = struct{}{} + } + + newAddOnConfigs := make(map[string]struct{}) + for _, a := range newObject.Spec.AddOnConfigs { + key := fmt.Sprintf("%s-%s", a.Name, a.Version) + newAddOnConfigs[key] = struct{}{} + } + + var removedAddOnConfigs []string + for key := range oldAddOnConfigs { + if _, found := newAddOnConfigs[key]; !found { + removedAddOnConfigs = append(removedAddOnConfigs, key) + } + } + + return removedAddOnConfigs +} + +// getRemovedHubAddOns returns the list of HubAddOns that were removed +func getRemovedHubAddOns(oldObject, newObject *v1beta1.Hub) []string { + oldHubAddOns := make(map[string]struct{}) + for _, ha := range oldObject.Spec.HubAddOns { + 
oldHubAddOns[ha.Name] = struct{}{} + } + + newHubAddOns := make(map[string]struct{}) + for _, ha := range newObject.Spec.HubAddOns { + newHubAddOns[ha.Name] = struct{}{} + } + + var removedHubAddOns []string + for name := range oldHubAddOns { + if _, found := newHubAddOns[name]; !found { + removedHubAddOns = append(removedHubAddOns, name) + } + } + + return removedHubAddOns +} + +// validateAddonNotInUse validates that removed addons are not still referenced by ManagedClusterAddOns +func validateAddonNotInUse(ctx context.Context, removedAddons []string, fieldPath string, addonC *versioned.Clientset) field.ErrorList { + errs := field.ErrorList{} + + mcAddOns, err := addonC.AddonV1alpha1().ManagedClusterAddOns(metav1.NamespaceAll).List(ctx, metav1.ListOptions{LabelSelector: v1beta1.ManagedBySelector.String()}) + if err != nil { + errs = append(errs, field.InternalError(field.NewPath(fieldPath), err)) + return errs + } + + var inUseAddons []string + for _, removedAddon := range removedAddons { + if isAddondEnabled(mcAddOns.Items, removedAddon) { + inUseAddons = append(inUseAddons, removedAddon) + } + } + + if len(inUseAddons) > 0 { + errs = append(errs, field.Invalid(field.NewPath(fieldPath), inUseAddons, + fmt.Sprintf("cannot remove %s %v as they are still in use by managedclusteraddons", fieldPath, inUseAddons))) + } + + return errs +} + +// validates that any addon which is enabled on a spoke is configured +func validateAddons(ctx context.Context, cli client.Client, newObject *v1beta1.Spoke, addonC *versioned.Clientset) (admission.Warnings, field.ErrorList) { + errs := field.ErrorList{} + + // try to get hub, if not present or not ready, log a warning that addons cant be properly validated + hub := &v1beta1.Hub{} + err := cli.Get(ctx, types.NamespacedName{Name: newObject.Spec.HubRef.Name, Namespace: newObject.Spec.HubRef.Namespace}, hub) + if err != nil { + if !kerrs.IsNotFound(err) { + return nil, 
field.ErrorList{field.InternalError(field.NewPath("spec").Child("addOns"), err)} + } + // warn instead of an error, so we don't block creating spokes and hub at the same time + return admission.Warnings{warnHubNotFound}, nil + } + + initCond := hub.GetCondition(v1beta1.HubInitialized) + if initCond == nil || initCond.Status != metav1.ConditionTrue { + // warn instead of an error, so we don't block creating spokes and hub at the same time + return admission.Warnings{warnHubNotFound}, nil + } + + cmaList, err := addonC.AddonV1alpha1().ClusterManagementAddOns().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, field.ErrorList{field.InternalError(field.NewPath("spec").Child("addOns"), err)} + } + cmaNames := make([]string, len(cmaList.Items)) + for i, cma := range cmaList.Items { + cmaNames[i] = cma.Name + } + + for i, a := range newObject.Spec.AddOns { + if !slices.Contains(cmaNames, a.ConfigName) { + errs = append(errs, field.Invalid(field.NewPath("spec").Child("addOns").Index(i), a.ConfigName, "no matching HubAddOn or AddOnConfig found for AddOn")) + } + } + + return nil, errs +} + +// isAddondEnabled reports whether any ManagedClusterAddOn still references the given (removed) addon in its status config references. 
+func isAddondEnabled(mcAddOns []addonv1alpha1.ManagedClusterAddOn, removedAddon string) bool { + for _, mcao := range mcAddOns { + for _, cr := range mcao.Status.ConfigReferences { + if cr.DesiredConfig.Name == removedAddon { + return true + } + } + } + return false +} diff --git a/fleetconfig-controller/internal/webhook/v1beta1/validation_test.go b/fleetconfig-controller/internal/webhook/v1beta1/validation_test.go new file mode 100644 index 00000000..92b3434e --- /dev/null +++ b/fleetconfig-controller/internal/webhook/v1beta1/validation_test.go @@ -0,0 +1,538 @@ +package v1beta1 + +import ( + "testing" + + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" +) + +func TestIsKubeconfigValid(t *testing.T) { + tests := []struct { + name string + kubeconfig v1beta1.Kubeconfig + wantValid bool + wantMsg string + }{ + { + name: "valid InCluster kubeconfig", + kubeconfig: v1beta1.Kubeconfig{ + InCluster: true, + }, + wantValid: true, + wantMsg: "", + }, + { + name: "valid SecretReference kubeconfig", + kubeconfig: v1beta1.Kubeconfig{ + SecretReference: &v1beta1.SecretReference{ + Name: "test-secret", + KubeconfigKey: "kubeconfig", + }, + }, + wantValid: true, + wantMsg: "", + }, + { + name: "invalid - neither InCluster nor SecretReference", + kubeconfig: v1beta1.Kubeconfig{ + InCluster: false, + }, + wantValid: false, + wantMsg: "either secretReference or inCluster must be specified for the kubeconfig", + }, + { + name: "invalid - both InCluster and SecretReference", + kubeconfig: v1beta1.Kubeconfig{ + InCluster: true, + SecretReference: &v1beta1.SecretReference{ + Name: "test-secret", + KubeconfigKey: "kubeconfig", + }, + }, + wantValid: false, + wantMsg: "either secretReference or inCluster can be specified for the kubeconfig, not both", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + valid, msg := isKubeconfigValid(tt.kubeconfig) + if valid != tt.wantValid { + t.Errorf("isKubeconfigValid() valid = %v, want 
%v", valid, tt.wantValid) + } + if msg != tt.wantMsg { + t.Errorf("isKubeconfigValid() msg = %v, want %v", msg, tt.wantMsg) + } + }) + } +} + +func TestAllowHubUpdate(t *testing.T) { + tests := []struct { + name string + oldHub *v1beta1.Hub + newHub *v1beta1.Hub + wantErr bool + errMsg string + }{ + { + name: "no changes", + oldHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + CreateNamespace: true, + Timeout: 300, + LogVerbosity: 0, + }, + }, + newHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + CreateNamespace: true, + Timeout: 300, + LogVerbosity: 0, + }, + }, + wantErr: false, + }, + { + name: "allowed - timeout change", + oldHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + Timeout: 300, + }, + }, + newHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + Timeout: 600, + }, + }, + wantErr: false, + }, + { + name: "allowed - logVerbosity change", + oldHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + LogVerbosity: 0, + }, + }, + newHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + LogVerbosity: 5, + }, + }, + wantErr: false, + }, + { + name: "allowed - ClusterManager source change", + oldHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + ClusterManager: &v1beta1.ClusterManager{ + Source: v1beta1.OCMSource{Registry: "old-registry"}, + }, + }, + }, + newHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + ClusterManager: &v1beta1.ClusterManager{ + Source: v1beta1.OCMSource{Registry: "new-registry"}, + }, + }, + }, + wantErr: false, + }, + { + name: "allowed - APIServer change", + oldHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + APIServer: "https://old-api-server:6443", + }, + }, + newHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + APIServer: "https://new-api-server:6443", + }, + }, + wantErr: false, + }, + { + name: "allowed - HubAddOns change", + oldHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + HubAddOns: []v1beta1.HubAddOn{ + {Name: "governance-policy-framework"}, + }, + }, + }, + newHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + HubAddOns: []v1beta1.HubAddOn{ + {Name: "argocd"}, + }, + }, + }, + 
wantErr: false, + }, + { + name: "allowed - AddOnConfigs change", + oldHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + AddOnConfigs: []v1beta1.AddOnConfig{ + {Name: "old-addon", Version: "v1.0.0"}, + }, + }, + }, + newHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + AddOnConfigs: []v1beta1.AddOnConfig{ + {Name: "new-addon", Version: "v2.0.0"}, + }, + }, + }, + wantErr: false, + }, + { + name: "allowed - RegistrationAuth change", + oldHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + RegistrationAuth: v1beta1.RegistrationAuth{Driver: "csr"}, + }, + }, + newHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + RegistrationAuth: v1beta1.RegistrationAuth{ + Driver: "awsirsa", + HubClusterARN: "arn:aws:eks:us-west-2:123456789013:cluster/test-hub", + }, + }, + }, + wantErr: false, + }, + { + name: "disallowed - CreateNamespace change", + oldHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + CreateNamespace: true, + }, + }, + newHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + CreateNamespace: false, + }, + }, + wantErr: true, + errMsg: "only changes to spec.apiServer, spec.clusterManager.source.*, spec.hubAddOns, spec.addOnConfigs, spec.logVerbosity, spec.timeout, and spec.registrationAuth are allowed when updating the hub", + }, + { + name: "disallowed - Force change", + oldHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + Force: false, + }, + }, + newHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + Force: true, + }, + }, + wantErr: true, + errMsg: "only changes to spec.apiServer, spec.clusterManager.source.*, spec.hubAddOns, spec.addOnConfigs, spec.logVerbosity, spec.timeout, and spec.registrationAuth are allowed when updating the hub", + }, + { + name: "disallowed - ClusterManager non-source change", + oldHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + ClusterManager: &v1beta1.ClusterManager{ + FeatureGates: "AddonManagement=true", + }, + }, + }, + newHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + ClusterManager: &v1beta1.ClusterManager{ + FeatureGates: "AddonManagement=true,ResourceCleanup=true", + }, 
+ }, + }, + wantErr: true, + errMsg: "only changes to spec.apiServer, spec.clusterManager.source.*, spec.hubAddOns, spec.addOnConfigs, spec.logVerbosity, spec.timeout, and spec.registrationAuth are allowed when updating the hub", + }, + { + name: "multiple allowed changes", + oldHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + Timeout: 300, + LogVerbosity: 0, + ClusterManager: &v1beta1.ClusterManager{ + Source: v1beta1.OCMSource{Registry: "old-registry"}, + }, + RegistrationAuth: v1beta1.RegistrationAuth{Driver: "csr"}, + }, + }, + newHub: &v1beta1.Hub{ + Spec: v1beta1.HubSpec{ + Timeout: 600, + LogVerbosity: 5, + ClusterManager: &v1beta1.ClusterManager{ + Source: v1beta1.OCMSource{Registry: "new-registry"}, + }, + RegistrationAuth: v1beta1.RegistrationAuth{Driver: "awsirsa"}, + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := allowHubUpdate(tt.oldHub, tt.newHub) + if tt.wantErr { + if err == nil { + t.Error("expected error but got none") + } else if err.Error() != tt.errMsg { + t.Errorf("expected error message %q but got %q", tt.errMsg, err.Error()) + } + } else if err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} + +func TestAllowSpokeUpdate(t *testing.T) { + tests := []struct { + name string + oldSpoke *v1beta1.Spoke + newSpoke *v1beta1.Spoke + wantErr bool + errMsg string + }{ + { + name: "no changes", + oldSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + CreateNamespace: true, + Timeout: 300, + LogVerbosity: 0, + }, + }, + newSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + CreateNamespace: true, + Timeout: 300, + LogVerbosity: 0, + }, + }, + wantErr: false, + }, + { + name: "allowed - klusterlet annotations change", + oldSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + Klusterlet: v1beta1.Klusterlet{ + Annotations: map[string]string{"old": "value"}, + }, + }, + }, + newSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + Klusterlet: v1beta1.Klusterlet{ + Annotations: 
map[string]string{"new": "value"}, + }, + }, + }, + wantErr: false, + }, + { + name: "allowed - kubeconfig change", + oldSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + Kubeconfig: v1beta1.Kubeconfig{ + InCluster: true, + }, + }, + }, + newSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + Kubeconfig: v1beta1.Kubeconfig{ + SecretReference: &v1beta1.SecretReference{ + Name: "new-secret", + KubeconfigKey: "kubeconfig", + }, + }, + }, + }, + wantErr: false, + }, + { + name: "allowed - addOns change", + oldSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + AddOns: []v1beta1.AddOn{ + {ConfigName: "old-addon"}, + }, + }, + }, + newSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + AddOns: []v1beta1.AddOn{ + {ConfigName: "new-addon"}, + }, + }, + }, + wantErr: false, + }, + { + name: "allowed - timeout change", + oldSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + Timeout: 300, + }, + }, + newSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + Timeout: 600, + }, + }, + wantErr: false, + }, + { + name: "allowed - logVerbosity change", + oldSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + LogVerbosity: 0, + }, + }, + newSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + LogVerbosity: 5, + }, + }, + wantErr: false, + }, + { + name: "disallowed - HubRef change", + oldSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + HubRef: v1beta1.HubRef{ + Name: "old-hub", + Namespace: "default", + }, + }, + }, + newSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + HubRef: v1beta1.HubRef{ + Name: "new-hub", + Namespace: "default", + }, + }, + }, + wantErr: true, + errMsg: "spoke contains changes which are not allowed; only changes to spec.klusterlet.annotations, spec.klusterlet.values, spec.kubeconfig, spec.addOns, spec.timeout, and spec.logVerbosity are allowed when updating a spoke", + }, + { + name: "disallowed - CreateNamespace change", + oldSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + CreateNamespace: true, + }, + }, + newSpoke: &v1beta1.Spoke{ + Spec: 
v1beta1.SpokeSpec{ + CreateNamespace: false, + }, + }, + wantErr: true, + errMsg: "spoke contains changes which are not allowed; only changes to spec.klusterlet.annotations, spec.klusterlet.values, spec.kubeconfig, spec.addOns, spec.timeout, and spec.logVerbosity are allowed when updating a spoke", + }, + { + name: "disallowed - klusterlet mode change", + oldSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + Klusterlet: v1beta1.Klusterlet{ + Mode: "Default", + }, + }, + }, + newSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + Klusterlet: v1beta1.Klusterlet{ + Mode: "Hosted", + }, + }, + }, + wantErr: true, + errMsg: "spoke contains changes which are not allowed; only changes to spec.klusterlet.annotations, spec.klusterlet.values, spec.kubeconfig, spec.addOns, spec.timeout, and spec.logVerbosity are allowed when updating a spoke", + }, + { + name: "disallowed - klusterlet feature gates change", + oldSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + Klusterlet: v1beta1.Klusterlet{ + FeatureGates: "AddonManagement=true", + }, + }, + }, + newSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + Klusterlet: v1beta1.Klusterlet{ + FeatureGates: "AddonManagement=true,ClusterClaim=true", + }, + }, + }, + wantErr: true, + errMsg: "spoke contains changes which are not allowed; only changes to spec.klusterlet.annotations, spec.klusterlet.values, spec.kubeconfig, spec.addOns, spec.timeout, and spec.logVerbosity are allowed when updating a spoke", + }, + { + name: "multiple allowed changes", + oldSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + Timeout: 300, + LogVerbosity: 0, + Kubeconfig: v1beta1.Kubeconfig{ + InCluster: true, + }, + Klusterlet: v1beta1.Klusterlet{ + Annotations: map[string]string{"old": "value"}, + }, + AddOns: []v1beta1.AddOn{ + {ConfigName: "old-addon"}, + }, + }, + }, + newSpoke: &v1beta1.Spoke{ + Spec: v1beta1.SpokeSpec{ + Timeout: 600, + LogVerbosity: 5, + Kubeconfig: v1beta1.Kubeconfig{ + SecretReference: &v1beta1.SecretReference{ + Name: 
"new-secret", + KubeconfigKey: "kubeconfig", + }, + }, + Klusterlet: v1beta1.Klusterlet{ + Annotations: map[string]string{"new": "value"}, + }, + AddOns: []v1beta1.AddOn{ + {ConfigName: "new-addon"}, + }, + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := allowSpokeUpdate(tt.oldSpoke, tt.newSpoke) + if tt.wantErr { + if err == nil { + t.Error("expected error but got none") + } else if err.Error() != tt.errMsg { + t.Errorf("expected error message %q but got %q", tt.errMsg, err.Error()) + } + } else if err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} diff --git a/fleetconfig-controller/internal/webhook/v1beta1/webhook_suite_test.go b/fleetconfig-controller/internal/webhook/v1beta1/webhook_suite_test.go index 5277e5df..f659d8d6 100644 --- a/fleetconfig-controller/internal/webhook/v1beta1/webhook_suite_test.go +++ b/fleetconfig-controller/internal/webhook/v1beta1/webhook_suite_test.go @@ -40,6 +40,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" fleetconfigopenclustermanagementiov1beta1 "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/file" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/kube" "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/test" // +kubebuilder:scaffold:imports ) @@ -48,16 +50,23 @@ import ( // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
var ( - ctx context.Context - cancel context.CancelFunc - k8sClient client.Client - cfg *rest.Config - testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc + k8sClient client.Client + cfg *rest.Config + testEnv *envtest.Environment + testConfig *test.Config + err error + kubeconfigCleanup func() ) func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) + testConfig, err = test.LoadConfig() + if err != nil { + panic(err) + } RunSpecs(t, "Webhook Suite") } @@ -86,8 +95,9 @@ var _ = BeforeSuite(func() { } // Retrieve the first found binary directory to allow running tests from IDEs - if getFirstFoundEnvTestBinaryDir() != "" { - testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir() + envTestBinaryDir := test.FindEnvTestBinaryDir(testConfig) + if envTestBinaryDir != "" { + testEnv.BinaryAssetsDirectory = envTestBinaryDir } // cfg is defined in this file globally. @@ -113,6 +123,16 @@ var _ = BeforeSuite(func() { }) Expect(err).NotTo(HaveOccurred()) + // Generate, save, and configure kubeconfig so in-cluster client lookups succeed + var kubeconfigPath string + raw, err := kube.RawFromRestConfig(cfg) + Expect(err).ShouldNot(HaveOccurred()) + kubeconfigPath, kubeconfigCleanup, err = file.TmpFile(raw, "kubeconfig") + Expect(err).ShouldNot(HaveOccurred()) + + Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed()) + logf.Log.Info("Kubeconfig", "path", kubeconfigPath) + err = SetupSpokeWebhookWithManager(mgr) Expect(err).NotTo(HaveOccurred()) @@ -145,27 +165,5 @@ var _ = AfterSuite(func() { cancel() err := testEnv.Stop() Expect(err).NotTo(HaveOccurred()) + kubeconfigCleanup() }) - -// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path. -// ENVTEST-based tests depend on specific binaries, usually located in paths set by -// controller-runtime. When running tests directly (e.g., via an IDE) without using -// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. 
-// -// This function streamlines the process by finding the required binaries, similar to -// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are -// properly set up, run 'make setup-envtest' beforehand. -func getFirstFoundEnvTestBinaryDir() string { - basePath := filepath.Join("..", "..", "..", "bin", "k8s") - entries, err := os.ReadDir(basePath) - if err != nil { - logf.Log.Error(err, "Failed to read directory", "path", basePath) - return "" - } - for _, entry := range entries { - if entry.IsDir() { - return filepath.Join(basePath, entry.Name()) - } - } - return "" -} diff --git a/fleetconfig-controller/pkg/common/common.go b/fleetconfig-controller/pkg/common/common.go index a6204bdb..aac8a483 100644 --- a/fleetconfig-controller/pkg/common/common.go +++ b/fleetconfig-controller/pkg/common/common.go @@ -12,11 +12,7 @@ import ( operatorapi "open-cluster-management.io/api/client/operator/clientset/versioned" workapi "open-cluster-management.io/api/client/work/clientset/versioned" clusterv1 "open-cluster-management.io/api/cluster/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1alpha1" - "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/file" "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/kube" ) @@ -91,45 +87,3 @@ func UpdateManagedCluster(ctx context.Context, client *clusterapi.Clientset, man } return nil } - -// PrepareKubeconfig parses a kubeconfig spec and returns updated clusteradm args. -// The '--kubeconfig' flag is added and a cleanup function is returned to remove the temp kubeconfig file. 
-func PrepareKubeconfig(ctx context.Context, kClient client.Client, kubeconfig v1alpha1.Kubeconfig, args []string) ([]string, func(), error) { - logger := log.FromContext(ctx) - - raw, err := kube.KubeconfigFromSecretOrCluster(ctx, kClient, kubeconfig) - if err != nil { - return args, nil, err - } - kubeconfigPath, cleanup, err := file.TmpFile(raw, "kubeconfig") - if err != nil { - return args, cleanup, err - } - if kubeconfig.Context != "" { - args = append(args, "--context", kubeconfig.Context) - } - - logger.V(1).Info("Using kubeconfig", "path", kubeconfigPath) - args = append(args, "--kubeconfig", kubeconfigPath) - return args, cleanup, nil -} - -// PrepareResources returns resource-related flags -func PrepareResources(resources v1alpha1.ResourceSpec) []string { - flags := []string{ - "--resource-qos-class", resources.QosClass, - } - if resources.Requests != nil { - requests := resources.Requests.String() - if requests != "" { - flags = append(flags, "--resource-requests", requests) - } - } - if resources.Limits != nil { - limits := resources.Limits.String() - if limits != "" { - flags = append(flags, "--resource-limits", limits) - } - } - return flags -} diff --git a/fleetconfig-controller/test/data/fleetconfig-v1alpha1.yaml b/fleetconfig-controller/test/data/fleetconfig-v1alpha1.yaml new file mode 100644 index 00000000..76e94833 --- /dev/null +++ b/fleetconfig-controller/test/data/fleetconfig-v1alpha1.yaml @@ -0,0 +1,72 @@ +apiVersion: fleetconfig.openclustermanagement.io/v1alpha1 +kind: FleetConfig +metadata: + name: fleetconfig + namespace: fleetconfig-system +spec: + timeout: 300 + logVerbosity: 5 + spokeAnnotations: + foo: "not-bar" + baz: "quux" + hub: + clusterManager: + featureGates: DefaultClusterSet=true,ManifestWorkReplicaSet=true,ResourceCleanup=true + purgeOperator: true + resources: + qosClass: Default + source: + bundleVersion: v1.0.0 + registry: quay.io/open-cluster-management + createNamespace: true + force: false + kubeconfig: + inCluster: 
true + context: "" + registrationAuth: + driver: csr + timeout: 120 + logVerbosity: 3 + spokes: + - name: hub-as-spoke + createNamespace: true + syncLabels: false + kubeconfig: + inCluster: true + klusterlet: + mode: "Default" + purgeOperator: true + forceInternalEndpointLookup: true + forceInternalEndpointLookupManaged: false + singleton: false + - name: spoke + addOns: + - configName: test-addon + createNamespace: true + syncLabels: false + kubeconfig: + # secret is provisioned during E2E test setup + secretReference: + name: "test-fleetconfig-kubeconfig" + namespace: "fleetconfig-system" + kubeconfigKey: "value" + klusterlet: + annotations: + foo: "bar" + baz: "quux" + mode: "Default" + purgeOperator: true + forceInternalEndpointLookup: true + forceInternalEndpointLookupManaged: false + singleton: false + addOnConfigs: + - name: test-addon + version: "v1.0.0" + manifests: | + apiVersion: v1 + kind: Namespace + metadata: + name: test-addon + clusterRoleBinding: "" + hubRegistration: false + overwrite: false diff --git a/fleetconfig-controller/test/data/fleetconfig-values.yaml b/fleetconfig-controller/test/data/fleetconfig-values.yaml index a14e4786..e2a61e23 100644 --- a/fleetconfig-controller/test/data/fleetconfig-values.yaml +++ b/fleetconfig-controller/test/data/fleetconfig-values.yaml @@ -1,9 +1,6 @@ fleetConfig: timeout: 300 logVerbosity: 5 - spokeAnnotations: - foo: "not-bar" - baz: "quux" hub: addOnConfigs: - name: test-addon @@ -18,17 +15,28 @@ fleetConfig: overwrite: false spokes: - name: hub-as-spoke + hubRef: + name: hub + namespace: fleetconfig-system + addOns: + - configName: test-addon createNamespace: true syncLabels: false kubeconfig: inCluster: true klusterlet: + annotations: + foo: "not-bar" + baz: "quux" mode: "Default" purgeOperator: true forceInternalEndpointLookup: true forceInternalEndpointLookupManaged: false singleton: false - name: spoke + hubRef: + name: hub + namespace: fleetconfig-system addOns: - configName: test-addon 
createNamespace: true @@ -37,10 +45,10 @@ fleetConfig: # secret is provisioned during E2E test setup secretReference: name: "test-fleetconfig-kubeconfig" - namespace: "default" kubeconfigKey: "value" klusterlet: annotations: + baz: "quux" foo: "bar" mode: "Default" purgeOperator: true diff --git a/fleetconfig-controller/test/e2e/helper.go b/fleetconfig-controller/test/e2e/helper.go index 4d2fa312..84892685 100644 --- a/fleetconfig-controller/test/e2e/helper.go +++ b/fleetconfig-controller/test/e2e/helper.go @@ -28,6 +28,7 @@ import ( workv1 "open-cluster-management.io/api/work/v1" "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1alpha1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" "github.com/open-cluster-management-io/lab/fleetconfig-controller/pkg/common" "github.com/open-cluster-management-io/lab/fleetconfig-controller/test/utils" ) @@ -45,9 +46,16 @@ var ( // global test context variables useExistingCluster bool + // v1alpha1 test variables + v1alpha1fleetConfigNN = ktypes.NamespacedName{Name: "fleetconfig", Namespace: fcNamespace} + + // v1beta1 test variables + v1beta1hubNN = ktypes.NamespacedName{Name: "hub", Namespace: fcNamespace} + v1beta1spokeNN = ktypes.NamespacedName{Name: "spoke", Namespace: fcNamespace} + v1beta1hubAsSpokeNN = ktypes.NamespacedName{Name: "hub-as-spoke", Namespace: fcNamespace} + // global test variables - fleetConfigNN = ktypes.NamespacedName{Name: "fleetconfig", Namespace: fcNamespace} - klusterletNN = ktypes.NamespacedName{Name: "klusterlet"} + klusterletNN = ktypes.NamespacedName{Name: "klusterlet"} // addon vars addonData = []struct { @@ -140,6 +148,7 @@ func setupTestEnvironment() *E2EContext { By("adding external APIs to the client-go scheme") Expect(v1alpha1.AddToScheme(scheme.Scheme)).To(Succeed()) + Expect(v1beta1.AddToScheme(scheme.Scheme)).To(Succeed()) Expect(clusterv1beta1.AddToScheme(scheme.Scheme)).To(Succeed()) 
Expect(clusterv1beta2.AddToScheme(scheme.Scheme)).To(Succeed()) Expect(operatorv1.AddToScheme(scheme.Scheme)).To(Succeed()) @@ -150,13 +159,18 @@ func setupTestEnvironment() *E2EContext { tc.kClient, err = utils.NewClient(tc.hubKubeconfig, scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + By("creating fleetconfig namespace") + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: fcNamespace}} + err = tc.kClient.Create(tc.ctx, ns) + Expect(client.IgnoreAlreadyExists(err)).NotTo(HaveOccurred()) + By("creating a kubeconfig secret for the spoke's internal kubeconfig") kcfg, err := os.ReadFile(tc.spokeKubeconfigInternal) // #nosec G304 ExpectWithOffset(1, err).NotTo(HaveOccurred()) secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: spokeSecretName, - Namespace: "default", + Namespace: "fleetconfig-system", }, Data: map[string][]byte{ kubeconfigSecretKey: kcfg, @@ -169,11 +183,6 @@ func setupTestEnvironment() *E2EContext { tc.kClientSpoke, err = utils.NewClient(tc.spokeKubeconfig, scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - By("creating fleetconfig namespace") - cmd = exec.Command("kubectl", "create", "ns", fcNamespace) - _, err = utils.RunCommand(cmd, "", false) - Expect(err).NotTo(HaveOccurred()) - return tc } @@ -254,7 +263,7 @@ func ensureFleetConfigProvisioned(tc *E2EContext, fc *v1alpha1.FleetConfig, extr By("ensuring the FleetConfig is provisioned") EventuallyWithOffset(1, func() error { - if err := tc.kClient.Get(tc.ctx, fleetConfigNN, fc); err != nil { + if err := tc.kClient.Get(tc.ctx, v1alpha1fleetConfigNN, fc); err != nil { utils.WarnError(err, "FleetConfig not provisioned") return err } @@ -278,7 +287,7 @@ func ensureFleetConfigProvisioned(tc *E2EContext, fc *v1alpha1.FleetConfig, extr // removeSpokeFromHub removes the spoke from the FleetConfig func removeSpokeFromHub(tc *E2EContext, fc *v1alpha1.FleetConfig) { By("removing the spoke") - if err := tc.kClient.Get(tc.ctx, fleetConfigNN, fc); err != nil { + if err := 
tc.kClient.Get(tc.ctx, v1alpha1fleetConfigNN, fc); err != nil { utils.WarnError(err, "failed to get FleetConfig") ExpectWithOffset(1, err).NotTo(HaveOccurred()) } @@ -402,37 +411,13 @@ func ensureAddonCreated(tc *E2EContext, addonIdx int) { }, 2*time.Minute, 1*time.Second).Should(Succeed()) } -func updateAddon(tc *E2EContext, fc *v1alpha1.FleetConfig) { +func updateFleetConfigAddon(tc *E2EContext, fc *v1alpha1.FleetConfig) { By("creating a configmap containing the source manifests") - EventuallyWithOffset(1, func() error { - projDir, err := utils.GetProjectDir() - if err != nil { - return err - } - path := filepath.Join(projDir, "test", "data", "addon-2-cm.yaml") - cmYaml, err := os.ReadFile(path) - if err != nil { - return err - } - cm := &corev1.ConfigMap{} - err = yaml.Unmarshal(cmYaml, cm) - if err != nil { - utils.WarnError(err, "failed to unmarshal configmap") - return err - } - cm.Namespace = fcNamespace - err = tc.kClient.Create(tc.ctx, cm) - if err != nil { - utils.WarnError(err, "failed to create configmap") - return err - } - return nil - - }, 1*time.Minute, 1*time.Second).Should(Succeed()) + EventuallyWithOffset(1, func() error { return createAddOnConfigMap(tc) }, 1*time.Minute, 1*time.Second).Should(Succeed()) By("adding a new version of test-addon") addon := addonData[1] - if err := tc.kClient.Get(tc.ctx, fleetConfigNN, fc); err != nil { + if err := tc.kClient.Get(tc.ctx, v1alpha1fleetConfigNN, fc); err != nil { utils.WarnError(err, "failed to get FleetConfig") ExpectWithOffset(1, err).NotTo(HaveOccurred()) } @@ -444,3 +429,146 @@ func updateAddon(tc *E2EContext, fc *v1alpha1.FleetConfig) { ExpectWithOffset(1, tc.kClient.Update(tc.ctx, fc)).NotTo(HaveOccurred()) } + +func updateHubAddon(tc *E2EContext, hub *v1beta1.Hub) { + By("creating a configmap containing the source manifests") + EventuallyWithOffset(1, func() error { return createAddOnConfigMap(tc) }, 1*time.Minute, 1*time.Second).Should(Succeed()) + + By("adding a new version of test-addon") + 
addon := addonData[1] + if err := tc.kClient.Get(tc.ctx, v1beta1hubNN, hub); err != nil { + utils.WarnError(err, "failed to get Hub") + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + hub.Spec.AddOnConfigs = append(hub.Spec.AddOnConfigs, v1beta1.AddOnConfig{ + Name: addon.name, + Version: addon.version, + Overwrite: true, + }) + + ExpectWithOffset(1, tc.kClient.Update(tc.ctx, hub)).NotTo(HaveOccurred()) +} + +func createAddOnConfigMap(tc *E2EContext) error { + projDir, err := utils.GetProjectDir() + if err != nil { + return err + } + path := filepath.Join(projDir, "test", "data", "addon-2-cm.yaml") + cmYaml, err := os.ReadFile(path) + if err != nil { + return err + } + cm := &corev1.ConfigMap{} + err = yaml.Unmarshal(cmYaml, cm) + if err != nil { + utils.WarnError(err, "failed to unmarshal configmap") + return err + } + cm.Namespace = fcNamespace + err = tc.kClient.Create(tc.ctx, cm) + if err != nil && !kerrs.IsAlreadyExists(err) { + utils.WarnError(err, "failed to create configmap") + return err + } + return nil +} + +// ensureHubAndSpokesProvisioned checks that the Hub and Spokes are properly provisioned with expected conditions +func ensureHubAndSpokesProvisioned(tc *E2EContext, hub *v1beta1.Hub, spokes []*v1beta1.Spoke, extraExpectedConditions map[string]metav1.ConditionStatus) { + hubExpectedConditions := map[string]metav1.ConditionStatus{ + "HubInitialized": metav1.ConditionTrue, + "CleanupFailed": metav1.ConditionFalse, + "AddonsConfigured": metav1.ConditionTrue, + } + spokeExpectedConditions := map[string]metav1.ConditionStatus{ + "SpokeJoined": metav1.ConditionTrue, + "CleanupFailed": metav1.ConditionFalse, + "AddonsConfigured": metav1.ConditionTrue, + } + for k, v := range extraExpectedConditions { + hubExpectedConditions[k] = v + spokeExpectedConditions[k] = v + } + + By("ensuring the Hub and Spokes are provisioned") + EventuallyWithOffset(1, func() error { + // Check Hub + if err := tc.kClient.Get(tc.ctx, v1beta1hubNN, hub); err != nil { + 
utils.WarnError(err, "Hub not provisioned") + return err + } + hubConditions := make([]metav1.Condition, len(hub.Status.Conditions)) + for i, c := range hub.Status.Conditions { + hubConditions[i] = c.Condition + } + if err := utils.AssertConditions(hubConditions, hubExpectedConditions); err != nil { + utils.WarnError(err, "Hub not provisioned") + return err + } + if hub.Status.Phase != "Running" { + err := fmt.Errorf("expected %s, got %s", "Running", hub.Status.Phase) + utils.WarnError(err, "Hub not provisioned") + return err + } + + // Check each Spoke + for _, spoke := range spokes { + if err := tc.kClient.Get(tc.ctx, ktypes.NamespacedName{Name: spoke.Name, Namespace: spoke.Namespace}, spoke); err != nil { + utils.WarnError(err, "Spoke %s not provisioned", spoke.Name) + return err + } + spokeConditions := make([]metav1.Condition, len(spoke.Status.Conditions)) + for i, c := range spoke.Status.Conditions { + spokeConditions[i] = c.Condition + } + if err := utils.AssertConditions(spokeConditions, spokeExpectedConditions); err != nil { + utils.WarnError(err, "Spoke %s not provisioned", spoke.Name) + return err + } + if spoke.Status.Phase != "Running" { + err := fmt.Errorf("expected %s, got %s", "Running", spoke.Status.Phase) + utils.WarnError(err, "Spoke %s not provisioned", spoke.Name) + return err + } + } + return nil + }, 20*time.Minute, 10*time.Second).Should(Succeed()) +} + +func deployV1alpha1FleetConfig(tc *E2EContext) error { + projectDir, err := utils.GetProjectDir() + if err != nil { + return fmt.Errorf("failed to get project dir: %w", err) + } + + fcPath := filepath.Join(projectDir, "test", "data", "fleetconfig-v1alpha1.yaml") + fcBytes, err := os.ReadFile(fcPath) + if err != nil { + return fmt.Errorf("failed to read fleetconfig-v1alpha1.yaml: %w", err) + } + + var fleetConfig v1alpha1.FleetConfig + if err := yaml.Unmarshal(fcBytes, &fleetConfig); err != nil { + return fmt.Errorf("failed to unmarshal fleetconfig-v1alpha1.yaml: %w", err) + } + + // Apply 
the FleetConfig using the controller-runtime client + if err := tc.kClient.Create(tc.ctx, &fleetConfig); err != nil { + // If already exists, try to update + if kerrs.IsAlreadyExists(err) { + existing := &v1alpha1.FleetConfig{} + getErr := tc.kClient.Get(tc.ctx, v1alpha1fleetConfigNN, existing) + if getErr != nil { + return fmt.Errorf("failed to get existing FleetConfig: %w", getErr) + } + fleetConfig.ResourceVersion = existing.ResourceVersion + if err := tc.kClient.Update(tc.ctx, &fleetConfig); err != nil { + return fmt.Errorf("failed to update FleetConfig: %w", err) + } + return nil + } + return fmt.Errorf("failed to create FleetConfig: %w", err) + } + return nil +} diff --git a/fleetconfig-controller/test/e2e/fleetconfig.go b/fleetconfig-controller/test/e2e/v1alpha1_fleetconfig.go similarity index 94% rename from fleetconfig-controller/test/e2e/fleetconfig.go rename to fleetconfig-controller/test/e2e/v1alpha1_fleetconfig.go index 753610fe..ea40d4f5 100644 --- a/fleetconfig-controller/test/e2e/fleetconfig.go +++ b/fleetconfig-controller/test/e2e/v1alpha1_fleetconfig.go @@ -35,7 +35,7 @@ import ( "github.com/open-cluster-management-io/lab/fleetconfig-controller/test/utils" ) -var _ = Describe("fleetconfig", Label("fleetconfig"), Ordered, func() { +var _ = Describe("fleetconfig", Label("v1alpha1"), Serial, Ordered, func() { var ( tc *E2EContext @@ -47,7 +47,8 @@ var _ = Describe("fleetconfig", Label("fleetconfig"), Ordered, func() { tc = setupTestEnvironment() By("deploying fleetconfig") - Expect(utils.DevspaceRunPipeline(tc.ctx, tc.hubKubeconfig, "deploy-local", fcNamespace)).To(Succeed()) + Expect(utils.DevspaceRunPipeline(tc.ctx, tc.hubKubeconfig, "deploy-local", fcNamespace, "v1alpha1")).To(Succeed()) + Expect(deployV1alpha1FleetConfig(tc)).To(Succeed()) }) AfterAll(func() { @@ -111,14 +112,14 @@ var _ = Describe("fleetconfig", Label("fleetconfig"), Ordered, func() { It("should not allow changes to the FleetConfig resource", func() { By("failing to patch the 
FleetConfig's feature gates") - fc, err := utils.GetFleetConfig(tc.ctx, tc.kClient, fleetConfigNN) + fc, err := utils.GetFleetConfig(tc.ctx, tc.kClient, v1alpha1fleetConfigNN) Expect(err).NotTo(HaveOccurred()) patchFeatureGates := "DefaultClusterSet=true,ManifestWorkReplicaSet=true,ResourceCleanup=false" Expect(utils.UpdateFleetConfigFeatureGates(tc.ctx, tc.kClient, fc, patchFeatureGates)).ToNot(Succeed()) }) It("should update an addon and make sure its propagated to the spoke", func() { - updateAddon(tc, fc) + updateFleetConfigAddon(tc, fc) ensureAddonCreated(tc, 1) }) @@ -130,7 +131,7 @@ var _ = Describe("fleetconfig", Label("fleetconfig"), Ordered, func() { By("ensuring the spoke is deregistered properly") EventuallyWithOffset(1, func() error { - if err := tc.kClient.Get(tc.ctx, fleetConfigNN, fc); err != nil { + if err := tc.kClient.Get(tc.ctx, v1alpha1fleetConfigNN, fc); err != nil { return err } if len(fc.Status.JoinedSpokes) > 1 { @@ -197,7 +198,7 @@ var _ = Describe("fleetconfig", Label("fleetconfig"), Ordered, func() { By("deleting the FleetConfig and ensuring that it isn't deleted until the ManifestWork is deleted") ExpectWithOffset(1, tc.kClient.Delete(tc.ctx, fcClone)).To(Succeed()) EventuallyWithOffset(1, func() error { - if err := tc.kClient.Get(tc.ctx, fleetConfigNN, fcClone); err != nil { + if err := tc.kClient.Get(tc.ctx, v1alpha1fleetConfigNN, fcClone); err != nil { utils.WarnError(err, "failed to get FleetConfig") return err } @@ -232,7 +233,7 @@ var _ = Describe("fleetconfig", Label("fleetconfig"), Ordered, func() { By("ensuring the FleetConfig is deleted once the ManifestWork is deleted") ensureResourceDeleted( func() error { - err := tc.kClient.Get(tc.ctx, fleetConfigNN, fcClone) + err := tc.kClient.Get(tc.ctx, v1alpha1fleetConfigNN, fcClone) if kerrs.IsNotFound(err) { utils.Info("FleetConfig deleted successfully") return nil diff --git a/fleetconfig-controller/test/e2e/v1beta1_hub_spoke.go 
b/fleetconfig-controller/test/e2e/v1beta1_hub_spoke.go new file mode 100644 index 00000000..134628f0 --- /dev/null +++ b/fleetconfig-controller/test/e2e/v1beta1_hub_spoke.go @@ -0,0 +1,256 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "errors" + "fmt" + "os" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + kerrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ktypes "k8s.io/apimachinery/pkg/types" + operatorv1 "open-cluster-management.io/api/operator/v1" + + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/pkg/common" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/test/utils" +) + +var _ = Describe("hub and spoke", Label("v1beta1"), Serial, Ordered, func() { + + var ( + tc *E2EContext + hub = &v1beta1.Hub{} + hubClone = &v1beta1.Hub{} + spoke = &v1beta1.Spoke{ + ObjectMeta: metav1.ObjectMeta{ + Name: v1beta1spokeNN.Name, + Namespace: v1beta1spokeNN.Namespace, + }, + } + hubAsSpoke = &v1beta1.Spoke{ + ObjectMeta: metav1.ObjectMeta{ + Name: v1beta1hubAsSpokeNN.Name, + Namespace: v1beta1hubAsSpokeNN.Namespace, + }, + } + spokeClone = &v1beta1.Spoke{} + hubAsSpokeClone = &v1beta1.Spoke{} + ) + + BeforeAll(func() { + tc = setupTestEnvironment() + + By("deploying fleetconfig") + Expect(utils.DevspaceRunPipeline(tc.ctx, 
tc.hubKubeconfig, "deploy-local", fcNamespace, "v1beta1")).To(Succeed()) + }) + + AfterAll(func() { + teardownTestEnvironment(tc) + }) + + // Tests FleetConfig operations with ResourceCleanup feature gate enabled, verifying: + // 1. Cluster joining (spoke and hub-as-spoke) to the hub + // 2. Addon configuration on hub and installation on spoke + // 3. ManifestWork creation in hub-as-spoke namespace and namespace creation validation + // 4. Prevention of feature gate modifications during active operation + // 5. Addon update and propagation + // 6. Spoke removal with proper deregistration from hub + // 7. ManagedCluster and namespace deletion validation + // 8. Automatic ManifestWork cleanup when Hub and Spoke resource are deleted + Context("deploy and teardown Hub and Spokes with ResourceCleanup feature gate enabled", func() { + + It("should join the spoke and hub-as-spoke clusters to the hub", func() { + // NOTE: The FleetConfig CR is created by devspace when the fleetconfig-controller chart is installed. + // Its configuration is defined via the fleetConfig values. 
+ ensureHubAndSpokesProvisioned(tc, hub, []*v1beta1.Spoke{spoke, hubAsSpoke}, nil) + + By("cloning the FleetConfig resources for further scenarios") + err := utils.CloneHub(hub, hubClone) + Expect(err).NotTo(HaveOccurred()) + err = utils.CloneSpoke(spoke, spokeClone) + Expect(err).NotTo(HaveOccurred()) + err = utils.CloneSpoke(hubAsSpoke, hubAsSpokeClone) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should verify addons configured on the hub and enabled on the spoke", func() { + ensureAddonCreated(tc, 0) + }) + + It("should verify spoke cluster annotations", func() { + EventuallyWithOffset(1, func() error { + klusterlet := &operatorv1.Klusterlet{} + if err := tc.kClientSpoke.Get(tc.ctx, klusterletNN, klusterlet); err != nil { + return err + } + if err := assertKlusterletAnnotation(klusterlet, "foo", "bar"); err != nil { + return err + } + if err := assertKlusterletAnnotation(klusterlet, "baz", "quux"); err != nil { + return err + } + return nil + }, 1*time.Minute, 1*time.Second).Should(Succeed()) + }) + + It("should successfully create a namespace in the hub-as-spoke cluster", func() { + + By("creating a ManifestWork in the hub-as-spoke cluster namespace") + EventuallyWithOffset(1, func() error { + return createManifestWork(tc.ctx, hubAsSpokeName) + }, 1*time.Minute, 1*time.Second).Should(Succeed()) + + By("ensuring the test-namespace namespace is created on the hub") + EventuallyWithOffset(1, func() error { + return assertNamespace(tc.ctx, hubAsSpokeName, tc.kClient) + }, 2*time.Minute, 10*time.Second).Should(Succeed()) + }) + + It("should not allow changes to the FleetConfig resource", func() { + + By("failing to patch the Hub's feature gates") + hub, err := utils.GetHub(tc.ctx, tc.kClient, v1beta1hubNN) + Expect(err).NotTo(HaveOccurred()) + patchFeatureGates := "DefaultClusterSet=true,ManifestWorkReplicaSet=true,ResourceCleanup=false" + Expect(utils.UpdateHubFeatureGates(tc.ctx, tc.kClient, hub, patchFeatureGates)).ToNot(Succeed()) + }) + + It("should update 
an addon and make sure its propagated to the spoke", func() { + updateHubAddon(tc, hub) + ensureAddonCreated(tc, 1) + }) + + It("should delete a Spoke", func() { + err := tc.kClient.Delete(tc.ctx, spoke) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should clean up the hub cluster", func() { + + By("ensuring the spoke is deregistered properly") + EventuallyWithOffset(1, func() error { + By("ensuring the Spoke resource is deleted") + err := tc.kClient.Get(tc.ctx, v1beta1spokeNN, spoke) + if err == nil { + return errors.New("spoke still exists") + } + if err != nil && !kerrs.IsNotFound(err) { + return err + } + + kcfg, err := os.ReadFile(tc.hubKubeconfig) + if err != nil { + return err + } + clusterC, err := common.ClusterClient(kcfg) + if err != nil { + return err + } + + By("ensuring the ManagedCluster is deleted") + _, err = clusterC.ClusterV1().ManagedClusters().Get(tc.ctx, spokeName, metav1.GetOptions{}) + if err != nil { + if !kerrs.IsNotFound(err) { + return err + } + utils.Info("ManagedCluster successfully deleted") + } else { + err := errors.New("ManagedCluster not deleted yet") + utils.WarnError(err, "ManagedCluster still exists") + return err + } + + By("ensuring the ManagedCluster namespace is deleted") + ns := &corev1.Namespace{} + err = tc.kClient.Get(tc.ctx, ktypes.NamespacedName{Name: spokeName}, ns) + if err != nil { + if !kerrs.IsNotFound(err) { + return err + } + utils.Info("Managed Cluster namespace deleted successfully") + } else { + err := errors.New("ManagedCluster namespace not deleted yet") + utils.WarnError(err, "ManagedCluster namespace still exists") + return err + } + return nil + }, 5*time.Minute, 10*time.Second).Should(Succeed()) + + By("deleting the Hub and ensuring that it isn't deleted until the ManifestWork is deleted") + ExpectWithOffset(1, tc.kClient.Delete(tc.ctx, hubClone)).To(Succeed()) + EventuallyWithOffset(1, func() error { + if err := tc.kClient.Get(tc.ctx, v1beta1hubNN, hubClone); err != nil { + utils.WarnError(err, "failed to get Hub") + return err + } + if
hubClone.Status.Phase != v1beta1.Deleting { + err := fmt.Errorf("expected %s, got %s", v1beta1.Deleting, hubClone.Status.Phase) + utils.WarnError(err, "FleetConfig deletion not started") + return err + } + conditions := make([]metav1.Condition, len(hubClone.Status.Conditions)) + for i, c := range hubClone.Status.Conditions { + conditions[i] = c.Condition + } + if err := utils.AssertConditions(conditions, map[string]metav1.ConditionStatus{ + v1beta1.HubInitialized: metav1.ConditionTrue, + v1beta1.CleanupFailed: metav1.ConditionTrue, + v1beta1.AddonsConfigured: metav1.ConditionTrue, + }); err != nil { + utils.WarnError(err, "Hub deletion not blocked") + return err + } + return nil + }, 5*time.Minute, 10*time.Second).Should(Succeed()) + + By("deleting the manifest work from the hub") + ExpectWithOffset(1, deleteManifestWork(tc.ctx, hubAsSpokeName)).To(Succeed()) + + By("ensuring the Hub and hub-as-spoke Spoke are deleted once the ManifestWork is deleted") + ensureResourceDeleted( + func() error { + err := tc.kClient.Get(tc.ctx, v1beta1hubAsSpokeNN, hubAsSpokeClone) + if kerrs.IsNotFound(err) { + utils.Info("Spoke deleted successfully") + return nil + } else if err != nil { + utils.WarnError(err, "failed to check if Spoke was deleted") + } + return errors.New("spoke still exists") + }, + ) + ensureResourceDeleted( + func() error { + err := tc.kClient.Get(tc.ctx, v1beta1hubNN, hubClone) + if kerrs.IsNotFound(err) { + utils.Info("Hub deleted successfully") + return nil + } else if err != nil { + utils.WarnError(err, "failed to check if Hub was deleted") + } + return errors.New("hub still exists") + }, + ) + }) + }) +}) diff --git a/fleetconfig-controller/test/utils/utils.go b/fleetconfig-controller/test/utils/utils.go index 736a0262..6b0e4909 100644 --- a/fleetconfig-controller/test/utils/utils.go +++ b/fleetconfig-controller/test/utils/utils.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" 
"github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1alpha1" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/api/v1beta1" ) const ( @@ -74,7 +75,7 @@ func WarnError(err error, format string, a ...any) { } // DevspaceRunPipeline runs a devspace pipeline -func DevspaceRunPipeline(ctx context.Context, kubeconfig, pipeline, namespace string) error { +func DevspaceRunPipeline(ctx context.Context, kubeconfig, pipeline, namespace, profile string) error { projDir, err := GetProjectDir() if err != nil { return fmt.Errorf("failed to get project directory: %v", err) @@ -84,6 +85,7 @@ func DevspaceRunPipeline(ctx context.Context, kubeconfig, pipeline, namespace st "devspace", "run-pipeline", pipeline, "--kubeconfig", kubeconfig, "--namespace", namespace, + "--profile", profile, "--no-warn", "--force-build", // "--debug", ) @@ -291,6 +293,42 @@ func UpdateFleetConfigFeatureGates(ctx context.Context, kClient client.Client, f return PatchFleetConfig(ctx, kClient, original, fc) } +// GetHub gets a Hub +func GetHub(ctx context.Context, kClient client.Client, nn ktypes.NamespacedName) (*v1beta1.Hub, error) { + hub := &v1beta1.Hub{} + return hub, kClient.Get(ctx, nn, hub) +} + +// PatchHub patches a Hub +func PatchHub(ctx context.Context, kClient client.Client, original *v1beta1.Hub, patch *v1beta1.Hub) error { + patchObject := client.MergeFrom(original) + return kClient.Patch(ctx, patch, patchObject) +} + +// UpdateHubFeatureGates updates a Hub's feature gates +func UpdateHubFeatureGates(ctx context.Context, kClient client.Client, hub *v1beta1.Hub, featureGates string) error { + if hub.Spec.ClusterManager == nil { + return fmt.Errorf("ClusterManager is nil") + } + + original := hub.DeepCopy() + hub.Spec.ClusterManager.FeatureGates = featureGates + + return PatchHub(ctx, kClient, original, hub) +} + +// GetSpoke gets a Spoke +func GetSpoke(ctx context.Context, kClient client.Client, nn ktypes.NamespacedName) (*v1beta1.Spoke, error) { + spoke := 
&v1beta1.Spoke{} + return spoke, kClient.Get(ctx, nn, spoke) +} + +// PatchSpoke patches a Spoke +func PatchSpoke(ctx context.Context, kClient client.Client, original *v1beta1.Spoke, patch *v1beta1.Spoke) error { + patchObject := client.MergeFrom(original) + return kClient.Patch(ctx, patch, patchObject) +} + // CloneFleetConfig clones a FleetConfig func CloneFleetConfig(fc *v1alpha1.FleetConfig, dest *v1alpha1.FleetConfig) error { *dest = v1alpha1.FleetConfig{ @@ -302,3 +340,27 @@ func CloneFleetConfig(fc *v1alpha1.FleetConfig, dest *v1alpha1.FleetConfig) erro } return nil } + +// CloneHub clones a Hub +func CloneHub(hub *v1beta1.Hub, dest *v1beta1.Hub) error { + *dest = v1beta1.Hub{ + ObjectMeta: metav1.ObjectMeta{ + Name: hub.Name, + Namespace: hub.Namespace, + }, + Spec: *hub.Spec.DeepCopy(), + } + return nil +} + +// CloneSpoke clones a Spoke +func CloneSpoke(spoke *v1beta1.Spoke, dest *v1beta1.Spoke) error { + *dest = v1beta1.Spoke{ + ObjectMeta: metav1.ObjectMeta{ + Name: spoke.Name, + Namespace: spoke.Namespace, + }, + Spec: *spoke.Spec.DeepCopy(), + } + return nil +}