diff --git a/fleetconfig-controller/api/v1beta1/constants.go b/fleetconfig-controller/api/v1beta1/constants.go index c8ced172..c52be0c9 100644 --- a/fleetconfig-controller/api/v1beta1/constants.go +++ b/fleetconfig-controller/api/v1beta1/constants.go @@ -162,6 +162,13 @@ const ( LabelAddOnManagedBy = "addon.open-cluster-management.io/managedBy" ) +// FleetConfig addon annotations +const ( + // AnnotationAddOnDeploymentConfigHash is the annotation key for storing the hash of an addon's deployment configuration. + // Used to detect when the configuration changes and trigger a redeploy. + AnnotationAddOnDeploymentConfigHash = "fleetconfig.open-cluster-management.io/configHash" +) + // Registration driver types const ( // CSRRegistrationDriver is the default CSR-based registration driver. diff --git a/fleetconfig-controller/api/v1beta1/spoke_types.go b/fleetconfig-controller/api/v1beta1/spoke_types.go index 1a741210..02bea7a2 100644 --- a/fleetconfig-controller/api/v1beta1/spoke_types.go +++ b/fleetconfig-controller/api/v1beta1/spoke_types.go @@ -23,6 +23,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" "open-cluster-management.io/ocm/pkg/operator/helpers/chart" ) @@ -279,13 +280,14 @@ type AddOn struct { // +required ConfigName string `json:"configName"` - // The namespace to install the add-on in. If left empty, installs into the "open-cluster-management-addon" namespace. - // +optional - InstallNamespace string `json:"installNamespace,omitempty"` - // Annotations to apply to the add-on. // +optional Annotations map[string]string `json:"annotations,omitempty"` + + // DeploymentConfig provides additional configuration for the add-on deployment. + // If specified, this will be used to create an AddOnDeploymentConfig resource. + // +optional + DeploymentConfig *addonv1alpha1.AddOnDeploymentConfigSpec `json:"deploymentConfig,omitempty"` } // SpokeStatus defines the observed state of Spoke. diff --git a/fleetconfig-controller/api/v1beta1/zz_generated.deepcopy.go b/fleetconfig-controller/api/v1beta1/zz_generated.deepcopy.go index 197a8809..76fc45c0 100644 --- a/fleetconfig-controller/api/v1beta1/zz_generated.deepcopy.go +++ b/fleetconfig-controller/api/v1beta1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ package v1beta1 import ( runtime "k8s.io/apimachinery/pkg/runtime" + "open-cluster-management.io/api/addon/v1alpha1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -34,6 +35,11 @@ func (in *AddOn) DeepCopyInto(out *AddOn) { (*out)[key] = val } } + if in.DeploymentConfig != nil { + in, out := &in.DeploymentConfig, &out.DeploymentConfig + *out = new(v1alpha1.AddOnDeploymentConfigSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOn. 
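Note: the `configHash` annotation introduced above is computed by `hash.ComputeHash` from the new `internal/hash` package, which this diff references but does not include. A minimal sketch of what such a helper could look like, assuming a SHA-256 digest over deterministic JSON marshaling (the actual implementation may differ):

```go
package hash

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// ComputeHash returns a hex-encoded SHA-256 digest of the JSON encoding of obj.
// json.Marshal sorts map keys, so the digest is deterministic for struct- and
// map-based specs, making it suitable for drift detection via annotations.
func ComputeHash(obj any) (string, error) {
	b, err := json.Marshal(obj)
	if err != nil {
		return "", fmt.Errorf("failed to marshal object for hashing: %w", err)
	}
	sum := sha256.Sum256(b)
	return hex.EncodeToString(sum[:]), nil
}
```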
diff --git a/fleetconfig-controller/build/Dockerfile.base b/fleetconfig-controller/build/Dockerfile.base index 83972aa5..047260e0 100644 --- a/fleetconfig-controller/build/Dockerfile.base +++ b/fleetconfig-controller/build/Dockerfile.base @@ -38,7 +38,7 @@ ARG ARCH RUN apk update && apk add --no-cache bash curl # Install clusteradm -ARG CLUSTERADM_VERSION=1.0.2 +ARG CLUSTERADM_VERSION=1.1.0 RUN curl -L https://raw.githubusercontent.com/open-cluster-management-io/clusteradm/main/install.sh | bash -s -- ${CLUSTERADM_VERSION} ## Stage 3: Compress binaries with upx to reduce image size diff --git a/fleetconfig-controller/build/Dockerfile.devspace b/fleetconfig-controller/build/Dockerfile.devspace index 2e198134..16c468c5 100644 --- a/fleetconfig-controller/build/Dockerfile.devspace +++ b/fleetconfig-controller/build/Dockerfile.devspace @@ -16,7 +16,7 @@ RUN apk add --no-cache bash curl python3 py3-pip RUN go install github.com/go-delve/delve/cmd/dlv@latest # Install clusteradm -ARG CLUSTERADM_VERSION=1.0.2 +ARG CLUSTERADM_VERSION=1.1.0 RUN curl -L https://raw.githubusercontent.com/open-cluster-management-io/clusteradm/main/install.sh | bash -s -- ${CLUSTERADM_VERSION} # Install aws-iam-authenticator if building for EKS diff --git a/fleetconfig-controller/build/Dockerfile.eks b/fleetconfig-controller/build/Dockerfile.eks index ce1db588..63c00ce6 100644 --- a/fleetconfig-controller/build/Dockerfile.eks +++ b/fleetconfig-controller/build/Dockerfile.eks @@ -38,7 +38,7 @@ ARG ARCH RUN apk update && apk add --no-cache bash curl # Install clusteradm -ARG CLUSTERADM_VERSION=1.0.2 +ARG CLUSTERADM_VERSION=1.1.0 RUN curl -L https://raw.githubusercontent.com/open-cluster-management-io/clusteradm/main/install.sh | bash -s -- ${CLUSTERADM_VERSION} # Install aws-iam-authenticator diff --git a/fleetconfig-controller/build/Dockerfile.gke b/fleetconfig-controller/build/Dockerfile.gke index 1b2b2e9f..a67e4c68 100644 --- a/fleetconfig-controller/build/Dockerfile.gke +++ b/fleetconfig-controller/build/Dockerfile.gke @@ -37,7 +37,7 @@ ARG ARCH RUN apk update && apk add --no-cache bash curl # Install clusteradm -ARG CLUSTERADM_VERSION=1.0.2 +ARG CLUSTERADM_VERSION=1.1.0 RUN curl -L https://raw.githubusercontent.com/open-cluster-management-io/clusteradm/main/install.sh | bash -s -- ${CLUSTERADM_VERSION} ## Stage 3: Compress binaries with upx to reduce image size diff --git a/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io_spokes.yaml b/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io_spokes.yaml index 1ba93c71..3bba312b 100644 --- a/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io_spokes.yaml +++ b/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io_spokes.yaml @@ -60,10 +60,244 @@ spec: description: The name of the add-on being enabled. Must match one of the AddOnConfigs or HubAddOns names. type: string - installNamespace: - description: The namespace to install the add-on in. If left - empty, installs into the "open-cluster-management-addon" namespace. - type: string + deploymentConfig: + description: |- + DeploymentConfig provides additional configuration for the add-on deployment. + If specified, this will be used to create an AddOnDeploymentConfig resource. 
+ properties: + agentInstallNamespace: + default: open-cluster-management-agent-addon + description: AgentInstallNamespace is the namespace where + the add-on agent should be installed on the managed cluster. + maxLength: 63 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + customizedVariables: + description: |- + CustomizedVariables is a list of name-value variables for the current add-on deployment. + The add-on implementation can use these variables to render its add-on deployment. + The default is an empty list. + items: + description: CustomizedVariable represents a customized + variable for add-on deployment. + properties: + name: + description: Name of this variable. + maxLength: 255 + pattern: ^[a-zA-Z_][_a-zA-Z0-9]*$ + type: string + value: + description: Value of this variable. + maxLength: 1024 + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodePlacement: + description: |- + NodePlacement enables explicit control over the scheduling of the add-on agents on the + managed cluster. + All add-on agent pods are expected to comply with this node placement. + If the placement is nil, the placement is not specified, it will be omitted. + If the placement is an empty object, the placement will match all nodes and tolerate nothing. + properties: + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector defines which Nodes the Pods are scheduled on. + If the selector is an empty list, it will match all nodes. + The default is an empty list. + type: object + tolerations: + description: |- + Tolerations is attached by pods to tolerate any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + If the tolerations is an empty list, it will tolerate nothing. + The default is an empty list. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + proxyConfig: + description: |- + ProxyConfig holds proxy settings for add-on agent on the managed cluster. + Empty means no proxy settings is available.
+ properties: + caBundle: + description: |- + CABundle is a CA certificate bundle to verify the proxy server. + And it's only useful when HTTPSProxy is set and a HTTPS proxy server is specified. + format: byte + type: string + httpProxy: + description: HTTPProxy is the URL of the proxy for HTTP + requests + type: string + httpsProxy: + description: HTTPSProxy is the URL of the proxy for + HTTPS requests + type: string + noProxy: + description: |- + NoProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy + should not be used. + type: string + type: object + registries: + description: |- + Registries describes how to override images used by the addon agent on the managed cluster. + the following example will override image "quay.io/open-cluster-management/addon-agent" to + "quay.io/ocm/addon-agent" when deploying the addon agent + + registries: + - source: quay.io/open-cluster-management/addon-agent + mirror: quay.io/ocm/addon-agent + items: + description: ImageMirror describes how to mirror images + from a source + properties: + mirror: + description: Mirror is the mirrored registry of the + Source. Will be ignored if Mirror is empty. + type: string + source: + description: Source is the source registry. All image + registries will be replaced by Mirror if Source + is empty. + type: string + required: + - mirror + type: object + type: array + resourceRequirements: + description: |- + ResourceRequirements specify the resources required by add-on agents. + If a container matches multiple ContainerResourceRequirements, the last matched configuration in the + array will take precedence. + items: + description: ContainerResourceRequirements defines resources + required by one or a group of containers. + properties: + containerID: + description: |- + ContainerID is a unique identifier for an agent container. It consists of three parts: resource types, + resource name, and container name, separated by ':'. The format follows + '{resource_types}:{resource_name}:{container_name}' where + 1). Supported resource types include deployments, daemonsets, statefulsets, replicasets, jobs, + cronjobs and pods; + 2). Wildcards (*) can be used in any part to match multiple containers. For example, '*:*:*' + matches all containers of the agent. + pattern: ^(deployments|daemonsets|statefulsets|replicasets|jobs|cronjobs|pods|\*):.+:.+$ + type: string + resources: + description: Compute resources required by matched + containers. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + required: + - containerID + - resources + type: object + type: array + x-kubernetes-list-map-keys: + - containerID + x-kubernetes-list-type: map + type: object required: - configName type: object diff --git a/fleetconfig-controller/docs/addon-framework.md b/fleetconfig-controller/docs/addon-framework.md new file mode 100644 index 00000000..ca0760f8 --- /dev/null +++ b/fleetconfig-controller/docs/addon-framework.md @@ -0,0 +1,243 @@ +# Addon Configuration using FleetConfig Controller + +## Overview + +FleetConfig Controller (FCC) provides a declarative wrapper around the Open Cluster Management (OCM) [addon framework](https://open-cluster-management.io/docs/concepts/add-on-extensibility/addon/), simplifying the process of creating and deploying custom addons across your fleet of managed clusters. Instead of manually running `clusteradm` commands, you can define addons in your Hub and Spoke resources, and FCC handles the lifecycle management automatically. + +The addon framework in FCC supports two types of addons: +1. **Custom Addons** - User-defined addons configured via `AddOnConfig` in the Hub spec +2. **Built-in Hub Addons** - Pre-configured addons (ArgoCD and Governance Policy Framework) via `HubAddOn` + +## Custom Addons + +### How It Works + +FCC wraps the `clusteradm addon create` command, allowing you to define custom addon templates declaratively. When you specify an `AddOnConfig` in your Hub resource, FCC: + +1. Looks for a ConfigMap containing the addon manifests +2. Creates an `AddOnTemplate` resource in the hub cluster using clusteradm +3. Makes the addon available for installation on spoke clusters +4. Manages the lifecycle (create/update/delete) based on your configuration + +### Configuring a Custom Addon + +#### Step 1: Create a ConfigMap with Addon Manifests + +First, create a ConfigMap containing your addon manifests. The ConfigMap must be named following the pattern: `fleet-addon-<name>-<version>` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: fleet-addon-myapp-v1.0.0 + namespace: <namespace> +data: + # Option 1: Provide raw manifests directly + manifestsRaw: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: myapp-agent + namespace: open-cluster-management-agent-addon + spec: + # ... your deployment spec + --- + # Additional resources...
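+ # Note: manifests may reference variables like '{{CLUSTER_NAME}}' (built-in) or + # '{{FOO}}' (user-defined via the Spoke's deploymentConfig.customizedVariables); + # these are resolved when the addon agent is deployed, as exercised by the test + # data and e2e assertions later in this diff.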
+ + # Option 2: Provide a URL to manifests (mutually exclusive with manifestsRaw) + # manifestsURL: "https://example.com/myapp/manifests.yaml" +``` + +**Note**: You must provide either `manifestsRaw` or `manifestsURL`, but not both. + +#### Step 2: Define AddOnConfig in Hub Resource + +Add the addon configuration to your Hub resource: + +```yaml +apiVersion: fleetconfig.open-cluster-management.io/v1beta1 +kind: Hub +metadata: + name: my-hub +spec: + # ... other hub configuration + + addOnConfigs: + - name: myapp + version: v1.0.0 + + # Optional: Enable hub registration for the addon agent + # Allows the addon agent to register and communicate with the hub + hubRegistration: false + + # Optional: Overwrite if addon already exists + overwrite: false + + # Optional: ClusterRole binding for addon agent in the cluster namespace on the Hub cluster + clusterRoleBinding: "cluster-admin" +``` + +#### AddOnConfig Fields + +- **name** (required): The name of the addon +- **version** (optional): The addon version, defaults to "v0.0.1" +- **clusterRoleBinding** (optional): RoleBinding to a ClusterRole in the cluster namespace for the addon agent +- **hubRegistration** (optional): Enable agent registration to the hub cluster (default: false) +- **overwrite** (optional): Overwrite the addon if it already exists (default: false) + +### Enabling Custom Addons on Spoke Clusters + +Once you've defined an `AddOnConfig` in your Hub, you can enable it on specific spoke clusters by referencing it in the Spoke resource: + +```yaml +apiVersion: fleetconfig.open-cluster-management.io/v1beta1 +kind: Spoke +metadata: + name: my-spoke +spec: + # ... other spoke configuration + + addOns: + - configName: myapp # Must match an AddOnConfig.name or HubAddOn.name + + # Optional: Add annotations to the addon + annotations: + example.com/type: "custom" + + # Optional: Provide deployment configuration + deploymentConfig: + nodePlacement: + nodeSelector: + node-role.kubernetes.io/worker: "" + customizedVariables: + - name: IMAGE + value: "quay.io/myorg/myapp:v1.0.0" +``` + +#### AddOn Fields + +- **configName** (required): The name of the addon, must match an `AddOnConfig.name` or `HubAddOn.name` +- **annotations** (optional): Annotations to apply to the addon +- **deploymentConfig** (optional): Additional configuration for addon deployment, creates an `AddOnDeploymentConfig` resource + +The `deploymentConfig` field accepts the full [AddOnDeploymentConfigSpec](https://github.com/open-cluster-management-io/api/blob/main/addon/v1alpha1/types_addondeploymentconfig.go) from OCM, allowing you to configure: +- Agent install namespace +- Node placement (node selectors, tolerations) +- Proxy settings and image registry mirrors +- Custom variables for manifest templating +- Resource requirements + +## Built-in Hub Addons + +FCC includes two built-in addons that can be easily enabled without requiring a ConfigMap: + +1. **argocd** - ArgoCD GitOps controller +2. **governance-policy-framework** - OCM governance policy framework + +### Configuring Built-in Hub Addons + +Add `HubAddOn` entries to your Hub resource: + +```yaml +apiVersion: fleetconfig.open-cluster-management.io/v1beta1 +kind: Hub +metadata: + name: my-hub +spec: + # ... other hub configuration + + hubAddOns: + - name: argocd + createNamespace: true + + - name: governance-policy-framework + installNamespace: open-cluster-management-addon + createNamespace: false +``` + +#### HubAddOn Fields + +- **name** (required): The name of the built-in addon. Must be one of: `argocd`, `governance-policy-framework` +- **installNamespace** (optional): The namespace to install the addon in.
Defaults to "open-cluster-management-addon". Not supported for `argocd`. +- **createNamespace** (optional): Whether to create the namespace if it doesn't exist (default: false) + +### Enabling Built-in Addons on Spoke Clusters + +Built-in hub addons can be enabled on spoke clusters the same way as custom addons: + +```yaml +apiVersion: fleet.ocm.io/v1beta1 +kind: Spoke +metadata: + name: my-spoke +spec: + addOns: + - configName: argocd + - configName: governance-policy-framework +``` + +## Implementation Details + +The addon creation logic in FCC (found in `internal/controller/v1beta1/addon.go`) performs the following operations: + +### For Custom Addons (`AddOnConfig`) + +1. **List existing addons**: Queries for `AddOnTemplate` resources with the `addon.open-cluster-management.io/managedBy` label +2. **Compare requested vs created**: Determines which addons need to be created or deleted +3. **Delete obsolete addons**: Removes addons no longer in the spec +4. **Create new addons**: For each new addon: + - Loads the ConfigMap containing manifests (`fleet-addon--`) + - Validates manifest configuration (either raw or URL) + - Constructs the `clusteradm addon create` command with appropriate flags + - Executes the command to create the `AddOnTemplate` + - Applies the `addon.open-cluster-management.io/managedBy` label for lifecycle tracking + +### Command Construction + +The controller builds `clusteradm` commands like: + +```bash +clusteradm addon create \ + --version= \ + --labels=addon.open-cluster-management.io/managedBy=fleetconfig-controller \ + --filename= \ + [--hub-registration] \ + [--overwrite] \ + [--cluster-role-bind=] \ + --kubeconfig= \ + --context= +``` + +### For Built-in Hub Addons + +Built-in hub addons follow a similar pattern but use pre-configured manifest sources from the `clusteradm` tool itself. + +## Best Practices + +1. **Version Management**: Always specify explicit versions for your custom addons to ensure predictable deployments. [Semantic versioning](https://semver.org/) is recommended, but not required. +2. **ConfigMap Naming**: Follow the naming convention strictly: `fleet-addon--` +3. **Manifest Sources**: Use `manifestsURL` for large manifests or those stored in git; use `manifestsRaw` for simple, small manifests +4. **Hub Registration**: Only enable `hubRegistration` when your addon agent needs to communicate back to the hub +5. **Namespace Management**: For built-in hub addons, set `createNamespace: true` if you're unsure whether the namespace exists +6. **Labels**: FCC automatically applies the `addon.open-cluster-management.io/managedBy` label to track resources it manages +7. 
**Cleanup**: When removing an addon from the Hub spec, FCC will automatically delete the corresponding `AddOnTemplate` + +## Troubleshooting + +### Addon Not Created + +- Verify the ConfigMap exists with the correct name format +- Check the Hub controller logs for clusteradm command errors +- Ensure the manifest source (raw or URL) is valid + +### Addon Not Installing on Spoke + +- Confirm the `configName` matches an existing `AddOnConfig` or `HubAddOn` name +- Check the spoke controller logs for addon reconciliation errors +- Verify the addon template exists in the hub cluster + +### Version Conflicts + +- If you need to update an addon version, create a new ConfigMap with the new version +- Update the `AddOnConfig` version in the Hub spec +- The old addon template will be automatically removed + diff --git a/fleetconfig-controller/hack/.versions.env b/fleetconfig-controller/hack/.versions.env index 99d57843..5ddd9b6a 100644 --- a/fleetconfig-controller/hack/.versions.env +++ b/fleetconfig-controller/hack/.versions.env @@ -1,4 +1,4 @@ AWSIAMAUTH_VERSION=0.7.2 CERT_MANAGER_VERSION=v1.18.1 -CLUSTERADM_VERSION=1.0.2 +CLUSTERADM_VERSION=1.1.0 OCM_VERSION=1.0.0 diff --git a/fleetconfig-controller/internal/controller/v1beta1/addon.go b/fleetconfig-controller/internal/controller/v1beta1/addon.go index ad0fafd5..9ff94d81 100644 --- a/fleetconfig-controller/internal/controller/v1beta1/addon.go +++ b/fleetconfig-controller/internal/controller/v1beta1/addon.go @@ -2,8 +2,8 @@ package v1beta1 import ( "context" - "encoding/json" "fmt" + "maps" "net/url" "os" "os/exec" @@ -17,6 +17,7 @@ import ( kerrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/yaml" "k8s.io/apimachinery/pkg/types" addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" @@ -30,6 +31,7 @@ import ( arg_utils "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/args" exec_utils "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/exec" "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/file" + "github.com/open-cluster-management-io/lab/fleetconfig-controller/internal/hash" ) // getManagedClusterAddOns returns the list of ManagedClusterAddOns currently installed on a spoke cluster @@ -278,50 +280,82 @@ func handleSpokeAddons(ctx context.Context, addonC *addonapi.Clientset, spoke *v logger := log.FromContext(ctx) addons := spoke.Spec.AddOns - // Get actual enabled addons from cluster instead of status + // Get actual enabled addons from cluster enabledAddons, err := getManagedClusterAddOns(ctx, addonC, spoke.Name) if err != nil { logger.V(1).Info("failed to get ManagedClusterAddOns, assuming none enabled", "error", err, "spokeName", spoke.Name) enabledAddons = []string{} } + // Build a map of MCA objects to read their current config hash annotations + mcas, err := addonC.AddonV1alpha1().ManagedClusterAddOns(spoke.Name).List(ctx, metav1.ListOptions{ + LabelSelector: v1beta1.ManagedBySelector.String(), + }) + if err != nil { + logger.V(1).Info("failed to list ManagedClusterAddOns", "error", err, "spokeName", spoke.Name) + mcas = &addonv1alpha1.ManagedClusterAddOnList{} + } + + mcaMap := make(map[string]addonv1alpha1.ManagedClusterAddOn) + for _, mca := range mcas.Items { + mcaMap[mca.Name] = mca + } + if len(addons) == 0 && len(enabledAddons) == 0 { // nothing to do return enabledAddons, nil } - // compare existing to requested - requestedAddonNames := make([]string, len(addons)) 
- for i, addon := range addons { - requestedAddonNames[i] = addon.ConfigName + // Build a map of requested addons + requestedAddonMap := make(map[string]v1beta1.AddOn) + for _, addon := range addons { + requestedAddonMap[addon.ConfigName] = addon } - // Find addons that need to be enabled (present in requested, missing from enabledAddons) + // Find addons that need to be enabled or re-enabled due to config changes addonsToEnable := make([]v1beta1.AddOn, 0) - for i, requestedName := range requestedAddonNames { - if !slices.Contains(enabledAddons, requestedName) { - addonsToEnable = append(addonsToEnable, addons[i]) + addonsToDisable := make([]string, 0) + + for _, addon := range addons { + // Calculate hash of the deployment config + configHash, err := hash.ComputeHash(addon) + if err != nil { + logger.V(1).Info("failed to compute config hash, will re-enable addon", "addon", addon.ConfigName, "error", err) + configHash = "" + } + + // Check if addon needs to be enabled/re-enabled + isCurrentlyEnabled := slices.Contains(enabledAddons, addon.ConfigName) + var previousHash string + if mca, exists := mcaMap[addon.ConfigName]; exists { + previousHash = mca.Annotations[v1beta1.AnnotationAddOnDeploymentConfigHash] + } + + // Enable if: not currently enabled OR config hash has changed + if !isCurrentlyEnabled { + addonsToEnable = append(addonsToEnable, addon) + } else if isCurrentlyEnabled && previousHash != configHash { + // Config hash changed - need to disable first, then re-enable. clusteradm does not support in-place update + logger.V(1).Info("addon config changed, will disable then re-enable", "addon", addon.ConfigName, + "oldHash", previousHash, "newHash", configHash) + addonsToDisable = append(addonsToDisable, addon.ConfigName) + addonsToEnable = append(addonsToEnable, addon) } } - // Find addons that need to be disabled (present in enabledAddons, missing from requested) - addonsToDisable := make([]string, 0) - for _, enabledAddon := range enabledAddons { - if !slices.Contains(requestedAddonNames, enabledAddon) { - addonsToDisable = append(addonsToDisable, enabledAddon) + for _, enabledAddonName := range enabledAddons { + if _, requested := requestedAddonMap[enabledAddonName]; !requested { + addonsToDisable = append(addonsToDisable, enabledAddonName) } } // do disables first, then enables/updates err = handleAddonDisable(ctx, spoke, addonsToDisable) if err != nil { - spoke.SetConditions(true, v1beta1.NewCondition( - err.Error(), v1beta1.AddonsConfigured, metav1.ConditionFalse, metav1.ConditionTrue, - )) return enabledAddons, err } - // Remove disabled addons from enabledAddons + // Remove disabled addons from enabled list for _, disabledAddon := range addonsToDisable { enabledAddons = slices.DeleteFunc(enabledAddons, func(ea string) bool { return ea == disabledAddon @@ -329,23 +363,17 @@ func handleSpokeAddons(ctx context.Context, addonC *addonapi.Clientset, spoke *v } // Enable new addons and updated addons - newEnabledAddons, err := handleAddonEnable(ctx, spoke, addonsToEnable, addonC) + newEnabledAddons, err := handleAddonEnable(ctx, spoke, addonsToEnable) // even if an error is returned, any addon which was successfully enabled is tracked, so append before returning enabledAddons = append(enabledAddons, newEnabledAddons...) 
if err != nil { - spoke.SetConditions(true, v1beta1.NewCondition( - err.Error(), v1beta1.AddonsConfigured, metav1.ConditionFalse, metav1.ConditionTrue, - )) return enabledAddons, err } - spoke.SetConditions(true, v1beta1.NewCondition( - v1beta1.AddonsConfigured, v1beta1.AddonsConfigured, metav1.ConditionTrue, metav1.ConditionTrue, - )) return enabledAddons, nil } -func handleAddonEnable(ctx context.Context, spoke *v1beta1.Spoke, addons []v1beta1.AddOn, addonC *addonapi.Clientset) ([]string, error) { +func handleAddonEnable(ctx context.Context, spoke *v1beta1.Spoke, addons []v1beta1.AddOn) ([]string, error) { if len(addons) == 0 { return nil, nil } @@ -366,38 +394,60 @@ func handleAddonEnable(ctx context.Context, spoke *v1beta1.Spoke, addons []v1bet args := []string{ fmt.Sprintf("--names=%s", a.ConfigName), } - if a.InstallNamespace != "" { - args = append(args, fmt.Sprintf("--namespace=%s", a.InstallNamespace)) + + // Calculate the deployment config hash + configHash, err := hash.ComputeHash(a) + if err != nil { + logger.V(1).Info("failed to compute config hash for addon", "addon", a.ConfigName, "error", err) + configHash = "" } - var annots []string - for k, v := range a.Annotations { - annots = append(annots, fmt.Sprintf("%s=%s", k, v)) + + // Build annotations map, including the deployment config hash + annotMap := make(map[string]string) + maps.Copy(annotMap, a.Annotations) + + // Add the deployment config hash annotation + if configHash != "" { + annotMap[v1beta1.AnnotationAddOnDeploymentConfigHash] = configHash } - annot := strings.Join(annots, ",") - if annot != "" { + + // Convert annotations map to comma-separated string + if len(annotMap) > 0 { + annots := make([]string, 0, len(annotMap)) + for k, v := range annotMap { + annots = append(annots, fmt.Sprintf("%s=%s", k, v)) + } + annot := strings.Join(annots, ",") args = append(args, fmt.Sprintf("--annotate=%s", annot)) } + // If DeploymentConfig is provided, generate the config file + if a.DeploymentConfig != nil { + configFilePath, cleanup, err := createAddonDeploymentConfigFile(a.ConfigName, spoke.Name, a.DeploymentConfig) + if cleanup != nil { + defer cleanup() + } + if err != nil { + enableErrs = append(enableErrs, fmt.Errorf("failed to create addon deployment config for %s: %v", a.ConfigName, err)) + continue + } + + args = append(args, fmt.Sprintf("--config-file=%s", configFilePath)) + } + args = append(baseArgs, args...) logger.V(7).Info("running", "command", clusteradm, "args", arg_utils.SanitizeArgs(args)) cmd := exec.Command(clusteradm, args...) stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm addon enable' to complete...") + if err != nil { out := append(stdout, stderr...) enableErrs = append(enableErrs, fmt.Errorf("failed to enable addon: %v, output: %s", err, string(out))) continue } - // TODO - do this natively with clusteradm once https://github.com/open-cluster-management-io/clusteradm/issues/501 is resolved. 
- if a.ConfigName == v1beta1.FCCAddOnName { - err = patchFCCMca(ctx, spoke.Name, addonC) - if err != nil { - enableErrs = append(enableErrs, err) - continue - } - } enabledAddons = append(enabledAddons, a.ConfigName) - logger.V(1).Info("enabled addon", "managedcluster", spoke.Name, "addon", a.ConfigName, "output", string(stdout)) + logger.V(1).Info("enabled addon", "managedcluster", spoke.Name, "addon", a.ConfigName, "configHash", configHash, "output", string(stdout)) } if len(enableErrs) > 0 { @@ -406,46 +456,35 @@ func handleAddonEnable(ctx context.Context, spoke *v1beta1.Spoke, addons []v1bet return enabledAddons, nil } -func patchFCCMca(ctx context.Context, spokeName string, addonC *addonapi.Clientset) error { - mca, err := addonC.AddonV1alpha1().ManagedClusterAddOns(spokeName).Get(ctx, v1beta1.FCCAddOnName, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("failed to configure %s: %v", v1beta1.FCCAddOnName, err) - } - desired := addonv1alpha1.AddOnConfig{ - ConfigGroupResource: addonv1alpha1.ConfigGroupResource{ - Group: addonv1alpha1.GroupName, - Resource: AddOnDeploymentConfigResource, +// createAddonDeploymentConfigFile creates a temporary file containing an AddOnDeploymentConfig resource +// that can be passed to clusteradm addon enable via --config-file. +func createAddonDeploymentConfigFile(addonName, namespace string, config *addonv1alpha1.AddOnDeploymentConfigSpec) (string, func(), error) { + // Build the AddOnDeploymentConfig resource + addonDeploymentConfig := &addonv1alpha1.AddOnDeploymentConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "addon.open-cluster-management.io/v1alpha1", + Kind: "AddOnDeploymentConfig", }, - ConfigReferent: addonv1alpha1.ConfigReferent{ - Name: v1beta1.FCCAddOnName, - Namespace: spokeName, + ObjectMeta: metav1.ObjectMeta{ + Name: addonName, + Namespace: namespace, }, + Spec: *config, } - if slices.ContainsFunc(mca.Spec.Configs, func(c addonv1alpha1.AddOnConfig) bool { - return c.Group == desired.Group && - c.Resource == desired.Resource && - c.Name == desired.Name && - c.Namespace == desired.Namespace - }) { - return nil - } - mca.Spec.Configs = append(mca.Spec.Configs, desired) - patchBytes, err := json.Marshal(map[string]any{ - "spec": map[string]any{"configs": mca.Spec.Configs}, - }) + + // Serialize to YAML + yamlBytes, err := yaml.Marshal(addonDeploymentConfig) if err != nil { - return fmt.Errorf("failed to marshal patch for %s: %v", v1beta1.FCCAddOnName, err) + return "", nil, fmt.Errorf("failed to marshal AddOnDeploymentConfig to YAML: %w", err) } - if _, err = addonC.AddonV1alpha1().ManagedClusterAddOns(spokeName).Patch( - ctx, - v1beta1.FCCAddOnName, - types.MergePatchType, - patchBytes, - metav1.PatchOptions{}, - ); err != nil { - return fmt.Errorf("failed to patch %s: %v", v1beta1.FCCAddOnName, err) + + // Write to temporary file + tmpFile, cleanup, err := file.TmpFile(yamlBytes, "yaml") + if err != nil { + return "", nil, fmt.Errorf("failed to create temporary config file: %w", err) } - return nil + + return tmpFile, cleanup, nil } func handleAddonDisable(ctx context.Context, spoke *v1beta1.Spoke, enabledAddons []string) error { diff --git a/fleetconfig-controller/internal/controller/v1beta1/spoke_handler.go b/fleetconfig-controller/internal/controller/v1beta1/spoke_handler.go index 207eacf6..bf30a74f 100644 --- a/fleetconfig-controller/internal/controller/v1beta1/spoke_handler.go +++ b/fleetconfig-controller/internal/controller/v1beta1/spoke_handler.go @@ -10,6 +10,7 @@ import ( "os/exec" "reflect" "slices" + "sort" 
"strconv" "strings" "time" @@ -337,41 +338,16 @@ func (r *SpokeReconciler) doHubWork(ctx context.Context, spoke *v1beta1.Spoke, h } if !spoke.IsHubAsSpoke() { - adc := &addonv1alpha1.AddOnDeploymentConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: v1beta1.FCCAddOnName, - Namespace: spoke.Name, - }, - Spec: addonv1alpha1.AddOnDeploymentConfigSpec{ - AgentInstallNamespace: os.Getenv(v1beta1.ControllerNamespaceEnvVar), - CustomizedVariables: []addonv1alpha1.CustomizedVariable{ - { - Name: v1beta1.HubNamespaceEnvVar, - Value: spoke.Spec.HubRef.Namespace, - }, - { - Name: v1beta1.SpokeNamespaceEnvVar, - Value: spoke.Namespace, - }, - { - Name: v1beta1.PurgeAgentNamespaceEnvVar, - Value: strconv.FormatBool(spoke.Spec.CleanupConfig.PurgeAgentNamespace), - }, - }, - }, - } - _, err = addonC.AddonV1alpha1().AddOnDeploymentConfigs(spoke.Name).Create(ctx, adc, metav1.CreateOptions{}) - if err != nil && !kerrs.IsAlreadyExists(err) { - return err - } - err = r.bindAddonAgent(ctx, spoke) if err != nil { return err } } - enabledAddons, err := handleSpokeAddons(ctx, addonC, spoke) + spokeCopy := spoke.DeepCopy() + r.configureFCCAddOn(spokeCopy) + + enabledAddons, err := handleSpokeAddons(ctx, addonC, spokeCopy) if err != nil { msg := fmt.Sprintf("failed to enable addons for spoke cluster %s: %s", spoke.Name, err.Error()) spoke.SetConditions(true, v1beta1.NewCondition( @@ -379,10 +355,88 @@ func (r *SpokeReconciler) doHubWork(ctx context.Context, spoke *v1beta1.Spoke, h )) return err } + + if len(enabledAddons) > 0 { + spoke.SetConditions(true, v1beta1.NewCondition( + v1beta1.AddonsConfigured, v1beta1.AddonsConfigured, metav1.ConditionTrue, metav1.ConditionTrue, + )) + } spoke.Status.EnabledAddons = enabledAddons return nil } +func (r *SpokeReconciler) configureFCCAddOn(spoke *v1beta1.Spoke) { + if spoke.IsHubAsSpoke() || r.InstanceType == v1beta1.InstanceTypeUnified { + return + } + + fccIdx := -1 + for i, addon := range spoke.Spec.AddOns { + if addon.ConfigName == v1beta1.FCCAddOnName { + fccIdx = i + break + } + } + + if fccIdx == -1 { + return + } + + // Merge FCC-specific config with any existing config + if spoke.Spec.AddOns[fccIdx].DeploymentConfig == nil { + spoke.Spec.AddOns[fccIdx].DeploymentConfig = &addonv1alpha1.AddOnDeploymentConfigSpec{} + } + + // Set agent install namespace if not already set + if spoke.Spec.AddOns[fccIdx].DeploymentConfig.AgentInstallNamespace == "" { + spoke.Spec.AddOns[fccIdx].DeploymentConfig.AgentInstallNamespace = os.Getenv(v1beta1.ControllerNamespaceEnvVar) + } + + // Append FCC-required customized variables + fccVariables := []addonv1alpha1.CustomizedVariable{ + { + Name: v1beta1.HubNamespaceEnvVar, + Value: spoke.Spec.HubRef.Namespace, + }, + { + Name: v1beta1.SpokeNamespaceEnvVar, + Value: spoke.Namespace, + }, + { + Name: v1beta1.PurgeAgentNamespaceEnvVar, + Value: strconv.FormatBool(spoke.Spec.CleanupConfig.PurgeAgentNamespace), + }, + } + + // Merge variables - default controller variables take precedence + existingVars := spoke.Spec.AddOns[fccIdx].DeploymentConfig.CustomizedVariables + varMap := make(map[string]string) + + for _, v := range existingVars { + varMap[v.Name] = v.Value + } + + for _, v := range fccVariables { + varMap[v.Name] = v.Value + } + + // Convert back to slice + mergedVars := make([]addonv1alpha1.CustomizedVariable, 0, len(varMap)) + for name, value := range varMap { + mergedVars = append(mergedVars, addonv1alpha1.CustomizedVariable{ + Name: name, + Value: value, + }) + } + + // Sort by name to ensure deterministic ordering and prevent 
false hash drift + sort.Slice(mergedVars, func(i, j int) bool { + return mergedVars[i].Name < mergedVars[j].Name + }) + + spoke.Spec.AddOns[fccIdx].DeploymentConfig.CustomizedVariables = mergedVars +} + func (r *SpokeReconciler) createAgentNamespace(ctx context.Context, spoke *v1beta1.Spoke) error { logger := log.FromContext(ctx) @@ -609,6 +663,7 @@ func (r *SpokeReconciler) hubCleanupPreflight(ctx context.Context, spoke *v1beta if !shouldCleanAll { spokeCopy.Spec.AddOns = append(spokeCopy.Spec.AddOns, v1beta1.AddOn{ConfigName: v1beta1.FCCAddOnName}) + r.configureFCCAddOn(spokeCopy) } if _, err := handleSpokeAddons(ctx, addonC, spokeCopy); err != nil { spoke.SetConditions(true, v1beta1.NewCondition( @@ -616,6 +671,10 @@ func (r *SpokeReconciler) hubCleanupPreflight(ctx context.Context, spoke *v1beta )) return true, err } + // Success - addons disabled/cleaned up + spoke.SetConditions(true, v1beta1.NewCondition( + v1beta1.AddonsConfigured, v1beta1.AddonsConfigured, metav1.ConditionTrue, metav1.ConditionTrue, + )) // Apply terminating taint to remove all remaining workloads including addons. // This taint should not be tolerated by anything - it signals final cluster termination. @@ -659,6 +718,10 @@ func (r *SpokeReconciler) waitForAgentAddonDeleted(ctx context.Context, spoke *v )) return err } + // Success - addon deleted + spoke.SetConditions(true, v1beta1.NewCondition( + v1beta1.AddonsConfigured, v1beta1.AddonsConfigured, metav1.ConditionTrue, metav1.ConditionTrue, + )) // at this point, klusterlet-work-agent is uninstalled, so nothing can remove this finalizer. all resources are cleaned up by the spoke's controller, so to prevent a dangling mw/namespace, we remove the finalizer manually mwList, err := workC.WorkV1().ManifestWorks(spoke.Name).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", manifestWorkAddOnLabelKey, v1beta1.FCCAddOnName)}) diff --git a/fleetconfig-controller/test/data/addon-2-cm.yaml b/fleetconfig-controller/test/data/addon-2-cm.yaml index e4507faa..c43099bd 100644 --- a/fleetconfig-controller/test/data/addon-2-cm.yaml +++ b/fleetconfig-controller/test/data/addon-2-cm.yaml @@ -7,4 +7,14 @@ data: apiVersion: v1 kind: Namespace metadata: - name: test-addon-2 \ No newline at end of file + name: test-addon + --- + apiVersion: v1 + kind: ConfigMap + metadata: + name: addon-variables-test + namespace: test-addon + data: + CLUSTER_NAME: '{{CLUSTER_NAME}}' + FOO: '{{FOO}}' + NEW_KEY: new-value \ No newline at end of file diff --git a/fleetconfig-controller/test/data/fleetconfig-values.yaml b/fleetconfig-controller/test/data/fleetconfig-values.yaml index c8398162..07bdcdad 100644 --- a/fleetconfig-controller/test/data/fleetconfig-values.yaml +++ b/fleetconfig-controller/test/data/fleetconfig-values.yaml @@ -10,6 +10,15 @@ fleetConfig: kind: Namespace metadata: name: test-addon + --- + apiVersion: v1 + kind: ConfigMap + metadata: + name: addon-variables-test + namespace: test-addon + data: + CLUSTER_NAME: '{{CLUSTER_NAME}}' + FOO: '{{FOO}}' clusterRoleBinding: "" hubRegistration: false overwrite: false @@ -38,6 +47,13 @@ fleetConfig: namespace: fleetconfig-system addOns: - configName: test-addon + deploymentConfig: + agentInstallNamespace: test-addon + customizedVariables: + - name: CLUSTER_NAME + value: spoke + - name: FOO + value: initial-foo-value - configName: fleetconfig-controller-agent createNamespace: true syncLabels: false diff --git a/fleetconfig-controller/test/e2e/helper.go b/fleetconfig-controller/test/e2e/helper.go index 02b17f82..9752812e 100644 --- 
a/fleetconfig-controller/test/e2e/helper.go +++ b/fleetconfig-controller/test/e2e/helper.go @@ -40,6 +40,12 @@ const ( kubeconfigSecretKey = "value" hubAsSpokeName = v1alpha1.ManagedClusterTypeHubAsSpoke spokeName = v1alpha1.ManagedClusterTypeSpoke + + // addon variable ConfigMap and data keys + addonVariablesConfigMapName = "addon-variables-test" + addonClusterNameKey = "CLUSTER_NAME" + addonFooKey = "FOO" + addonNewKey = "NEW_KEY" ) var ( @@ -72,7 +78,7 @@ var ( }, { name: "test-addon", - namespace: "test-addon-2", + namespace: "test-addon", version: "v2.0.0", }, } @@ -413,6 +419,54 @@ func ensureAddonCreated(tc *E2EContext, addonIdx int) { }, 2*time.Minute, 1*time.Second).Should(Succeed()) } +func ensureAddonVariablesResolved(tc *E2EContext, addonIdx int, expectedClusterName, expectedFooValue string) { + By("verifying that addon variables are correctly resolved in deployed resources") + EventuallyWithOffset(1, func() error { + addon := addonData[addonIdx] + // Check ConfigMap data + cm := corev1.ConfigMap{} + if err := tc.kClientSpoke.Get(tc.ctx, ktypes.NamespacedName{Name: addonVariablesConfigMapName, Namespace: addon.namespace}, &cm); err != nil { + utils.WarnError(err, "failed to get ConfigMap %s in namespace %s", addonVariablesConfigMapName, addon.namespace) + return err + } + + if clusterNameData, ok := cm.Data[addonClusterNameKey]; !ok { + err := fmt.Errorf("%s data key not found in ConfigMap", addonClusterNameKey) + utils.WarnError(err, "ConfigMap missing expected data key") + return err + } else if clusterNameData != expectedClusterName { + err := fmt.Errorf("wrong %s data value. want %s, got %s", addonClusterNameKey, expectedClusterName, clusterNameData) + utils.WarnError(err, fmt.Sprintf("%s data mismatch", addonClusterNameKey)) + return err + } + + if fooData, ok := cm.Data[addonFooKey]; !ok { + err := fmt.Errorf("%s data key not found in ConfigMap", addonFooKey) + utils.WarnError(err, "ConfigMap missing expected data key") + return err + } else if fooData != expectedFooValue { + err := fmt.Errorf("wrong %s data value. want %s, got %s", addonFooKey, expectedFooValue, fooData) + utils.WarnError(err, fmt.Sprintf("%s data mismatch", addonFooKey)) + return err + } + + if addonIdx == 1 { + if newKeyData, ok := cm.Data[addonNewKey]; !ok { + err := fmt.Errorf("%s data key not found in ConfigMap", addonNewKey) + utils.WarnError(err, "ConfigMap missing expected data key") + return err + } else if newKeyData != "new-value" { + err := fmt.Errorf("wrong %s data value. 
want %s, got %s", addonNewKey, "new-value", newKeyData) + utils.WarnError(err, fmt.Sprintf("%s data mismatch", addonNewKey)) + return err + } + } + + utils.Info(fmt.Sprintf("verified addon variables resolved correctly: %s=%s, %s=%s", addonClusterNameKey, expectedClusterName, addonFooKey, expectedFooValue)) + return nil + }, 2*time.Minute, 1*time.Second).Should(Succeed()) +} + func updateFleetConfigAddon(tc *E2EContext, fc *v1alpha1.FleetConfig) { By("creating a configmap containing the source manifests") EventuallyWithOffset(1, func() error { return createAddOnConfigMap(tc) }, 1*time.Minute, 1*time.Second).Should(Succeed()) @@ -439,7 +493,7 @@ func updateHubAddon(tc *E2EContext, hub *v1beta1.Hub) { By("adding a new version of test-addon") addon := addonData[1] if err := tc.kClient.Get(tc.ctx, v1beta1hubNN, hub); err != nil { - utils.WarnError(err, "failed to get FleetConfig") + utils.WarnError(err, "failed to get Hub") ExpectWithOffset(1, err).NotTo(HaveOccurred()) } hub.Spec.AddOnConfigs = append(hub.Spec.AddOnConfigs, v1beta1.AddOnConfig{ diff --git a/fleetconfig-controller/test/e2e/v1beta1_hub_spoke.go b/fleetconfig-controller/test/e2e/v1beta1_hub_spoke.go index 3c795ddd..2bf3722c 100644 --- a/fleetconfig-controller/test/e2e/v1beta1_hub_spoke.go +++ b/fleetconfig-controller/test/e2e/v1beta1_hub_spoke.go @@ -29,6 +29,7 @@ import ( kerrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ktypes "k8s.io/apimachinery/pkg/types" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" operatorv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/pkg/operator/helpers/chart" "sigs.k8s.io/controller-runtime/pkg/client" @@ -103,6 +104,11 @@ var _ = Describe("hub and spoke", Label("v1beta1"), Serial, Ordered, func() { ensureAddonCreated(tc, 0) }) + It("should verify initial addon variables are correctly resolved", func() { + By("verifying that the initial FOO and CLUSTER_NAME variables are resolved in deployed resources") + ensureAddonVariablesResolved(tc, 0, spokeName, "initial-foo-value") + }) + It("should verify spoke cluster annotations", func() { EventuallyWithOffset(1, func() error { klusterlet := &operatorv1.Klusterlet{} @@ -235,9 +241,50 @@ var _ = Describe("hub and spoke", Label("v1beta1"), Serial, Ordered, func() { Expect(utils.UpdateHubFeatureGates(tc.ctx, tc.kClient, hub, patchFeatureGates)).ToNot(Succeed()) }) - It("should update an addon and make sure its propagated to the spoke", func() { + It("should update addon variable values and verify resources are updated", func() { + By("updating the spoke addon config to change the FOO variable value") + EventuallyWithOffset(1, func() error { + err := tc.kClient.Get(tc.ctx, v1beta1spokeNN, spokeClone) + if err != nil { + utils.WarnError(err, "failed to get spoke") + return err + } + + // Find the test-addon in the addons list and update the FOO variable value + for i, addon := range spokeClone.Spec.AddOns { + if addon.ConfigName == "test-addon" { + if spokeClone.Spec.AddOns[i].DeploymentConfig == nil { + spokeClone.Spec.AddOns[i].DeploymentConfig = &addonv1alpha1.AddOnDeploymentConfigSpec{} + } + // Update FOO variable value (keep CLUSTER_NAME the same) + spokeClone.Spec.AddOns[i].DeploymentConfig.CustomizedVariables = []addonv1alpha1.CustomizedVariable{ + { + Name: "FOO", + Value: "updated-foo-value", + }, + } + break + } + } + + err = tc.kClient.Update(tc.ctx, spokeClone) + if err != nil { + utils.WarnError(err, "failed to update spoke") + return err + } + return nil + }, 
1*time.Minute, 5*time.Second).Should(Succeed()) + + By("verifying that the updated FOO variable is correctly resolved in deployed resources") + ensureAddonVariablesResolved(tc, 0, spokeName, "updated-foo-value") + }) + + It("should update addon template version and verify new resources are deployed", func() { updateHubAddon(tc, hub) ensureAddonCreated(tc, 1) + + By("verifying that the new addon template (v2.0.0) has variables correctly resolved") + ensureAddonVariablesResolved(tc, 1, spokeName, "updated-foo-value") }) It("should delete a Spoke", func() {