diff --git a/.github/repositories.json b/.github/repositories.json index f21a1feb..3c97c6c5 100644 --- a/.github/repositories.json +++ b/.github/repositories.json @@ -1,5 +1,5 @@ { "fleetconfig-controller": { - "e2e-artifacts": ["hub-bundle.tar.gz", "spoke-bundle.tar.gz"] + "e2e-artifacts": ["test/e2e/hub-bundle.tar.gz", "test/e2e/spoke-bundle.tar.gz"] } } \ No newline at end of file diff --git a/fleetconfig-controller/Makefile b/fleetconfig-controller/Makefile index c0307e39..563035a1 100644 --- a/fleetconfig-controller/Makefile +++ b/fleetconfig-controller/Makefile @@ -115,7 +115,7 @@ test-unit: manifests generate fmt vet envtest ## Run unit tests. KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" \ go test -v \ -coverpkg=./... \ - -coverprofile=cover.out \ + -coverprofile=$(COVER_DIR)/unit/cover.out \ $(shell go list ./... | grep -v '/test/e2e') .PHONY: test-e2e diff --git a/fleetconfig-controller/api/v1alpha1/fleetconfig_types.go b/fleetconfig-controller/api/v1alpha1/fleetconfig_types.go index 4ee43e2b..3e20b6a3 100644 --- a/fleetconfig-controller/api/v1alpha1/fleetconfig_types.go +++ b/fleetconfig-controller/api/v1alpha1/fleetconfig_types.go @@ -28,13 +28,27 @@ import ( type FleetConfigSpec struct { // +required Hub Hub `json:"hub"` + // +required Spokes []Spoke `json:"spokes"` + // +kubebuilder:default:={} // +optional - RegistrationAuth RegistrationAuth `json:"registrationAuth,omitempty"` + RegistrationAuth RegistrationAuth `json:"registrationAuth,omitzero"` + // +optional AddOnConfigs []AddOnConfig `json:"addOnConfigs,omitempty"` + + // Timeout is the timeout in seconds for all clusteradm operations, including init, accept, join, upgrade, etc. + // +kubebuilder:default:=300 + // +optional + Timeout int `json:"timeout,omitempty"` + + // LogVerbosity is the verbosity of the logs. 
+ // +kubebuilder:validation:Enum=0;1;2;3;4;5;6;7;8;9;10 + // +kubebuilder:default:=0 + // +optional + LogVerbosity int `json:"logVerbosity,omitempty"` } // FleetConfigStatus defines the observed state of FleetConfig. @@ -147,6 +161,16 @@ func (c Condition) Equal(other Condition) bool { // Hub provides specifications for an OCM hub cluster. type Hub struct { + // APIServer is the API server URL for the Hub cluster. If provided, spoke clusters will + // join the hub using this API server instead of the one in the bootstrap kubeconfig. + // Spoke clusters with ForceInternalEndpointLookup set to true will ignore this field. + // +optional + APIServer string `json:"apiServer,omitempty"` + + // Hub cluster CA certificate, optional + // +optional + Ca string `json:"ca,omitempty"` + + // ClusterManager configuration. // +optional ClusterManager *ClusterManager `json:"clusterManager,omitempty"` @@ -168,12 +192,6 @@ type Hub struct { // This is an alpha stage flag. // +optional SingletonControlPlane *SingletonControlPlane `json:"singleton,omitempty"` - - // APIServer is the API server URL for the Hub cluster. If provided, the hub will be joined - // using this API server instead of the one in the obtained kubeconfig. This is useful when - // using in-cluster kubeconfig when that kubeconfig would return an incorrect API server URL. - // +optional - APIServer string `json:"apiServer,omitempty"` } // SingletonControlPlane is the configuration for a singleton control plane @@ -240,12 +258,12 @@ type ClusterManager struct { // Resource specifications for all clustermanager-managed containers. // +kubebuilder:default:={} // +optional - Resources ResourceSpec `json:"resources,omitempty"` + Resources ResourceSpec `json:"resources,omitzero"` // Version and image registry details for the cluster manager. 
// +kubebuilder:default:={} // +optional - Source OCMSource `json:"source,omitempty"` + Source OCMSource `json:"source,omitzero"` // If set, the bootstrap token will used instead of a service account token. // +optional @@ -301,6 +319,17 @@ type SecretReference struct { KubeconfigKey string `json:"kubeconfigKey,omitempty"` } +// ISpoke is an interface that both Spoke and JoinedSpoke implement. +// +kubebuilder:object:generate=false +type ISpoke interface { + GetName() string + GetKubeconfig() Kubeconfig + GetPurgeKlusterletOperator() bool +} + +var _ ISpoke = &Spoke{} +var _ ISpoke = &JoinedSpoke{} + // Spoke provides specifications for joining and potentially upgrading spokes. type Spoke struct { // The name of the spoke cluster. @@ -323,10 +352,6 @@ type Spoke struct { // +required Kubeconfig Kubeconfig `json:"kubeconfig"` - // Hub cluster CA certificate, optional - // +optional - Ca string `json:"ca,omitempty"` - // Proxy CA certificate, optional // +optional ProxyCa string `json:"proxyCa,omitempty"` @@ -338,7 +363,7 @@ type Spoke struct { // Klusterlet configuration. // +kubebuilder:default:={} // +optional - Klusterlet Klusterlet `json:"klusterlet,omitempty"` + Klusterlet Klusterlet `json:"klusterlet,omitzero"` // ClusterARN is the ARN of the spoke cluster. // This field is optionally used for AWS IRSA registration authentication. @@ -350,19 +375,19 @@ type Spoke struct { AddOns []AddOn `json:"addOns,omitempty"` } -// AddOn enables add-on installation on the cluster. -type AddOn struct { - // The name of the add-on being enabled. Must match one of the default or manually configured add-on names. - // +required - ConfigName string `json:"configName"` +// GetName returns the name of the spoke cluster. +func (s *Spoke) GetName() string { + return s.Name +} - // The namespace to install the add-on in. If left empty, installs into the "open-cluster-management-addon" namespace. 
- // +optional - InstallNamespace string `json:"installNamespace,omitempty"` +// GetKubeconfig returns the kubeconfig for the spoke cluster. +func (s *Spoke) GetKubeconfig() Kubeconfig { + return s.Kubeconfig +} - // Annotations to apply to the add-on. - // +optional - Annotations map[string]string `json:"annotations,omitempty"` +// GetPurgeKlusterletOperator returns the purge klusterlet operator flag. +func (s *Spoke) GetPurgeKlusterletOperator() bool { + return s.Klusterlet.PurgeOperator } // JoinType returns a status condition type indicating that a particular Spoke cluster has joined the Hub. @@ -378,6 +403,21 @@ func (s *Spoke) conditionName() string { return name } +// AddOn enables add-on installation on the cluster. +type AddOn struct { + // The name of the add-on being enabled. Must match one of the default or manually configured add-on names. + // +required + ConfigName string `json:"configName"` + + // The namespace to install the add-on in. If left empty, installs into the "open-cluster-management-addon" namespace. + // +optional + InstallNamespace string `json:"installNamespace,omitempty"` + + // Annotations to apply to the add-on. + // +optional + Annotations map[string]string `json:"annotations,omitempty"` +} + // JoinedSpoke represents a spoke that has been joined to a hub. type JoinedSpoke struct { // The name of the spoke cluster. @@ -393,6 +433,21 @@ type JoinedSpoke struct { PurgeKlusterletOperator bool `json:"purgeKlusterletOperator,omitempty"` } +// GetName returns the name of the joined spoke cluster. +func (j *JoinedSpoke) GetName() string { + return j.Name +} + +// GetKubeconfig returns the kubeconfig for the joined spoke cluster. +func (j *JoinedSpoke) GetKubeconfig() Kubeconfig { + return j.Kubeconfig +} + +// GetPurgeKlusterletOperator returns the purge klusterlet operator flag for the joined spoke cluster. 
+func (j *JoinedSpoke) GetPurgeKlusterletOperator() bool { + return j.PurgeKlusterletOperator +} + // UnjoinType returns a status condition type indicating that a particular Spoke cluster has been removed from the Hub. func (j *JoinedSpoke) UnjoinType() string { return fmt.Sprintf("spoke-cluster-%s-unjoined", j.conditionName()) @@ -445,7 +500,7 @@ type Klusterlet struct { // External managed cluster kubeconfig, required if using hosted mode. // +optional - ManagedClusterKubeconfig Kubeconfig `json:"managedClusterKubeconfig,omitempty"` + ManagedClusterKubeconfig Kubeconfig `json:"managedClusterKubeconfig,omitzero"` // If true, the klusterlet accesses the managed cluster using the internal endpoint from the public // cluster-info in the managed cluster instead of using managedClusterKubeconfig. @@ -455,7 +510,7 @@ type Klusterlet struct { // Resource specifications for all klusterlet-managed containers. // +kubebuilder:default:={} // +optional - Resources ResourceSpec `json:"resources,omitempty"` + Resources ResourceSpec `json:"resources,omitzero"` // If true, deploy klusterlet in singleton mode, with registration and work agents running in a single pod. // This is an alpha stage flag. @@ -465,7 +520,7 @@ type Klusterlet struct { // Version and image registry details for the klusterlet. // +kubebuilder:default:={} // +optional - Source OCMSource `json:"source,omitempty"` + Source OCMSource `json:"source,omitzero"` } // ResourceSpec defines resource limits and requests for all managed clusters. @@ -565,12 +620,20 @@ type AddOnConfig struct { // FleetConfig is the Schema for the fleetconfigs API. type FleetConfig struct { metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` + metav1.ObjectMeta `json:"metadata,omitzero"` - Spec FleetConfigSpec `json:"spec,omitempty"` + Spec FleetConfigSpec `json:"spec,omitzero"` Status FleetConfigStatus `json:"status,omitempty"` } +// BaseArgs returns the base arguments for all clusteradm commands. 
+func (m *FleetConfig) BaseArgs() []string { + return []string{ + fmt.Sprintf("--timeout=%d", m.Spec.Timeout), + fmt.Sprintf("--v=%d", m.Spec.LogVerbosity), + } +} + // GetCondition gets the condition with the supplied type, if it exists. func (m *FleetConfig) GetCondition(cType string) *Condition { return m.Status.GetCondition(cType) @@ -586,7 +649,7 @@ func (m *FleetConfig) SetConditions(cover bool, c ...Condition) { // FleetConfigList contains a list of FleetConfig. type FleetConfigList struct { metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` + metav1.ListMeta `json:"metadata,omitzero"` Items []FleetConfig `json:"items"` } diff --git a/fleetconfig-controller/charts/fleetconfig-controller/README.md b/fleetconfig-controller/charts/fleetconfig-controller/README.md index f3e27628..128191f7 100644 --- a/fleetconfig-controller/charts/fleetconfig-controller/README.md +++ b/fleetconfig-controller/charts/fleetconfig-controller/README.md @@ -66,6 +66,8 @@ Refer to the [Multicluster Controlplane configuration](https://github.com/open-c | Name | Description | Value | | --------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------- | | `fleetConfig.enabled` | Whether to create a FleetConfig resource. | `true` | +| `fleetConfig.timeout` | Timeout in seconds for all clusteradm operations, including init, accept, join, upgrade, etc. | `300` | +| `fleetConfig.logVerbosity` | Log verbosity. Valid values: 0-10, 0 is the least verbose, 10 is the most verbose. | `0` | | `fleetConfig.spokeAnnotations` | Global annotations to apply to all spoke clusters. 
If not present, the 'agent.open-cluster-management.io/' prefix is added to each key. Each annotation is added to klusterlet.spec.registrationConfiguration.clusterAnnotations on every spoke and subsequently to the ManagedClusters on the hub. Per-spoke annotations take precedence over the global annotations. | `{}` | | `fleetConfig.spokeFeatureGates.ClusterClaim` | ClusterClaim feature gate (ALPHA - default=true). Enables cluster claim functionality. | `true` | | `fleetConfig.spokeFeatureGates.RawFeedbackJsonString` | RawFeedbackJsonString feature gate (ALPHA - default=false). Enables raw feedback JSON string support. | `true` | @@ -79,6 +81,8 @@ Refer to the [Multicluster Controlplane configuration](https://github.com/open-c | `fleetConfig.hub.clusterManager.resources` | Resource specifications for all clustermanager-managed containers. | `{}` | | `fleetConfig.hub.createNamespace` | If true, create open-cluster-management namespace, otherwise use existing one. | `true` | | `fleetConfig.hub.force` | If set, the hub will be reinitialized. | `false` | +| `fleetConfig.hub.apiServer` | The Hub cluster's API Server. Required when configuring an EKS, or GKE FleetConfig. | `""` | +| `fleetConfig.hub.ca` | The Hub cluster's CA certificate. Optional. | `""` | | `fleetConfig.hub.kubeconfig.context` | The context to use in the kubeconfig file. Leave empty to use the current context. | `""` | | `fleetConfig.hub.kubeconfig.inCluster` | If set, the kubeconfig will be read from the cluster. Only applicable for same-cluster operations. | `true` | | `fleetConfig.spokes[0].name` | Name of the spoke cluster. | `hub-as-spoke` | @@ -86,7 +90,6 @@ Refer to the [Multicluster Controlplane configuration](https://github.com/open-c | `fleetConfig.spokes[0].syncLabels` | If true, sync the labels from klusterlet to all agent resources. | `false` | | `fleetConfig.spokes[0].kubeconfig.context` | The context to use in the kubeconfig file. Leave empty to use the current context. 
| `""` | | `fleetConfig.spokes[0].kubeconfig.inCluster` | If set, the kubeconfig will be read from the cluster. Only applicable for same-cluster operations. | `true` | -| `fleetConfig.spokes[0].ca` | Hub cluster CA certificate, optional. | `""` | | `fleetConfig.spokes[0].proxyCa` | Proxy CA certificate, optional. | `""` | | `fleetConfig.spokes[0].proxyUrl` | URL of a forward proxy server used by agents to connect to the Hub cluster, optional. | `""` | | `fleetConfig.spokes[0].klusterlet.annotations` | Annotations to apply to the spoke cluster. If not present, the 'agent.open-cluster-management.io/' prefix is added to each key. Each annotation is added to klusterlet.spec.registrationConfiguration.clusterAnnotations on the spoke and subsequently to the ManagedCluster on the hub. These annotations take precedence over the global spoke annotations. | `{}` | @@ -112,7 +115,7 @@ Refer to the [Multicluster Controlplane configuration](https://github.com/open-c | `replicas` | fleetconfig-controller replica count | `1` | | `imageRegistry` | Image registry | `""` | | `image.repository` | Image repository | `quay.io/open-cluster-management/fleetconfig-controller` | -| `image.tag` | Image tag | `v0.0.6` | +| `image.tag` | Image tag | `v0.0.7` | | `image.pullPolicy` | Image pull policy | `IfNotPresent` | | `imagePullSecrets` | Image pull secrets | `[]` | | `serviceAccount.annotations` | Annotations to add to the service account | `{}` | diff --git a/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io-crds.yaml b/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io-crds.yaml index 1bf413b5..3f11dc2f 100644 --- a/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io-crds.yaml +++ b/fleetconfig-controller/charts/fleetconfig-controller/crds/fleetconfig.open-cluster-management.io-crds.yaml @@ -91,9 +91,12 @@ spec: properties: apiServer: description: |- - 
APIServer is the API server URL for the Hub cluster. If provided, the hub will be joined - using this API server instead of the one in the obtained kubeconfig. This is useful when - using in-cluster kubeconfig when that kubeconfig would return an incorrect API server URL. + APIServer is the API server URL for the Hub cluster. If provided, spoke clusters will + join the hub using this API server instead of the one in the bootstrap kubeconfig. + Spoke clusters with ForceInternalEndpointLookup set to true will ignore this field. + type: string + ca: + description: Hub cluster CA certificate, optional + type: string clusterManager: description: ClusterManager configuration. @@ -270,6 +273,22 @@ spec: required: - kubeconfig type: object + logVerbosity: + default: 0 + description: LogVerbosity is the verbosity of the logs. + enum: + - 0 + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + type: integer registrationAuth: default: {} description: RegistrationAuth provides specifications for registration @@ -329,9 +348,6 @@ spec: - configName type: object type: array - ca: - description: Hub cluster CA certificate, optional - type: string clusterARN: description: |- ClusterARN is the ARN of the spoke cluster. @@ -545,6 +561,11 @@ spec: - name type: object type: array + timeout: + default: 300 + description: Timeout is the timeout in seconds for all clusteradm + operations, including init, accept, join, upgrade, etc. 
+ type: integer required: - hub - spokes @@ -666,6 +687,9 @@ spec: phase: type: string type: object + required: + - metadata + - spec type: object served: true storage: true diff --git a/fleetconfig-controller/charts/fleetconfig-controller/templates/fleetconfig.yaml b/fleetconfig-controller/charts/fleetconfig-controller/templates/fleetconfig.yaml index 2d73fb82..8920fb61 100644 --- a/fleetconfig-controller/charts/fleetconfig-controller/templates/fleetconfig.yaml +++ b/fleetconfig-controller/charts/fleetconfig-controller/templates/fleetconfig.yaml @@ -7,6 +7,8 @@ metadata: name: fleetconfig namespace: {{ .Release.Namespace }} spec: + timeout: {{ .Values.fleetConfig.timeout }} + logVerbosity: {{ .Values.fleetConfig.logVerbosity }} {{- with .Values.fleetConfig.registrationAuth }} registrationAuth: driver: {{ .driver | quote }} @@ -36,9 +38,8 @@ spec: createNamespace: {{ .Values.fleetConfig.hub.createNamespace }} force: {{ .Values.fleetConfig.hub.force }} kubeconfig: {{- toYaml .Values.fleetConfig.hub.kubeconfig | nindent 6 }} - {{- with .Values.fleetConfig.hub.apiServer }} - apiServer: {{ . 
| quote }} - {{- end }} + apiServer: {{ .Values.fleetConfig.hub.apiServer | quote }} + ca: {{ .Values.fleetConfig.hub.ca | quote }} spokes: {{- $spokeFeatureGates := .Values.fleetConfig.spokeFeatureGates }} {{- range .Values.fleetConfig.spokes }} @@ -46,7 +47,6 @@ spec: createNamespace: {{ .createNamespace }} syncLabels: {{ .syncLabels }} kubeconfig: {{- toYaml .kubeconfig | nindent 8 }} - ca: {{ .ca | quote }} proxyCa: {{ .proxyCa | quote }} proxyUrl: {{ .proxyUrl | quote }} {{- if .clusterARN }} diff --git a/fleetconfig-controller/charts/fleetconfig-controller/values.yaml b/fleetconfig-controller/charts/fleetconfig-controller/values.yaml index abb0030b..54178638 100644 --- a/fleetconfig-controller/charts/fleetconfig-controller/values.yaml +++ b/fleetconfig-controller/charts/fleetconfig-controller/values.yaml @@ -10,6 +10,10 @@ fleetConfig: ## @param fleetConfig.enabled Whether to create a FleetConfig resource. enabled: true + ## @param fleetConfig.timeout Timeout in seconds for all clusteradm operations, including init, accept, join, upgrade, etc. + timeout: 300 + ## @param fleetConfig.logVerbosity Log verbosity. Valid values: 0-10, 0 is the least verbose, 10 is the most verbose. + logVerbosity: 0 ## @param fleetConfig.spokeAnnotations Global annotations to apply to all spoke clusters. If not present, the 'agent.open-cluster-management.io/' prefix is added to each key. Each annotation is added to klusterlet.spec.registrationConfiguration.clusterAnnotations on every spoke and subsequently to the ManagedClusters on the hub. Per-spoke annotations take precedence over the global annotations. spokeAnnotations: {} ## @descriptionStart @@ -104,8 +108,10 @@ fleetConfig: createNamespace: true ## @param fleetConfig.hub.force If set, the hub will be reinitialized. force: false - ## The Hub clusters API Server. Required when configuring an EKS, or GKE FleetConfig. - # apiServer: "https://" + ## @param fleetConfig.hub.apiServer The Hub cluster's API Server. 
Required when configuring an EKS, or GKE FleetConfig. + apiServer: "" + ## @param fleetConfig.hub.ca The Hub cluster's CA certificate. Optional. + ca: "" ## Kubeconfig details for the Hub cluster. kubeconfig: ## @param fleetConfig.hub.kubeconfig.context The context to use in the kubeconfig file. Leave empty to use the current context. @@ -156,7 +162,6 @@ fleetConfig: ## @param fleetConfig.spokes[0].syncLabels If true, sync the labels from klusterlet to all agent resources. ## @param fleetConfig.spokes[0].kubeconfig.context The context to use in the kubeconfig file. Leave empty to use the current context. ## @param fleetConfig.spokes[0].kubeconfig.inCluster If set, the kubeconfig will be read from the cluster. Only applicable for same-cluster operations. - ## @param fleetConfig.spokes[0].ca Hub cluster CA certificate, optional. ## @param fleetConfig.spokes[0].proxyCa Proxy CA certificate, optional. ## @param fleetConfig.spokes[0].proxyUrl URL of a forward proxy server used by agents to connect to the Hub cluster, optional. ## @param fleetConfig.spokes[0].klusterlet.annotations Annotations to apply to the spoke cluster. If not present, the 'agent.open-cluster-management.io/' prefix is added to each key. Each annotation is added to klusterlet.spec.registrationConfiguration.clusterAnnotations on the spoke and subsequently to the ManagedCluster on the hub. These annotations take precedence over the global spoke annotations. @@ -185,7 +190,6 @@ fleetConfig: # namespace: "" # ## @param fleetConfig.spokes[0].kubeconfig.secretReference.kubeconfigKey The map key to access the kubeconfig. # kubeconfigKey: "kubeconfig" - ca: "" proxyCa: "" proxyUrl: "" ## Configuration for the Klusterlet on the Spoke cluster. 
@@ -273,7 +277,7 @@ imageRegistry: "" ## @param image.pullPolicy Image pull policy image: repository: quay.io/open-cluster-management/fleetconfig-controller - tag: v0.0.6 + tag: v0.0.7 pullPolicy: IfNotPresent ## @param imagePullSecrets Image pull secrets diff --git a/fleetconfig-controller/config/crd/bases/fleetconfig.open-cluster-management.io_fleetconfigs.yaml b/fleetconfig-controller/config/crd/bases/fleetconfig.open-cluster-management.io_fleetconfigs.yaml index efca7953..0807daad 100644 --- a/fleetconfig-controller/config/crd/bases/fleetconfig.open-cluster-management.io_fleetconfigs.yaml +++ b/fleetconfig-controller/config/crd/bases/fleetconfig.open-cluster-management.io_fleetconfigs.yaml @@ -81,9 +81,12 @@ spec: properties: apiServer: description: |- - APIServer is the API server URL for the Hub cluster. If provided, the hub will be joined - using this API server instead of the one in the obtained kubeconfig. This is useful when - using in-cluster kubeconfig when that kubeconfig would return an incorrect API server URL. + APIServer is the API server URL for the Hub cluster. If provided, spoke clusters will + join the hub using this API server instead of the one in the bootstrap kubeconfig. + Spoke clusters with ForceInternalEndpointLookup set to true will ignore this field. + type: string + ca: + description: Hub cluster CA certificate, optional + type: string clusterManager: description: ClusterManager configuration. @@ -260,6 +263,22 @@ spec: required: - kubeconfig type: object + logVerbosity: + default: 0 + description: LogVerbosity is the verbosity of the logs. + enum: + - 0 + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + type: integer registrationAuth: default: {} description: RegistrationAuth provides specifications for registration @@ -319,9 +338,6 @@ spec: - configName type: object type: array - ca: - description: Hub cluster CA certificate, optional - type: string clusterARN: description: |- ClusterARN is the ARN of the spoke cluster. 
@@ -535,6 +551,11 @@ spec: - name type: object type: array + timeout: + default: 300 + description: Timeout is the timeout in seconds for all clusteradm + operations, including init, accept, join, upgrade, etc. + type: integer required: - hub - spokes @@ -656,6 +677,9 @@ spec: phase: type: string type: object + required: + - metadata + - spec type: object served: true storage: true diff --git a/fleetconfig-controller/internal/controller/addon.go b/fleetconfig-controller/internal/controller/addon.go index b1207cae..8b8252e5 100644 --- a/fleetconfig-controller/internal/controller/addon.go +++ b/fleetconfig-controller/internal/controller/addon.go @@ -105,12 +105,12 @@ func handleAddonCreate(ctx context.Context, kClient client.Client, fc *v1alpha1. return errors.Wrapf(err, "could not load configuration for add-on %s version %s", a.Name, a.Version) } - args := []string{ + args := append([]string{ addon, create, a.Name, fmt.Sprintf("--version=%s", a.Version), - } + }, fc.BaseArgs()...) // Extract manifest configuration from ConfigMap // validation was already done by the webhook, so simply check if raw manifests are provided and if not, use the URL. @@ -151,11 +151,12 @@ func handleAddonCreate(ctx context.Context, kClient client.Client, fc *v1alpha1. } cmd := exec.Command(clusteradm, args...) - out, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm addon create' to complete...") + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm addon create' to complete...") if err != nil { + out := append(stdout, stderr...) 
return fmt.Errorf("failed to create addon: %v, output: %s", err, string(out)) } - logger.V(0).Info("created addon", "AddOnTemplate", a.Name) + logger.V(0).Info("created addon", "AddOnTemplate", a.Name, "output", string(stdout)) } return nil } diff --git a/fleetconfig-controller/internal/controller/hub.go b/fleetconfig-controller/internal/controller/hub.go index a60696de..b72c5079 100644 --- a/fleetconfig-controller/internal/controller/hub.go +++ b/fleetconfig-controller/internal/controller/hub.go @@ -111,11 +111,12 @@ func initializeHub(ctx context.Context, kClient client.Client, fc *v1alpha1.Flee logger := log.FromContext(ctx) logger.V(0).Info("initHub", "fleetconfig", fc.Name) - initArgs := []string{"init", + initArgs := append([]string{ + "init", fmt.Sprintf("--create-namespace=%t", fc.Spec.Hub.CreateNamespace), fmt.Sprintf("--force=%t", fc.Spec.Hub.Force), "--wait=true", - } + }, fc.BaseArgs()...) if fc.Spec.RegistrationAuth.Driver == v1alpha1.AWSIRSARegistrationDriver { raArgs := []string{ @@ -182,11 +183,12 @@ func initializeHub(ctx context.Context, kClient client.Client, fc *v1alpha1.Flee logger.V(1).Info("clusteradm init", "args", initArgs) cmd := exec.Command(clusteradm, initArgs...) - out, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm init' to complete...") + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm init' to complete...") if err != nil { + out := append(stdout, stderr...) 
return fmt.Errorf("failed to init hub: %v, output: %s", err, string(out)) } - logger.V(1).Info("hub initialized", "output", string(out)) + logger.V(1).Info("hub initialized", "output", string(stdout)) return nil } @@ -253,22 +255,25 @@ func upgradeHub(ctx context.Context, fc *v1alpha1.FleetConfig) error { logger := log.FromContext(ctx) logger.V(0).Info("upgradeHub", "fleetconfig", fc.Name) - upgradeArgs := []string{"upgrade", "clustermanager", + upgradeArgs := append([]string{ + "upgrade", "clustermanager", "--bundle-version", fc.Spec.Hub.ClusterManager.Source.BundleVersion, "--image-registry", fc.Spec.Hub.ClusterManager.Source.Registry, "--wait=true", - } + }, fc.BaseArgs()...) + logger.V(1).Info("clusteradm upgrade clustermanager", "args", upgradeArgs) cmd := exec.Command(clusteradm, upgradeArgs...) - out, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm upgrade clustermanager' to complete...") + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm upgrade clustermanager' to complete...") if err != nil { + out := append(stdout, stderr...) return fmt.Errorf( "failed to upgrade hub clustermanager to %s: %v, output: %s", fc.Spec.Hub.ClusterManager.Source.BundleVersion, err, string(out), ) } - logger.V(1).Info("clustermanager upgraded", "output", string(out)) + logger.V(1).Info("clustermanager upgraded", "output", string(stdout)) return nil } @@ -294,19 +299,21 @@ func cleanHub(ctx context.Context, kClient client.Client, hubKubeconfig []byte, return err } - cleanArgs := []string{"clean", + cleanArgs := append([]string{ + "clean", // name is omitted, as the default name, 'cluster-manager', is always used fmt.Sprintf("--purge-operator=%t", fc.Spec.Hub.ClusterManager.PurgeOperator), - } + }, fc.BaseArgs()...) + logger.V(1).Info("clusteradm clean", "args", cleanArgs) cmd := exec.Command(clusteradm, cleanArgs...) 
- out, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm clean' to complete...") + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm clean' to complete...") if err != nil { + out := append(stdout, stderr...) return fmt.Errorf("failed to clean hub cluster: %v, output: %s", err, string(out)) } - - logger.V(1).Info("hub cleaned", "output", string(out)) + logger.V(1).Info("hub cleaned", "output", string(stdout)) return nil } diff --git a/fleetconfig-controller/internal/controller/spoke.go b/fleetconfig-controller/internal/controller/spoke.go index 78b3266d..5cdb6109 100644 --- a/fleetconfig-controller/internal/controller/spoke.go +++ b/fleetconfig-controller/internal/controller/spoke.go @@ -52,7 +52,7 @@ func handleSpokes(ctx context.Context, kClient client.Client, fc *v1alpha1.Fleet if !slices.ContainsFunc(fc.Spec.Spokes, func(spoke v1alpha1.Spoke) bool { return spoke.Name == js.Name && reflect.DeepEqual(spoke.Kubeconfig, js.Kubeconfig) }) { - err = deregisterSpoke(ctx, kClient, hubKubeconfig, &js) + err = deregisterSpoke(ctx, kClient, hubKubeconfig, fc, &js) if err != nil { fc.SetConditions(true, v1alpha1.NewCondition( err.Error(), js.UnjoinType(), metav1.ConditionFalse, metav1.ConditionTrue, @@ -80,14 +80,14 @@ func handleSpokes(ctx context.Context, kClient client.Client, fc *v1alpha1.Fleet if err != nil { return fmt.Errorf("failed to get join token: %w", err) } - if err := joinSpoke(ctx, kClient, fc.Spec, spoke, tokenMeta); err != nil { + if err := joinSpoke(ctx, kClient, fc, spoke, tokenMeta); err != nil { fc.SetConditions(true, v1alpha1.NewCondition( err.Error(), spoke.JoinType(), metav1.ConditionFalse, metav1.ConditionTrue, )) continue } // run `clusteradm accept` even if auto acceptance is enabled, as it's just a no-op if the spoke is already accepted - if err := acceptCluster(ctx, spoke.Name); err != nil { + if err := acceptCluster(ctx, fc, spoke.Name, false); err != nil { fc.SetConditions(true, 
v1alpha1.NewCondition( err.Error(), spoke.JoinType(), metav1.ConditionFalse, metav1.ConditionTrue, )) @@ -110,6 +110,12 @@ func handleSpokes(ctx context.Context, kClient client.Client, fc *v1alpha1.Fleet fc.SetConditions(true, v1alpha1.NewCondition( msg, spoke.JoinType(), metav1.ConditionFalse, metav1.ConditionTrue, )) + // Re-accept all join requests for the spoke cluster. This is a workaround for the issue + // that duplicate CSRs are sometimes created for the same spoke cluster when the klusterlet + // controller bounces the klusterlet registration agent. + if err := acceptCluster(ctx, fc, spoke.Name, true); err != nil { + logger.Error(err, "failed to accept spoke cluster join request(s)", "spoke", spoke.Name) + } continue } @@ -148,7 +154,7 @@ func handleSpokes(ctx context.Context, kClient client.Client, fc *v1alpha1.Fleet return fmt.Errorf("failed to check if spoke cluster needs upgrade: %w", err) } if upgrade { - if err := upgradeSpoke(ctx, kClient, spoke); err != nil { + if err := upgradeSpoke(ctx, kClient, fc, spoke); err != nil { return fmt.Errorf("failed to upgrade spoke cluster %s: %w", spoke.Name, err) } } @@ -187,26 +193,31 @@ func getJoinedCondition(managedCluster *clusterv1.ManagedCluster) *metav1.Condit } // acceptCluster accepts a Spoke cluster's join request via 'clusteradm accept' -func acceptCluster(ctx context.Context, name string) error { +func acceptCluster(ctx context.Context, fc *v1alpha1.FleetConfig, name string, skipApproveCheck bool) error { logger := log.FromContext(ctx) logger.V(0).Info("acceptCluster") - acceptArgs := []string{"accept", "--cluster", name} + acceptArgs := append([]string{ + "accept", "--cluster", name, + }, fc.BaseArgs()...) + logger.V(1).Info("clusteradm accept", "args", acceptArgs) // TODO: handle other args: // --requesters=[]: // Common Names of agents to be approved. - // --skip-approve-check=false: - // If set, then skip check and approve csr directly. 
+ if skipApproveCheck { + acceptArgs = append(acceptArgs, "--skip-approve-check") + } cmd := exec.Command(clusteradm, acceptArgs...) - out, err := exec_utils.CmdWithLogs(ctx, cmd, fmt.Sprintf("waiting for 'clusteradm accept' to complete for spoke %s...", name)) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, fmt.Sprintf("waiting for 'clusteradm accept' to complete for spoke %s...", name)) if err != nil { + out := append(stdout, stderr...) return fmt.Errorf("failed to accept spoke cluster join request: %v, output: %s", err, string(out)) } - logger.V(1).Info("spoke cluster join request accepted", "output", string(out)) + logger.V(1).Info("spoke cluster join request accepted", "output", string(stdout)) return nil } @@ -221,7 +232,10 @@ func getToken(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetConf logger := log.FromContext(ctx) logger.V(0).Info("getToken") - tokenArgs := []string{"get", "token", "--output=json"} + tokenArgs := append([]string{ + "get", "token", "--output=json", + }, fc.BaseArgs()...) + if fc.Spec.Hub.ClusterManager != nil { tokenArgs = append(tokenArgs, fmt.Sprintf("--use-bootstrap-token=%t", fc.Spec.Hub.ClusterManager.UseBootstrapToken)) } @@ -232,28 +246,31 @@ func getToken(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetConf if err != nil { return nil, fmt.Errorf("failed to prepare kubeconfig: %w", err) } + logger.V(1).Info("clusteradm get token", "args", tokenArgs) cmd := exec.Command(clusteradm, tokenArgs...) - out, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm get token' to complete...") + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, "waiting for 'clusteradm get token' to complete...") if err != nil { + out := append(stdout, stderr...) 
return nil, fmt.Errorf("failed to get join token: %v, output: %s", err, string(out)) } - logger.V(1).Info("got join token", "output", string(out)) + logger.V(1).Info("got join token", "output", string(stdout)) tokenMeta := &tokenMeta{} - if err := json.Unmarshal(out, &tokenMeta); err != nil { + if err := json.Unmarshal(stdout, &tokenMeta); err != nil { return nil, fmt.Errorf("failed to unmarshal join token: %w", err) } return tokenMeta, nil } // joinSpoke joins a Spoke cluster to the Hub cluster via 'clusteradm join' -func joinSpoke(ctx context.Context, kClient client.Client, spec v1alpha1.FleetConfigSpec, spoke v1alpha1.Spoke, tokenMeta *tokenMeta) error { +func joinSpoke(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetConfig, spoke v1alpha1.Spoke, tokenMeta *tokenMeta) error { logger := log.FromContext(ctx) logger.V(0).Info("joinSpoke", "spoke", spoke.Name) - joinArgs := []string{"join", + joinArgs := append([]string{ + "join", "--cluster-name", spoke.Name, fmt.Sprintf("--create-namespace=%t", spoke.CreateNamespace), fmt.Sprintf("--enable-sync-labels=%t", spoke.SyncLabels), @@ -267,7 +284,7 @@ func joinSpoke(ctx context.Context, kClient client.Client, spec v1alpha1.FleetCo // source args "--bundle-version", spoke.Klusterlet.Source.BundleVersion, "--image-registry", spoke.Klusterlet.Source.Registry, - } + }, fc.BaseArgs()...) for k, v := range spoke.Klusterlet.Annotations { joinArgs = append(joinArgs, fmt.Sprintf("--klusterlet-annotation=%s=%s", k, v)) @@ -276,19 +293,31 @@ func joinSpoke(ctx context.Context, kClient client.Client, spec v1alpha1.FleetCo // resources args joinArgs = append(joinArgs, common.PrepareResources(spoke.Klusterlet.Resources)...) 
- // Use hub API server from spec if provided, otherwise fall back to tokenMeta - if spec.Hub.APIServer != "" { - joinArgs = append(joinArgs, "--hub-apiserver", spec.Hub.APIServer) + // Use hub API server from spec if provided and not forced to use internal endpoint, + // otherwise fall back to the hub API server from the tokenMeta + if fc.Spec.Hub.APIServer != "" && !spoke.Klusterlet.ForceInternalEndpointLookup { + joinArgs = append(joinArgs, "--hub-apiserver", fc.Spec.Hub.APIServer) } else if tokenMeta.HubAPIServer != "" { joinArgs = append(joinArgs, "--hub-apiserver", tokenMeta.HubAPIServer) } - if spec.RegistrationAuth.Driver == v1alpha1.AWSIRSARegistrationDriver { + if fc.Spec.Hub.Ca != "" { + caFile, caCleanup, err := file.TmpFile([]byte(fc.Spec.Hub.Ca), "ca") + if caCleanup != nil { + defer caCleanup() + } + if err != nil { + return fmt.Errorf("failed to write hub CA to disk: %w", err) + } + joinArgs = append([]string{fmt.Sprintf("--ca-file=%s", caFile)}, joinArgs...) + } + + if fc.Spec.RegistrationAuth.Driver == v1alpha1.AWSIRSARegistrationDriver { raArgs := []string{ - fmt.Sprintf("--registration-auth=%s", spec.RegistrationAuth.Driver), + fmt.Sprintf("--registration-auth=%s", fc.Spec.RegistrationAuth.Driver), } - if spec.RegistrationAuth.HubClusterARN != "" { - raArgs = append(raArgs, fmt.Sprintf("--hub-cluster-arn=%s", spec.RegistrationAuth.HubClusterARN)) + if fc.Spec.RegistrationAuth.HubClusterARN != "" { + raArgs = append(raArgs, fmt.Sprintf("--hub-cluster-arn=%s", fc.Spec.RegistrationAuth.HubClusterARN)) } if spoke.ClusterARN != "" { raArgs = append(raArgs, fmt.Sprintf("--managed-cluster-arn=%s", spoke.ClusterARN)) @@ -315,16 +344,6 @@ func joinSpoke(ctx context.Context, kClient client.Client, spec v1alpha1.FleetCo joinArgs = append(joinArgs, "--managed-cluster-kubeconfig", mgdKcfg) } - if spoke.Ca != "" { - caFile, caCleanup, err := file.TmpFile([]byte(spoke.Ca), "ca") - if caCleanup != nil { - defer caCleanup() - } - if err != nil { - return 
fmt.Errorf("failed to write CA to disk: %w", err) - } - joinArgs = append([]string{fmt.Sprintf("--ca-file=%s", caFile)}, joinArgs...) - } if spoke.ProxyCa != "" { proxyCaFile, proxyCaCleanup, err := file.TmpFile([]byte(spoke.ProxyCa), "proxy-ca") if proxyCaCleanup != nil { @@ -350,11 +369,12 @@ func joinSpoke(ctx context.Context, kClient client.Client, spec v1alpha1.FleetCo logger.V(1).Info("clusteradm join", "args", joinArgs) cmd := exec.Command(clusteradm, joinArgs...) - out, err := exec_utils.CmdWithLogs(ctx, cmd, fmt.Sprintf("waiting for 'clusteradm join' to complete for spoke %s...", spoke.Name)) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, fmt.Sprintf("waiting for 'clusteradm join' to complete for spoke %s...", spoke.Name)) if err != nil { + out := append(stdout, stderr...) return fmt.Errorf("clusteradm join command failed for spoke %s: %v, output: %s", spoke.Name, err, string(out)) } - logger.V(1).Info("successfully requested spoke cluster join", "output", string(out)) + logger.V(1).Info("successfully requested spoke cluster join", "output", string(stdout)) return nil } @@ -411,15 +431,16 @@ func spokeNeedsUpgrade(ctx context.Context, kClient client.Client, spoke v1alpha } // upgradeSpoke upgrades the Spoke cluster's klusterlet to the specified version -func upgradeSpoke(ctx context.Context, kClient client.Client, spoke v1alpha1.Spoke) error { +func upgradeSpoke(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetConfig, spoke v1alpha1.Spoke) error { logger := log.FromContext(ctx) logger.V(0).Info("upgradeSpoke", "spoke", spoke.Name) - upgradeArgs := []string{"upgrade", "klusterlet", + upgradeArgs := append([]string{ + "upgrade", "klusterlet", "--bundle-version", spoke.Klusterlet.Source.BundleVersion, "--image-registry", spoke.Klusterlet.Source.Registry, "--wait=true", - } + }, fc.BaseArgs()...) 
upgradeArgs, cleanupKcfg, err := common.PrepareKubeconfig(ctx, kClient, spoke.Kubeconfig, upgradeArgs) if cleanupKcfg != nil { @@ -428,17 +449,19 @@ func upgradeSpoke(ctx context.Context, kClient client.Client, spoke v1alpha1.Spo if err != nil { return err } + logger.V(1).Info("clusteradm upgrade klusterlet", "args", upgradeArgs) cmd := exec.Command(clusteradm, upgradeArgs...) - out, err := exec_utils.CmdWithLogs(ctx, cmd, fmt.Sprintf("waiting for 'clusteradm upgrade klusterlet' to complete for spoke %s...", spoke.Name)) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, fmt.Sprintf("waiting for 'clusteradm upgrade klusterlet' to complete for spoke %s...", spoke.Name)) if err != nil { + out := append(stdout, stderr...) return fmt.Errorf( "failed to upgrade klusterlet on spoke cluster %s to %s: %v, output: %s", spoke.Name, spoke.Klusterlet.Source.BundleVersion, err, string(out), ) } - logger.V(1).Info("klusterlet upgraded", "output", string(out)) + logger.V(1).Info("klusterlet upgraded", "output", string(stdout)) return nil } @@ -457,7 +480,7 @@ func cleanupSpokes(ctx context.Context, kClient client.Client, fc *v1alpha1.Flee continue } - if err := unjoinSpoke(ctx, kClient, spoke.Kubeconfig, spoke.Name, spoke.Klusterlet.PurgeOperator); err != nil { + if err := unjoinSpoke(ctx, kClient, fc, &spoke); err != nil { return err } } @@ -466,35 +489,38 @@ func cleanupSpokes(ctx context.Context, kClient client.Client, fc *v1alpha1.Flee } // unjoinSpoke unjoins a single spoke cluster from the Hub cluster via `clusteradm unjoin` -func unjoinSpoke(ctx context.Context, kClient client.Client, kubeconfig v1alpha1.Kubeconfig, spokeName string, purgeOperator bool) error { +func unjoinSpoke(ctx context.Context, kClient client.Client, fc *v1alpha1.FleetConfig, spoke v1alpha1.ISpoke) error { logger := log.FromContext(ctx) - unjoinArgs := []string{ + unjoinArgs := append([]string{ "unjoin", - "--cluster-name", spokeName, - fmt.Sprintf("--purge-operator=%t", purgeOperator), - } - 
unjoinArgs, cleanupKcfg, err := common.PrepareKubeconfig(ctx, kClient, kubeconfig, unjoinArgs) + "--cluster-name", spoke.GetName(), + fmt.Sprintf("--purge-operator=%t", spoke.GetPurgeKlusterletOperator()), + }, fc.BaseArgs()...) + + unjoinArgs, cleanupKcfg, err := common.PrepareKubeconfig(ctx, kClient, spoke.GetKubeconfig(), unjoinArgs) if cleanupKcfg != nil { defer cleanupKcfg() } if err != nil { - return fmt.Errorf("failed to unjoin spoke cluster %s: %w", spokeName, err) + return fmt.Errorf("failed to unjoin spoke cluster %s: %w", spoke.GetName(), err) } + logger.V(1).Info("clusteradm unjoin", "args", unjoinArgs) cmd := exec.Command(clusteradm, unjoinArgs...) - out, err := exec_utils.CmdWithLogs(ctx, cmd, fmt.Sprintf("waiting for 'clusteradm unjoin' to complete for spoke %s...", spokeName)) + stdout, stderr, err := exec_utils.CmdWithLogs(ctx, cmd, fmt.Sprintf("waiting for 'clusteradm unjoin' to complete for spoke %s...", spoke.GetName())) + out := append(stdout, stderr...) if err != nil || strings.Contains(string(out), amwExistsError) { - return fmt.Errorf("failed to unjoin spoke cluster %s: %v, output: %s", spokeName, err, string(out)) + return fmt.Errorf("failed to unjoin spoke cluster %s: %v, output: %s", spoke.GetName(), err, string(out)) } - logger.V(1).Info("spoke cluster unjoined", "output", string(out)) + logger.V(1).Info("spoke cluster unjoined", "output", string(stdout)) return nil } // deregisterSpoke fully deregisters a spoke cluster, including cleaning up all relevant resources on the hub -func deregisterSpoke(ctx context.Context, kClient client.Client, hubKubeconfig []byte, spoke *v1alpha1.JoinedSpoke) error { +func deregisterSpoke(ctx context.Context, kClient client.Client, hubKubeconfig []byte, fc *v1alpha1.FleetConfig, spoke *v1alpha1.JoinedSpoke) error { logger := log.FromContext(ctx) clusterC, err := common.ClusterClient(hubKubeconfig) if err != nil { @@ -524,7 +550,7 @@ func deregisterSpoke(ctx context.Context, kClient client.Client, 
hubKubeconfig [ } // unjoin spoke - if err := unjoinSpoke(ctx, kClient, spoke.Kubeconfig, spoke.Name, spoke.PurgeKlusterletOperator); err != nil { + if err := unjoinSpoke(ctx, kClient, fc, spoke); err != nil { return err } diff --git a/fleetconfig-controller/internal/exec/exec.go b/fleetconfig-controller/internal/exec/exec.go index 91eab250..83b9fe59 100644 --- a/fleetconfig-controller/internal/exec/exec.go +++ b/fleetconfig-controller/internal/exec/exec.go @@ -2,6 +2,7 @@ package exec import ( + "bytes" "context" "os/exec" "time" @@ -13,20 +14,27 @@ const logInterval = 5 * time.Second // CmdWithLogs executes the passed in command in a goroutine while the main thread waits for the command to complete. // The main thread logs a message at regular intervals until the command completes. -func CmdWithLogs(ctx context.Context, cmd *exec.Cmd, message string) ([]byte, error) { +// Returns stdout, stderr, and error separately. +func CmdWithLogs(ctx context.Context, cmd *exec.Cmd, message string) ([]byte, []byte, error) { logger := log.FromContext(ctx) resultCh := make(chan struct { - out []byte - err error + stdout []byte + stderr []byte + err error }, 1) go func() { - out, err := cmd.CombinedOutput() + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() resultCh <- struct { - out []byte - err error - }{out: out, err: err} + stdout []byte + stderr []byte + err error + }{stdout: stdout.Bytes(), stderr: stderr.Bytes(), err: err} }() ticker := time.NewTicker(logInterval) @@ -36,9 +44,9 @@ func CmdWithLogs(ctx context.Context, cmd *exec.Cmd, message string) ([]byte, er select { case <-ctx.Done(): _ = cmd.Process.Kill() - return nil, ctx.Err() + return nil, nil, ctx.Err() case res := <-resultCh: - return res.out, res.err + return res.stdout, res.stderr, res.err case <-ticker.C: logger.V(1).Info(message) } diff --git a/fleetconfig-controller/internal/exec/exec_test.go b/fleetconfig-controller/internal/exec/exec_test.go index 
cd712649..4ee635aa 100644 --- a/fleetconfig-controller/internal/exec/exec_test.go +++ b/fleetconfig-controller/internal/exec/exec_test.go @@ -50,13 +50,14 @@ func TestCmdWithLogs(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), tt.timeout) defer cancel() - out, err := CmdWithLogs(ctx, tt.cmdFunc(), "waiting for command...") + stdout, stderr, err := CmdWithLogs(ctx, tt.cmdFunc(), "waiting for command...") if (err != nil) != tt.wantErr { t.Errorf("CmdWithLogs() error = %v, wantErr %v", err, tt.wantErr) } - outStr := strings.TrimSpace(string(out)) + combinedOut := string(stdout) + string(stderr) + outStr := strings.TrimSpace(combinedOut) if tt.wantOut != "" && !strings.Contains(outStr, tt.wantOut) { t.Errorf("output = %q, want to contain %q", outStr, tt.wantOut) } diff --git a/fleetconfig-controller/test/data/fleetconfig-values.yaml b/fleetconfig-controller/test/data/fleetconfig-values.yaml index ce020e38..5540c1d5 100644 --- a/fleetconfig-controller/test/data/fleetconfig-values.yaml +++ b/fleetconfig-controller/test/data/fleetconfig-values.yaml @@ -1,4 +1,6 @@ fleetConfig: + timeout: 300 + logVerbosity: 5 spokeAnnotations: foo: "not-bar" baz: "quux"