diff --git a/Makefile b/Makefile index 7e7547e3b69a..148783f2fa1c 100644 --- a/Makefile +++ b/Makefile @@ -204,11 +204,14 @@ TILT_PREPARE_BIN := tilt-prepare TILT_PREPARE := $(abspath $(TOOLS_BIN_DIR)/$(TILT_PREPARE_BIN)) # Define Docker related variables. Releases should modify and double check these vars. -REGISTRY ?= gcr.io/$(shell gcloud config get-value project) +REGISTRY ?= gcr.io/xxxxxx + +# For string inside YAML files (in "out" directory) PROD_REGISTRY ?= registry.k8s.io/cluster-api +# For string inside YAML files (in "out" directory) STAGING_REGISTRY ?= gcr.io/k8s-staging-cluster-api -STAGING_BUCKET ?= k8s-staging-cluster-api +#STAGING_BUCKET ?= k8s-staging-cluster-api # core IMAGE_NAME ?= cluster-api-controller @@ -242,7 +245,7 @@ CAPI_KIND_CLUSTER_NAME ?= capi-test TAG ?= dev ARCH ?= $(shell go env GOARCH) -ALL_ARCH ?= amd64 arm arm64 ppc64le s390x +ALL_ARCH ?= amd64 # Allow overriding the imagePullPolicy PULL_POLICY ?= Always @@ -819,10 +822,11 @@ docker-build-%: # Choice of images to build/push ALL_DOCKER_BUILD ?= core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure test-extension clusterctl +SYSELF_RELEVANT_DOCKER_BUILD ?= core .PHONY: docker-build docker-build: docker-pull-prerequisites ## Run docker-build-* targets for all the images - $(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(ALL_DOCKER_BUILD)) + $(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(SYSELF_RELEVANT_DOCKER_BUILD)) ALL_DOCKER_BUILD_E2E = core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure test-extension @@ -1013,9 +1017,9 @@ $(RELEASE_NOTES_DIR): .PHONY: release release: clean-release ## Build and push container images using the latest git tag for the commit - @if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi - @if ! 
[ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi - git checkout "${RELEASE_TAG}" + #@if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi + #@if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi + #git checkout "${RELEASE_TAG}" # Build binaries first. GIT_VERSION=$(RELEASE_TAG) $(MAKE) release-binaries # Set the manifest images to the staging/production bucket and Builds the manifests to publish with a release. @@ -1091,11 +1095,11 @@ release-manifests-dev: $(RELEASE_DIR) $(KUSTOMIZE) ## Build the development mani .PHONY: release-binaries release-binaries: ## Build the binaries to publish with a release RELEASE_BINARY=clusterctl-linux-amd64 BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=amd64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-linux-arm64 BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=arm64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-darwin-amd64 BUILD_PATH=./cmd/clusterctl GOOS=darwin GOARCH=amd64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-darwin-arm64 BUILD_PATH=./cmd/clusterctl GOOS=darwin GOARCH=arm64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-windows-amd64.exe BUILD_PATH=./cmd/clusterctl GOOS=windows GOARCH=amd64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-linux-ppc64le BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=ppc64le $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-linux-arm64 BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=arm64 $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-darwin-amd64 BUILD_PATH=./cmd/clusterctl GOOS=darwin GOARCH=amd64 $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-darwin-arm64 BUILD_PATH=./cmd/clusterctl GOOS=darwin GOARCH=arm64 $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-windows-amd64.exe BUILD_PATH=./cmd/clusterctl GOOS=windows GOARCH=amd64 
$(MAKE) release-binary +# RELEASE_BINARY=clusterctl-linux-ppc64le BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=ppc64le $(MAKE) release-binary .PHONY: release-binary release-binary: $(RELEASE_DIR) @@ -1104,9 +1108,11 @@ release-binary: $(RELEASE_DIR) -e CGO_ENABLED=0 \ -e GOOS=$(GOOS) \ -e GOARCH=$(GOARCH) \ - -e GOCACHE=/tmp/ \ + -e GOCACHE=/go/build-cache/ \ --user $$(id -u):$$(id -g) \ -v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \ + -v "$$(go env GOMODCACHE):/go/pkg/mod" \ + -v "$$(go env GOCACHE):/go/build-cache" \ -w /workspace \ golang:$(GO_VERSION) \ go build -a -trimpath -ldflags "$(LDFLAGS) -extldflags '-static'" \ @@ -1128,7 +1134,8 @@ release-staging: ## Build and push container images to the staging bucket $(MAKE) release-manifests-dev # Example manifest location: https://storage.googleapis.com/k8s-staging-cluster-api/components/main/core-components.yaml # Please note that these files are deleted after a certain period, at the time of this writing 60 days after file creation. - gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(RELEASE_ALIAS_TAG) + + ##gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(RELEASE_ALIAS_TAG) .PHONY: release-staging-nightly release-staging-nightly: ## Tag and push container images to the staging bucket. Example image tag: cluster-api-controller:nightly_main_20210121 @@ -1145,16 +1152,17 @@ release-staging-nightly: ## Tag and push container images to the staging bucket. $(MAKE) release-manifests-dev # Example manifest location: https://storage.googleapis.com/k8s-staging-cluster-api/components/nightly_main_20240425/core-components.yaml # Please note that these files are deleted after a certain period, at the time of this writing 60 days after file creation. 
- gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(NEW_RELEASE_ALIAS_TAG) + #gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(NEW_RELEASE_ALIAS_TAG) .PHONY: release-alias-tag release-alias-tag: ## Add the release alias tag to the last build tag - gcloud container images add-tag $(CONTROLLER_IMG):$(TAG) $(CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(CLUSTERCTL_IMG):$(TAG) $(CLUSTERCTL_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(CAPD_CONTROLLER_IMG):$(TAG) $(CAPD_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(TEST_EXTENSION_IMG):$(TAG) $(TEST_EXTENSION_IMG):$(RELEASE_ALIAS_TAG) + echo "Syself: skipping" +# gcloud container images add-tag $(CONTROLLER_IMG):$(TAG) $(CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(CLUSTERCTL_IMG):$(TAG) $(CLUSTERCTL_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(CAPD_CONTROLLER_IMG):$(TAG) $(CAPD_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(TEST_EXTENSION_IMG):$(TAG) $(TEST_EXTENSION_IMG):$(RELEASE_ALIAS_TAG) .PHONY: release-notes-tool release-notes-tool: @@ -1190,13 +1198,13 @@ docker-image-verify: ## Verifies all built images to contain the correct binary .PHONY: docker-push-all docker-push-all: $(addprefix docker-push-,$(ALL_ARCH)) ## Push the docker images to be included in the release for all architectures + related 
multiarch manifests
-	$(MAKE) ALL_ARCH="$(ALL_ARCH)" $(addprefix docker-push-manifest-,$(ALL_DOCKER_BUILD))
+	$(MAKE) ALL_ARCH="$(ALL_ARCH)" $(addprefix docker-push-manifest-,$(SYSELF_RELEVANT_DOCKER_BUILD))
 
 docker-push-%:
 	$(MAKE) ARCH=$* docker-push
 
 .PHONY: docker-push
-docker-push: $(addprefix docker-push-,$(ALL_DOCKER_BUILD)) ## Push the docker images to be included in the release
+docker-push: $(addprefix docker-push-,$(SYSELF_RELEVANT_DOCKER_BUILD)) ## Push the docker images to be included in the release
 
 .PHONY: docker-push-core
 docker-push-core: ## Push the core docker image
diff --git a/README.md b/README.md
index 2f87ecc9b480..2783f3cbb677 100644
--- a/README.md
+++ b/README.md
@@ -62,4 +62,37 @@ Participation in the Kubernetes community is governed by the [Kubernetes Code of
 
 [Good first issue]: https://github.com/kubernetes-sigs/cluster-api/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22
 [Help wanted]: https://github.com/kubernetes-sigs/cluster-api/issues?utf8=%E2%9C%93&q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22+
-
+# Release/Development (Syself Fork)
+
+1. Create the git tag for the release.
+   ```shell
+   git tag v1.10.7-syself.8
+   ```
+
+2. Export the release tag.
+   ```shell
+   export RELEASE_TAG=v1.10.7-syself.8
+   ```
+
+3. Run the release script.
+   ```shell
+   ./hack/release.sh
+   ```
+   This will create the manifests in the `out/` directory and push the CAPI controller image to https://ghcr.io/syself/cluster-api-prod/cluster-api-controller-amd64:v1.10.7-syself.8
+
+4. You can apply the generated `out/operator-configmaps.yaml` and `out/operator-provider-patches.yaml` to the management cluster.
+   ```shell
+   k apply -f ../cluster-api/out/operator-configmaps.yaml
+   k apply -f ../cluster-api/out/operator-provider-patches.yaml
+   ```
+
+5. Ensure that the configured `--source` flag in CSO and CSPH deployments is set as `oci`. If it is not set as `oci`, edit the same in `addonprovider` for `cso` and `csph`.
+   ```shell
+   k edit addonproviders.operator.cluster.x-k8s.io -n mgt-system cso
+   k edit addonproviders.operator.cluster.x-k8s.io -n mgt-system csph
+   ```
+
+6. Ensure that the secret `cluster-stack` containing GitHub and OCI credentials exists and is the same as that of the testing cluster. Otherwise modify it and copy the values from the secret present in the testing cluster.
+   ```shell
+   kubectl-modify-secret -n mgt-system cluster-stack
+   ```
diff --git a/api/v1beta1/cluster_types.go b/api/v1beta1/cluster_types.go
index be27ac6d1300..c1badd42a8ba 100644
--- a/api/v1beta1/cluster_types.go
+++ b/api/v1beta1/cluster_types.go
@@ -599,6 +599,14 @@ type ControlPlaneTopology struct {
 	// +optional
 	Metadata ObjectMeta `json:"metadata,omitempty"`
 
+	// class is the name of the ControlPlaneClass used to create the set of control plane nodes.
+	// This should match one of the control plane classes defined in the ClusterClass object.
+	// syself new field.
+	// +optional
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=256
+	Class string `json:"class,omitempty"`
+
 	// replicas is the number of control plane nodes.
 	// If the value is nil, the ControlPlane object is created without the number of Replicas
 	// and it's assumed that the control plane controller does not implement support for this field.
diff --git a/api/v1beta1/clusterclass_types.go b/api/v1beta1/clusterclass_types.go
index 4d62dbf7246b..69aa872aa03f 100644
--- a/api/v1beta1/clusterclass_types.go
+++ b/api/v1beta1/clusterclass_types.go
@@ -116,6 +116,18 @@ type ClusterClassSpec struct {
 	// +optional
 	ControlPlane ControlPlaneClass `json:"controlPlane,omitempty"`
 
+	// controlPlaneClasses is a list of named control plane classes that can be referenced
+	// from the Cluster topology. Each class defines a distinct control plane
+	// configuration. The class name MUST be unique within this list.
+	// When classes is defined, the Cluster topology can reference a specific
+	// control plane class by name.
+ // syself new field. + // +optional + // +listType=map + // +listMapKey=class + // +kubebuilder:validation:MaxItems=100 + ControlPlaneClasses []ControlPlaneClass `json:"controlPlaneClasses,omitempty"` + // workers describes the worker nodes for the cluster. // It is a collection of node types which can be used to create // the worker nodes of the cluster. @@ -148,6 +160,13 @@ type ControlPlaneClass struct { // +optional Metadata ObjectMeta `json:"metadata,omitempty"` + // class denotes a type of control-plane node present in the cluster. + // When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + // within the list and can be referenced from the Cluster topology. + // +optional + // +default="" + Class string `json:"class,omitempty"` + // LocalObjectTemplate contains the reference to the control plane provider. LocalObjectTemplate `json:",inline"` @@ -1013,6 +1032,12 @@ type PatchSelectorMatch struct { // +optional InfrastructureCluster bool `json:"infrastructureCluster,omitempty"` + // controlPlaneClass selects templates referenced in specific ControlPlaneClasses in + // .spec.controlPlane.classes. + // syself new field. + // +optional + ControlPlaneClass *PatchSelectorMatchControlPlaneClass `json:"controlPlaneClass,omitempty"` + // machineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in // .spec.workers.machineDeployments. // +optional @@ -1024,6 +1049,17 @@ type PatchSelectorMatch struct { MachinePoolClass *PatchSelectorMatchMachinePoolClass `json:"machinePoolClass,omitempty"` } +// PatchSelectorMatchControlPlaneClass selects templates referenced +// in specific ControlPlaneClasses in .spec.controlPlane.classes. +type PatchSelectorMatchControlPlaneClass struct { + // names selects templates by class names. 
+ // +optional + // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:items:MinLength=1 + // +kubebuilder:validation:items:MaxLength=256 + Names []string `json:"names,omitempty"` +} + // PatchSelectorMatchMachineDeploymentClass selects templates referenced // in specific MachineDeploymentClasses in .spec.workers.machineDeployments. type PatchSelectorMatchMachineDeploymentClass struct { diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index ec25c1dfb000..08766f1950c2 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -217,6 +217,13 @@ func (in *ClusterClassSpec) DeepCopyInto(out *ClusterClassSpec) { (*in).DeepCopyInto(*out) } in.ControlPlane.DeepCopyInto(&out.ControlPlane) + if in.ControlPlaneClasses != nil { + in, out := &in.ControlPlaneClasses, &out.ControlPlaneClasses + *out = make([]ControlPlaneClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } in.Workers.DeepCopyInto(&out.Workers) if in.Variables != nil { in, out := &in.Variables, &out.Variables @@ -2601,6 +2608,11 @@ func (in *PatchSelector) DeepCopy() *PatchSelector { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PatchSelectorMatch) DeepCopyInto(out *PatchSelectorMatch) { *out = *in + if in.ControlPlaneClass != nil { + in, out := &in.ControlPlaneClass, &out.ControlPlaneClass + *out = new(PatchSelectorMatchControlPlaneClass) + (*in).DeepCopyInto(*out) + } if in.MachineDeploymentClass != nil { in, out := &in.MachineDeploymentClass, &out.MachineDeploymentClass *out = new(PatchSelectorMatchMachineDeploymentClass) @@ -2623,6 +2635,26 @@ func (in *PatchSelectorMatch) DeepCopy() *PatchSelectorMatch { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PatchSelectorMatchControlPlaneClass) DeepCopyInto(out *PatchSelectorMatchControlPlaneClass) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchSelectorMatchControlPlaneClass. +func (in *PatchSelectorMatchControlPlaneClass) DeepCopy() *PatchSelectorMatchControlPlaneClass { + if in == nil { + return nil + } + out := new(PatchSelectorMatchControlPlaneClass) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PatchSelectorMatchMachineDeploymentClass) DeepCopyInto(out *PatchSelectorMatchMachineDeploymentClass) { *out = *in diff --git a/api/v1beta1/zz_generated.openapi.go b/api/v1beta1/zz_generated.openapi.go index a3f0529dc3ca..4e896cbb9d51 100644 --- a/api/v1beta1/zz_generated.openapi.go +++ b/api/v1beta1/zz_generated.openapi.go @@ -111,6 +111,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/v1beta1.PatchDefinition": schema_sigsk8sio_cluster_api_api_v1beta1_PatchDefinition(ref), "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelector": schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelector(ref), "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatch": schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatch(ref), + "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchControlPlaneClass": schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatchControlPlaneClass(ref), "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachineDeploymentClass": schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatchMachineDeploymentClass(ref), "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachinePoolClass": schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatchMachinePoolClass(ref), 
"sigs.k8s.io/cluster-api/api/v1beta1.RemediationStrategy": schema_sigsk8sio_cluster_api_api_v1beta1_RemediationStrategy(ref), @@ -470,6 +471,28 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ClusterClassSpec(ref common.Refere Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClass"), }, }, + "controlPlaneClasses": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "class", + }, + "x-kubernetes-list-type": "map", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "controlPlaneClasses is a list of named control plane classes that can be referenced from the Cluster topology. Each class defines a distinct control plane configuration. The class name MUST be unique within this list. When classes is defined, the Cluster topology can reference a specific control plane class by name. syself new field.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClass"), + }, + }, + }, + }, + }, "workers": { SchemaProps: spec.SchemaProps{ Description: "workers describes the worker nodes for the cluster. It is a collection of node types which can be used to create the worker nodes of the cluster.", @@ -1257,6 +1280,14 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneClass(ref common.Refer Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"), }, }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "class denotes a type of control-plane node present in the cluster. 
When used in ControlPlaneTopologyClass.Classes, this name MUST be unique within the list and can be referenced from the Cluster topology.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, "ref": { SchemaProps: spec.SchemaProps{ Description: "ref is a required reference to a custom resource offered by a provider.", @@ -1364,6 +1395,13 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneTopology(ref common.Re Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"), }, }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "class is the name of the ControlPlaneClass used to create the set of control plane nodes. This should match one of the control plane classes defined in the ClusterClass object. syself new field.", + Type: []string{"string"}, + Format: "", + }, + }, "replicas": { SchemaProps: spec.SchemaProps{ Description: "replicas is the number of control plane nodes. If the value is nil, the ControlPlane object is created without the number of Replicas and it's assumed that the control plane controller does not implement support for this field. When specified against a control plane provider that lacks support for this field, this value will be ignored.", @@ -4564,6 +4602,12 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatch(ref common.Refe Format: "", }, }, + "controlPlaneClass": { + SchemaProps: spec.SchemaProps{ + Description: "controlPlaneClass selects templates referenced in specific ControlPlaneClasses in .spec.controlPlane.classes. 
syself new field.", + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchControlPlaneClass"), + }, + }, "machineDeploymentClass": { SchemaProps: spec.SchemaProps{ Description: "machineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in .spec.workers.machineDeployments.", @@ -4580,7 +4624,35 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatch(ref common.Refe }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachineDeploymentClass", "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachinePoolClass"}, + "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchControlPlaneClass", "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachineDeploymentClass", "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachinePoolClass"}, + } +} + +func schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatchControlPlaneClass(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PatchSelectorMatchControlPlaneClass selects templates referenced in specific ControlPlaneClasses in .spec.controlPlane.classes.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "names": { + SchemaProps: spec.SchemaProps{ + Description: "names selects templates by class names.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, } } diff --git a/cmd/clusterctl/client/cluster/objectgraph.go b/cmd/clusterctl/client/cluster/objectgraph.go index 8e6b7d76cc41..1f3373918d86 100644 --- a/cmd/clusterctl/client/cluster/objectgraph.go +++ b/cmd/clusterctl/client/cluster/objectgraph.go @@ -43,9 +43,11 @@ import ( secretutil "sigs.k8s.io/cluster-api/util/secret" ) -const clusterTopologyNameKey = "cluster.spec.topology.class" 
-const clusterTopologyNamespaceKey = "cluster.spec.topology.classNamespace" -const clusterResourceSetBindingClusterNameKey = "clusterresourcesetbinding.spec.clustername" +const ( + clusterTopologyNameKey = "cluster.spec.topology.class" + clusterTopologyNamespaceKey = "cluster.spec.topology.classNamespace" + clusterResourceSetBindingClusterNameKey = "clusterresourcesetbinding.spec.clustername" +) type empty struct{} @@ -523,14 +525,31 @@ func (o *objectGraph) Discovery(ctx context.Context, namespace string) error { errs := []error{} _, err = o.fetchRef(ctx, discoveryBackoff, cc.Spec.Infrastructure.Ref) errs = append(errs, err) - _, err = o.fetchRef(ctx, discoveryBackoff, cc.Spec.ControlPlane.Ref) - errs = append(errs, err) + + // syself change. + // Fetch inline control plane refs (if defined). + if cc.Spec.ControlPlane.Ref != nil { + _, err = o.fetchRef(ctx, discoveryBackoff, cc.Spec.ControlPlane.Ref) + errs = append(errs, err) + } if cc.Spec.ControlPlane.MachineInfrastructure != nil { _, err = o.fetchRef(ctx, discoveryBackoff, cc.Spec.ControlPlane.MachineInfrastructure.Ref) errs = append(errs, err) } + // Fetch refs from named control plane classes. 
+ for _, cpClass := range cc.Spec.ControlPlaneClasses { + if cpClass.Ref != nil { + _, err = o.fetchRef(ctx, discoveryBackoff, cpClass.Ref) + errs = append(errs, err) + } + if cpClass.MachineInfrastructure != nil { + _, err = o.fetchRef(ctx, discoveryBackoff, cpClass.MachineInfrastructure.Ref) + errs = append(errs, err) + } + } + for _, mdClass := range cc.Spec.Workers.MachineDeployments { _, err = o.fetchRef(ctx, discoveryBackoff, mdClass.Template.Infrastructure.Ref) errs = append(errs, err) diff --git a/cmd/clusterctl/client/cluster/topology.go b/cmd/clusterctl/client/cluster/topology.go index 2c1c329fce51..74804cf7b3c9 100644 --- a/cmd/clusterctl/client/cluster/topology.go +++ b/cmd/clusterctl/client/cluster/topology.go @@ -782,21 +782,37 @@ func equalRef(a, b *corev1.ObjectReference) bool { } func clusterClassUsesTemplate(cc *clusterv1.ClusterClass, templateRef *corev1.ObjectReference) bool { + // syself change. // Check infrastructure ref. if equalRef(cc.Spec.Infrastructure.Ref, templateRef) { return true } - // Check control plane ref. - if equalRef(cc.Spec.ControlPlane.Ref, templateRef) { + + // Check inline control plane ref. + if cc.Spec.ControlPlane.Ref != nil && equalRef(cc.Spec.ControlPlane.Ref, templateRef) { return true } - // If control plane uses machine, check it. + + // If inline control plane uses machine, check it. if cc.Spec.ControlPlane.MachineInfrastructure != nil && cc.Spec.ControlPlane.MachineInfrastructure.Ref != nil { if equalRef(cc.Spec.ControlPlane.MachineInfrastructure.Ref, templateRef) { return true } } + // Check named control plane classes. 
+ for _, cpClass := range cc.Spec.ControlPlaneClasses { + if cpClass.Ref != nil && equalRef(cpClass.Ref, templateRef) { + return true + } + + if cpClass.MachineInfrastructure != nil && cpClass.MachineInfrastructure.Ref != nil { + if equalRef(cpClass.MachineInfrastructure.Ref, templateRef) { + return true + } + } + } + for _, mdClass := range cc.Spec.Workers.MachineDeployments { // Check bootstrap template ref. if equalRef(mdClass.Template.Bootstrap.Ref, templateRef) { diff --git a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml index 7d8f42572c73..534f16af7255 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml @@ -490,6 +490,13 @@ spec: controlPlane is a reference to a local struct that holds the details for provisioning the Control Plane for the Cluster. properties: + class: + default: "" + description: |- + class denotes a type of control-plane node present in the cluster. + When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + within the list and can be referenced from the Cluster topology. + type: string machineHealthCheck: description: |- machineHealthCheck defines a MachineHealthCheck for this ControlPlaneClass. @@ -830,6 +837,370 @@ spec: required: - ref type: object + controlPlaneClasses: + description: |- + controlPlaneClasses is a list of named control plane classes that can be referenced + from the Cluster topology. Each class defines a distinct control plane + configuration. The class name MUST be unique within this list. + When classes is defined, the Cluster topology can reference a specific + control plane class by name. + syself new field. + items: + description: ControlPlaneClass defines the class for the control + plane. + properties: + class: + default: "" + description: |- + class denotes a type of control-plane node present in the cluster. 
+ When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + within the list and can be referenced from the Cluster topology. + type: string + machineHealthCheck: + description: |- + machineHealthCheck defines a MachineHealthCheck for this ControlPlaneClass. + This field is supported if and only if the ControlPlane provider template + referenced above is Machine based and supports setting replicas. + properties: + maxUnhealthy: + anyOf: + - type: integer + - type: string + description: |- + maxUnhealthy specifies the maximum number of unhealthy machines allowed. + Any further remediation is only allowed if at most "maxUnhealthy" machines selected by + "selector" are not healthy. + x-kubernetes-int-or-string: true + nodeStartupTimeout: + description: |- + nodeStartupTimeout allows to set the maximum time for MachineHealthCheck + to consider a Machine unhealthy if a corresponding Node isn't associated + through a `Spec.ProviderID` field. + + The duration set in this field is compared to the greatest of: + - Cluster's infrastructure ready condition timestamp (if and when available) + - Control Plane's initialized condition timestamp (if and when available) + - Machine's infrastructure ready condition timestamp (if and when available) + - Machine's metadata creation timestamp + + Defaults to 10 minutes. + If you wish to disable this feature, set the value explicitly to 0. + type: string + remediationTemplate: + description: |- + remediationTemplate is a reference to a remediation template + provided by an infrastructure provider. + + This field is completely optional, when filled, the MachineHealthCheck controller + creates a new object from the template referenced and hands off remediation of the machine to + a controller that lives outside of Cluster API. + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + unhealthyConditions: + description: |- + unhealthyConditions contains a list of the conditions that determine + whether a node is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the node is unhealthy. + items: + description: |- + UnhealthyCondition represents a Node condition type and value with a timeout + specified as a duration. 
When the named condition has been in the given + status for at least the timeout value, a node is considered unhealthy. + properties: + status: + description: status of the condition, one of True, + False, Unknown. + minLength: 1 + type: string + timeout: + description: |- + timeout is the duration that a node must be in a given status for, + after which the node is considered unhealthy. + For example, with a value of "1h", the node must match the status + for at least 1 hour before being considered unhealthy. + type: string + type: + description: type of Node condition + minLength: 1 + type: string + required: + - status + - timeout + - type + type: object + maxItems: 100 + type: array + unhealthyRange: + description: |- + unhealthyRange specifies the range of unhealthy machines allowed. + Any further remediation is only allowed if the number of machines selected by "selector" as not healthy + is within the range of "unhealthyRange". Takes precedence over maxUnhealthy. + Eg. "[3-5]" - This means that remediation will be allowed only when: + (a) there are at least 3 unhealthy machines (and) + (b) there are at most 5 unhealthy machines + maxLength: 32 + minLength: 1 + pattern: ^\[[0-9]+-[0-9]+\]$ + type: string + type: object + machineInfrastructure: + description: |- + machineInfrastructure defines the metadata and infrastructure information + for control plane machines. + + This field is supported if and only if the control plane provider template + referenced above is Machine based and supports setting replicas. + properties: + ref: + description: |- + ref is a required reference to a custom resource + offered by a provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - ref + type: object + metadata: + description: |- + metadata is the metadata applied to the ControlPlane and the Machines of the ControlPlane + if the ControlPlaneTemplate referenced is machine based. If not, it is applied only to the + ControlPlane. + At runtime this metadata is merged with the corresponding metadata from the topology. + + This field is supported if and only if the control plane provider template + referenced is Machine based. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + labels is a map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + namingStrategy: + description: namingStrategy allows changing the naming pattern + used when creating the control plane provider object. + properties: + template: + description: |- + template defines the template to use for generating the name of the ControlPlane object. + If not defined, it will fallback to `{{ .cluster.name }}-{{ .random }}`. + If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will + get concatenated with a random suffix of length 5. + The templating mechanism provides the following arguments: + * `.cluster.name`: The name of the cluster object. + * `.random`: A random alphanumeric string, without vowels, of length 5. + maxLength: 1024 + minLength: 1 + type: string + type: object + nodeDeletionTimeout: + description: |- + nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + Defaults to 10 seconds. + NOTE: This value can be overridden while defining a Cluster.Topology. + type: string + nodeDrainTimeout: + description: |- + nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. 
+ The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + NOTE: This value can be overridden while defining a Cluster.Topology. + type: string + nodeVolumeDetachTimeout: + description: |- + nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. + NOTE: This value can be overridden while defining a Cluster.Topology. + type: string + readinessGates: + description: |- + readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + + This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. + + NOTE: This field is considered only for computing v1beta2 conditions. + NOTE: If a Cluster defines a custom list of readinessGates for the control plane, + such list overrides readinessGates defined in this field. + NOTE: Specific control plane provider implementations might automatically extend the list of readinessGates; + e.g. the kubeadm control provider adds ReadinessGates for the APIServerPodHealthy, SchedulerPodHealthy conditions, etc. + items: + description: MachineReadinessGate contains the type of a Machine + condition to be used as a readiness gate. + properties: + conditionType: + description: |- + conditionType refers to a condition with matching type in the Machine's condition list. + If the conditions doesn't exist, it will be treated as unknown. + Note: Both Cluster API conditions or conditions added by 3rd party controllers can be used as readiness gates. 
+ maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + polarity: + description: |- + polarity of the conditionType specified in this readinessGate. + Valid values are Positive, Negative and omitted. + When omitted, the default behaviour will be Positive. + A positive polarity means that the condition should report a true status under normal conditions. + A negative polarity means that the condition should report a false status under normal conditions. + enum: + - Positive + - Negative + type: string + required: + - conditionType + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - conditionType + x-kubernetes-list-type: map + ref: + description: |- + ref is a required reference to a custom resource + offered by a provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - ref + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - class + x-kubernetes-list-type: map infrastructure: description: |- infrastructure is a reference to a provider-specific template that holds @@ -1010,6 +1381,22 @@ spec: Note: this will match the controlPlane and also the controlPlane machineInfrastructure (depending on the kind and apiVersion). type: boolean + controlPlaneClass: + description: |- + controlPlaneClass selects templates referenced in specific ControlPlaneClasses in + .spec.controlPlane.classes. + syself new field. + properties: + names: + description: names selects templates by class + names. + items: + maxLength: 256 + minLength: 1 + type: string + maxItems: 100 + type: array + type: object infrastructureCluster: description: infrastructureCluster selects templates referenced in .spec.infrastructure. diff --git a/config/crd/bases/cluster.x-k8s.io_clusters.yaml b/config/crd/bases/cluster.x-k8s.io_clusters.yaml index 2694d431cf02..a843104071c4 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusters.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusters.yaml @@ -979,6 +979,14 @@ spec: controlPlane: description: controlPlane describes the cluster control plane. properties: + class: + description: |- + class is the name of the ControlPlaneClass used to create the set of control plane nodes. 
+ This should match one of the control plane classes defined in the ClusterClass object. + syself new field. + maxLength: 256 + minLength: 1 + type: string machineHealthCheck: description: |- machineHealthCheck allows to enable, disable and override diff --git a/exp/runtime/hooks/api/v1alpha1/topologymutation_variable_types.go b/exp/runtime/hooks/api/v1alpha1/topologymutation_variable_types.go index edc7f7b29cfb..e459a0080a99 100644 --- a/exp/runtime/hooks/api/v1alpha1/topologymutation_variable_types.go +++ b/exp/runtime/hooks/api/v1alpha1/topologymutation_variable_types.go @@ -123,6 +123,11 @@ type ControlPlaneBuiltins struct { // +optional Name string `json:"name,omitempty"` + // class is the class name of the ControlPlane, + // to which the current template belongs to. + // +optional + Class string `json:"class,omitempty"` + // replicas is the value of the replicas field of the ControlPlane object. // +optional Replicas *int64 `json:"replicas,omitempty"` diff --git a/exp/runtime/hooks/api/v1alpha1/zz_generated.openapi.go b/exp/runtime/hooks/api/v1alpha1/zz_generated.openapi.go index 627c45b105eb..d4416dbf8971 100644 --- a/exp/runtime/hooks/api/v1alpha1/zz_generated.openapi.go +++ b/exp/runtime/hooks/api/v1alpha1/zz_generated.openapi.go @@ -1027,6 +1027,13 @@ func schema_runtime_hooks_api_v1alpha1_ControlPlaneBuiltins(ref common.Reference Format: "", }, }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "class is the class name of the ControlPlane, to which the current template belongs to.", + Type: []string{"string"}, + Format: "", + }, + }, "replicas": { SchemaProps: spec.SchemaProps{ Description: "replicas is the value of the replicas field of the ControlPlane object.", diff --git a/exp/topology/desiredstate/desired_state.go b/exp/topology/desiredstate/desired_state.go index 082f6e826635..e5c6a67cf0e0 100644 --- a/exp/topology/desiredstate/desired_state.go +++ b/exp/topology/desiredstate/desired_state.go @@ -229,7 +229,9 @@ func 
computeInfrastructureCluster(_ context.Context, s *scope.Scope) (*unstructu // that should be referenced by the ControlPlane object. func computeControlPlaneInfrastructureMachineTemplate(_ context.Context, s *scope.Scope) (*unstructured.Unstructured, error) { template := s.Blueprint.ControlPlane.InfrastructureMachineTemplate - templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref + + // syself change + templateClonedFromRef := s.Blueprint.ControlPlaneClass.MachineInfrastructure.Ref cluster := s.Current.Cluster // Check if the current control plane object has a machineTemplate.infrastructureRef already defined. @@ -259,7 +261,9 @@ func computeControlPlaneInfrastructureMachineTemplate(_ context.Context, s *scop // corresponding template defined in the blueprint. func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) { template := s.Blueprint.ControlPlane.Template - templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.Ref + + // syself change + templateClonedFromRef := s.Blueprint.ControlPlaneClass.Ref cluster := s.Current.Cluster currentRef := cluster.Spec.ControlPlaneRef @@ -267,7 +271,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf // We merge the labels and annotations from topology and ClusterClass. // We also add the cluster-name and the topology owned labels, so they are propagated down. 
topologyMetadata := s.Blueprint.Topology.ControlPlane.Metadata - clusterClassMetadata := s.Blueprint.ClusterClass.Spec.ControlPlane.Metadata + clusterClassMetadata := s.Blueprint.ControlPlaneClass.Metadata controlPlaneLabels := util.MergeMap(topologyMetadata.Labels, clusterClassMetadata.Labels) if controlPlaneLabels == nil { @@ -279,8 +283,8 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf controlPlaneAnnotations := util.MergeMap(topologyMetadata.Annotations, clusterClassMetadata.Annotations) nameTemplate := "{{ .cluster.name }}-{{ .random }}" - if s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy != nil && s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy.Template != nil { - nameTemplate = *s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy.Template + if s.Blueprint.ControlPlaneClass.NamingStrategy != nil && s.Blueprint.ControlPlaneClass.NamingStrategy.Template != nil { + nameTemplate = *s.Blueprint.ControlPlaneClass.NamingStrategy.Template } controlPlane, err := templateToObject(templateToInput{ @@ -365,14 +369,15 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf if err := contract.ControlPlane().MachineTemplate().ReadinessGates().Set(controlPlane, s.Blueprint.Topology.ControlPlane.ReadinessGates); err != nil { return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().ReadinessGates().Path()) } - } else if s.Blueprint.ClusterClass.Spec.ControlPlane.ReadinessGates != nil { - if err := contract.ControlPlane().MachineTemplate().ReadinessGates().Set(controlPlane, s.Blueprint.ClusterClass.Spec.ControlPlane.ReadinessGates); err != nil { + // syself change + } else if s.Blueprint.ControlPlaneClass.ReadinessGates != nil { + if err := contract.ControlPlane().MachineTemplate().ReadinessGates().Set(controlPlane, s.Blueprint.ControlPlaneClass.ReadinessGates); err != nil { return nil, errors.Wrapf(err, "failed to set %s in 
the ControlPlane object", contract.ControlPlane().MachineTemplate().ReadinessGates().Path()) } } // If it is required to manage the NodeDrainTimeout for the control plane, set the corresponding field. - nodeDrainTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeDrainTimeout + nodeDrainTimeout := s.Blueprint.ControlPlaneClass.NodeDrainTimeout if s.Blueprint.Topology.ControlPlane.NodeDrainTimeout != nil { nodeDrainTimeout = s.Blueprint.Topology.ControlPlane.NodeDrainTimeout } @@ -383,7 +388,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf } // If it is required to manage the NodeVolumeDetachTimeout for the control plane, set the corresponding field. - nodeVolumeDetachTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeVolumeDetachTimeout + nodeVolumeDetachTimeout := s.Blueprint.ControlPlaneClass.NodeVolumeDetachTimeout if s.Blueprint.Topology.ControlPlane.NodeVolumeDetachTimeout != nil { nodeVolumeDetachTimeout = s.Blueprint.Topology.ControlPlane.NodeVolumeDetachTimeout } @@ -394,7 +399,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf } // If it is required to manage the NodeDeletionTimeout for the control plane, set the corresponding field. - nodeDeletionTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeDeletionTimeout + nodeDeletionTimeout := s.Blueprint.ControlPlaneClass.NodeDeletionTimeout if s.Blueprint.Topology.ControlPlane.NodeDeletionTimeout != nil { nodeDeletionTimeout = s.Blueprint.Topology.ControlPlane.NodeDeletionTimeout } diff --git a/exp/topology/scope/blueprint.go b/exp/topology/scope/blueprint.go index 08e734505f80..60802a44458b 100644 --- a/exp/topology/scope/blueprint.go +++ b/exp/topology/scope/blueprint.go @@ -31,6 +31,12 @@ type ClusterBlueprint struct { // ClusterClass holds the ClusterClass object referenced from Cluster.Spec.Topology. 
ClusterClass *clusterv1.ClusterClass + // syself change + // ControlPlaneClass holds the resolved ControlPlaneClass from the ClusterClass. + // This is the ControlPlaneClass selected based on the Cluster topology's control plane class field. + // If the topology does not specify a class, this is the inline ControlPlaneClass from ClusterClass.Spec.ControlPlane. + ControlPlaneClass *clusterv1.ControlPlaneClass + // InfrastructureClusterTemplate holds the InfrastructureClusterTemplate referenced from ClusterClass. InfrastructureClusterTemplate *unstructured.Unstructured @@ -93,7 +99,11 @@ type MachinePoolBlueprint struct { // HasControlPlaneInfrastructureMachine checks whether the clusterClass mandates the controlPlane has infrastructureMachines. func (b *ClusterBlueprint) HasControlPlaneInfrastructureMachine() bool { - return b.ClusterClass.Spec.ControlPlane.MachineInfrastructure != nil && b.ClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref != nil + if b.ControlPlaneClass == nil { + return false + } + + return b.ControlPlaneClass.MachineInfrastructure != nil && b.ControlPlaneClass.MachineInfrastructure.Ref != nil } // IsControlPlaneMachineHealthCheckEnabled returns true if a MachineHealthCheck should be created for the control plane. @@ -102,19 +112,31 @@ func (b *ClusterBlueprint) IsControlPlaneMachineHealthCheckEnabled() bool { if !b.HasControlPlaneInfrastructureMachine() { return false } - // If no MachineHealthCheck is defined in the ClusterClass or in the Cluster Topology then return false. - if b.ClusterClass.Spec.ControlPlane.MachineHealthCheck == nil && + + // If no MachineHealthCheck is defined in the resolved ControlPlaneClass or in the Cluster Topology then return false. + cpClassMHC := b.controlPlaneClassMachineHealthCheck() + if cpClassMHC == nil && (b.Topology.ControlPlane.MachineHealthCheck == nil || b.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.IsZero()) { return false } + // If `enable` is not set then consider it as true. 
A MachineHealthCheck will be created from either ClusterClass or Cluster Topology. if b.Topology.ControlPlane.MachineHealthCheck == nil || b.Topology.ControlPlane.MachineHealthCheck.Enable == nil { return true } + // If `enable` is explicitly set, use the value. return *b.Topology.ControlPlane.MachineHealthCheck.Enable } +// controlPlaneClassMachineHealthCheck returns the MachineHealthCheck from the resolved ControlPlaneClass. +func (b *ClusterBlueprint) controlPlaneClassMachineHealthCheck() *clusterv1.MachineHealthCheckClass { + if b.ControlPlaneClass == nil { + return nil + } + return b.ControlPlaneClass.MachineHealthCheck +} + // ControlPlaneMachineHealthCheckClass returns the MachineHealthCheckClass that should be used to create the MachineHealthCheck object. func (b *ClusterBlueprint) ControlPlaneMachineHealthCheckClass() *clusterv1.MachineHealthCheckClass { if b.Topology.ControlPlane.MachineHealthCheck != nil && !b.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.IsZero() { @@ -123,9 +145,9 @@ func (b *ClusterBlueprint) ControlPlaneMachineHealthCheckClass() *clusterv1.Mach return b.ControlPlane.MachineHealthCheck } -// HasControlPlaneMachineHealthCheck returns true if the ControlPlaneClass has both MachineInfrastructure and a MachineHealthCheck defined. +// HasControlPlaneMachineHealthCheck returns true if the resolved ControlPlaneClass has both MachineInfrastructure and a MachineHealthCheck defined. func (b *ClusterBlueprint) HasControlPlaneMachineHealthCheck() bool { - return b.HasControlPlaneInfrastructureMachine() && b.ClusterClass.Spec.ControlPlane.MachineHealthCheck != nil + return b.HasControlPlaneInfrastructureMachine() && b.controlPlaneClassMachineHealthCheck() != nil } // IsMachineDeploymentMachineHealthCheckEnabled returns true if a MachineHealthCheck should be created for the MachineDeployment. 
diff --git a/hack/create-capi-op-yaml.sh b/hack/create-capi-op-yaml.sh new file mode 100755 index 000000000000..0e7ac0617719 --- /dev/null +++ b/hack/create-capi-op-yaml.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Bash Strict Mode: https://github.com/guettli/bash-strict-mode +trap 'echo -e "\n🤷 🚨 šŸ”„ Warning: A command has failed. Exiting the script. Line was ($0:$LINENO): $(sed -n "${LINENO}p" "$0" 2>/dev/null || true) šŸ”„ 🚨 🤷 "; exit 3' ERR +set -Eeuo pipefail + +if [[ -z ${RELEASE_TAG:-} ]]; then + echo "RELEASE_TAG is missing" + exit 1 +fi +tmpdir="$(mktemp -d)" +gzip -c out/core-components.yaml >"${tmpdir}/core-components.yaml.gz" + +kubectl create configmap capi-core-custom-v1.10.7 -n mgt-system \ + --from-file=metadata=out/metadata.yaml \ + --from-file=components="${tmpdir}/core-components.yaml.gz" \ + --dry-run=client -o yaml | + kubectl label --local -f - \ + provider-components=core-custom \ + provider.cluster.x-k8s.io/version=v1.10.7 \ + -o yaml | + kubectl annotate --local -f - \ + provider.cluster.x-k8s.io/compressed=true \ + -o yaml \ + >out/operator-configmaps.yaml + +cat >out/operator-provider-patches.yaml </dev/null || true) šŸ”„ 🚨 🤷 "; exit 3' ERR +set -Eeuo pipefail + +if [[ -z ${RELEASE_TAG:-} ]]; then + echo "failed: RELEASE_TAG is not set. Use v1.10.7-syself.XX" + exit 1 +fi + +if [[ -z ${IGNORE_GIT_DIRTY:-} ]]; then + if [[ -n "$(git status --porcelain --untracked-files=normal)" ]]; then + echo "failed: git working tree is dirty (modified, staged, or untracked files present)" + git status --short --untracked-files=normal + exit 1 + fi + if ! 
git rev-parse --verify --quiet "refs/tags/${RELEASE_TAG}^{commit}" >/dev/null; then + echo "failed: RELEASE_TAG '${RELEASE_TAG}' does not exist as a git tag" + echo + echo "use: git tag $RELEASE_TAG" + echo + exit 1 + fi + + release_tag_commit="$(git rev-parse "refs/tags/${RELEASE_TAG}^{commit}")" + head_commit="$(git rev-parse HEAD)" + if [[ "${release_tag_commit}" != "${head_commit}" ]]; then + echo "failed: RELEASE_TAG '${RELEASE_TAG}' points to ${release_tag_commit}, but HEAD is ${head_commit}" + exit 1 + fi +fi + +export PROD_REGISTRY=ghcr.io/syself/cluster-api-prod +export STAGING_REGISTRY=ghcr.io/syself/cluster-api-staging +export REGISTRY=$PROD_REGISTRY +export TAG=$RELEASE_TAG + +echo $RELEASE_TAG + +make release +make docker-build-all +make docker-push-all + +./hack/create-capi-op-yaml.sh diff --git a/internal/apis/core/v1alpha4/zz_generated.conversion.go b/internal/apis/core/v1alpha4/zz_generated.conversion.go index c5bbfcfd5d90..a9ab2137ec6b 100644 --- a/internal/apis/core/v1alpha4/zz_generated.conversion.go +++ b/internal/apis/core/v1alpha4/zz_generated.conversion.go @@ -650,6 +650,7 @@ func autoConvert_v1beta1_ClusterClassSpec_To_v1alpha4_ClusterClassSpec(in *v1bet if err := Convert_v1beta1_ControlPlaneClass_To_v1alpha4_ControlPlaneClass(&in.ControlPlane, &out.ControlPlane, s); err != nil { return err } + // WARNING: in.ControlPlaneClasses requires manual conversion: does not exist in peer-type if err := Convert_v1beta1_WorkersClass_To_v1alpha4_WorkersClass(&in.Workers, &out.Workers, s); err != nil { return err } @@ -852,6 +853,7 @@ func autoConvert_v1beta1_ControlPlaneClass_To_v1alpha4_ControlPlaneClass(in *v1b if err := Convert_v1beta1_ObjectMeta_To_v1alpha4_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err } + // WARNING: in.Class requires manual conversion: does not exist in peer-type if err := Convert_v1beta1_LocalObjectTemplate_To_v1alpha4_LocalObjectTemplate(&in.LocalObjectTemplate, &out.LocalObjectTemplate, s); err != nil { 
return err } @@ -882,6 +884,7 @@ func autoConvert_v1beta1_ControlPlaneTopology_To_v1alpha4_ControlPlaneTopology(i if err := Convert_v1beta1_ObjectMeta_To_v1alpha4_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err } + // WARNING: in.Class requires manual conversion: does not exist in peer-type out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) // WARNING: in.MachineHealthCheck requires manual conversion: does not exist in peer-type // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type diff --git a/internal/controllers/clusterclass/clusterclass_controller.go b/internal/controllers/clusterclass/clusterclass_controller.go index 2b0f1cd994fe..21c62efb4a14 100644 --- a/internal/controllers/clusterclass/clusterclass_controller.go +++ b/internal/controllers/clusterclass/clusterclass_controller.go @@ -95,7 +95,6 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt ). WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)). Complete(r) - if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } @@ -225,6 +224,17 @@ func (r *Reconciler) reconcileExternalReferences(ctx context.Context, s *scope) refs = append(refs, clusterClass.Spec.ControlPlane.MachineInfrastructure.Ref) } + // collect the same for control-plane classes as well. 
+ for _, cpClass := range clusterClass.Spec.ControlPlaneClasses { + if cpClass.Ref != nil { + refs = append(refs, cpClass.Ref) + } + + if cpClass.MachineInfrastructure != nil && cpClass.MachineInfrastructure.Ref != nil { + refs = append(refs, cpClass.MachineInfrastructure.Ref) + } + } + for _, mdClass := range clusterClass.Spec.Workers.MachineDeployments { if mdClass.Template.Bootstrap.Ref != nil { refs = append(refs, mdClass.Template.Bootstrap.Ref) @@ -394,7 +404,8 @@ func addNewStatusVariable(variable clusterv1.ClusterClassVariable, from string) Metadata: variable.Metadata, Schema: variable.Schema, }, - }} + }, + } } func addDefinitionToExistingStatusVariable(variable clusterv1.ClusterClassVariable, from string, existingVariable *clusterv1.ClusterClassStatusVariable) *clusterv1.ClusterClassStatusVariable { diff --git a/internal/controllers/topology/cluster/blueprint.go b/internal/controllers/topology/cluster/blueprint.go index dfa8ddbbddbf..7a5ab0a18c94 100644 --- a/internal/controllers/topology/cluster/blueprint.go +++ b/internal/controllers/topology/cluster/blueprint.go @@ -44,16 +44,26 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste return nil, errors.Wrapf(err, "failed to get infrastructure cluster template for ClusterClass %s", klog.KObj(blueprint.ClusterClass)) } - // Get ClusterClass.spec.controlPlane. + // syself change + // Resolve the ControlPlaneClass to use. + // If the Cluster topology specifies a control plane class, look it up from ClusterClass.spec.controlPlane.classes. + // Otherwise, fall back to the inline ClusterClass.spec.controlPlane definition. 
+ controlPlaneClass, err := resolveControlPlaneClass(cluster, clusterClass) + if err != nil { + return nil, errors.Wrapf(err, "failed to resolve control plane class for ClusterClass %s", klog.KObj(blueprint.ClusterClass)) + } + + blueprint.ControlPlaneClass = controlPlaneClass + blueprint.ControlPlane = &scope.ControlPlaneBlueprint{} - blueprint.ControlPlane.Template, err = r.getReference(ctx, blueprint.ClusterClass.Spec.ControlPlane.Ref) + blueprint.ControlPlane.Template, err = r.getReference(ctx, controlPlaneClass.Ref) if err != nil { return nil, errors.Wrapf(err, "failed to get control plane template for ClusterClass %s", klog.KObj(blueprint.ClusterClass)) } // If the clusterClass mandates the controlPlane has infrastructureMachines, read it. if blueprint.HasControlPlaneInfrastructureMachine() { - blueprint.ControlPlane.InfrastructureMachineTemplate, err = r.getReference(ctx, blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref) + blueprint.ControlPlane.InfrastructureMachineTemplate, err = r.getReference(ctx, blueprint.ControlPlaneClass.MachineInfrastructure.Ref) if err != nil { return nil, errors.Wrapf(err, "failed to get control plane's machine template for ClusterClass %s", klog.KObj(blueprint.ClusterClass)) } @@ -61,7 +71,7 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste // If the clusterClass defines a valid MachineHealthCheck (including a defined MachineInfrastructure) set the blueprint MachineHealthCheck. 
if blueprint.HasControlPlaneMachineHealthCheck() { - blueprint.ControlPlane.MachineHealthCheck = blueprint.ClusterClass.Spec.ControlPlane.MachineHealthCheck + blueprint.ControlPlane.MachineHealthCheck = blueprint.ControlPlaneClass.MachineHealthCheck } // Loop over the machine deployments classes in ClusterClass @@ -120,3 +130,28 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste return blueprint, nil } + +// resolveControlPlaneClass determines which ControlPlaneClass to use based on the Cluster topology. +// If the Cluster topology specifies a control plane class name, it is looked up from +// ClusterClass.spec.controlPlane.classes. +// Otherwise, the inline ClusterClass.spec.controlPlane is used. +func resolveControlPlaneClass(cluster *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) (*clusterv1.ControlPlaneClass, error) { + // If the topology doesn't specify a class, use the inline definition. + if cluster.Spec.Topology.ControlPlane.Class == "" { + return &clusterClass.Spec.ControlPlane, nil + } + + // Look up the class from spec.controlPlaneClasses. 
+ for i := range clusterClass.Spec.ControlPlaneClasses { + if clusterClass.Spec.ControlPlaneClasses[i].Class == cluster.Spec.Topology.ControlPlane.Class { + return &clusterClass.Spec.ControlPlaneClasses[i], nil + } + } + + return nil, errors.Errorf( + "control plane class %q not found in ClusterClass %s/%s", + cluster.Spec.Topology.ControlPlane.Class, + clusterClass.Namespace, + clusterClass.Name, + ) +} diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go index ed78569e9aba..b7f4eca11e9d 100644 --- a/internal/controllers/topology/cluster/patches/engine.go +++ b/internal/controllers/topology/cluster/patches/engine.go @@ -166,8 +166,9 @@ func addVariablesForPatch(blueprint *scope.ClusterBlueprint, desired *scope.Clus } req.Variables = globalVariables + // syself change // Calculate the Control Plane variables. - controlPlaneVariables, err := variables.ControlPlane(&blueprint.Topology.ControlPlane, desired.ControlPlane.Object, desired.ControlPlane.InfrastructureMachineTemplate, patchVariableDefinitions) + controlPlaneVariables, err := variables.ControlPlane(&blueprint.Topology.ControlPlane, desired.ControlPlane.Object, desired.ControlPlane.InfrastructureMachineTemplate, blueprint.Topology.ControlPlane.Class, patchVariableDefinitions) if err != nil { return errors.Wrapf(err, "failed to calculate ControlPlane variables") } diff --git a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go index fe09c91100de..af8e69ab9d00 100644 --- a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go +++ b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go @@ -155,6 +155,33 @@ func matchesSelector(req *runtimehooksv1.GeneratePatchesRequestItem, templateVar } } + // syself change + // Check if the request is for a ControlPlane or the InfrastructureMachineTemplate 
of a ControlPlaneClass. + if selector.MatchResources.ControlPlaneClass != nil { + if (req.HolderReference.Kind == "Cluster" && req.HolderReference.FieldPath == "spec.controlPlaneRef") || + req.HolderReference.FieldPath == strings.Join(contract.ControlPlane().MachineTemplate().InfrastructureRef().Path(), ".") { + // Read the builtin.controlPlane.class variable. + templateCPClassJSON, err := patchvariables.GetVariableValue(templateVariables, "builtin.controlPlane.class") + + // If the builtin variable could be read. + if err == nil { + // If templateCPClass matches one of the configured ControlPlaneClasses. + for _, cpClass := range selector.MatchResources.ControlPlaneClass.Names { + if cpClass == "*" || string(templateCPClassJSON.Raw) == strconv.Quote(cpClass) { + return true + } + unquoted, _ := strconv.Unquote(string(templateCPClassJSON.Raw)) + if strings.HasPrefix(cpClass, "*") && strings.HasSuffix(unquoted, strings.TrimPrefix(cpClass, "*")) { + return true + } + if strings.HasSuffix(cpClass, "*") && strings.HasPrefix(unquoted, strings.TrimSuffix(cpClass, "*")) { + return true + } + } + } + } + } + // Check if the request is for a BootstrapConfigTemplate or an InfrastructureMachineTemplate // of one of the configured MachineDeploymentClasses. if selector.MatchResources.MachineDeploymentClass != nil { diff --git a/internal/controllers/topology/cluster/patches/variables/variables.go b/internal/controllers/topology/cluster/patches/variables/variables.go index 5f27c0f53cab..3c4bd43773f1 100644 --- a/internal/controllers/topology/cluster/patches/variables/variables.go +++ b/internal/controllers/topology/cluster/patches/variables/variables.go @@ -94,7 +94,7 @@ func Global(clusterTopology *clusterv1.Topology, cluster *clusterv1.Cluster, pat } // ControlPlane returns variables that apply to templates belonging to the ControlPlane. 
-func ControlPlane(cpTopology *clusterv1.ControlPlaneTopology, cp, cpInfrastructureMachineTemplate *unstructured.Unstructured, patchVariableDefinitions map[string]bool) ([]runtimehooksv1.Variable, error) { +func ControlPlane(cpTopology *clusterv1.ControlPlaneTopology, cp, cpInfrastructureMachineTemplate *unstructured.Unstructured, controlPlaneClass string, patchVariableDefinitions map[string]bool) ([]runtimehooksv1.Variable, error) { variables := []runtimehooksv1.Variable{} // Add variables overrides for the ControlPlane. @@ -107,10 +107,12 @@ func ControlPlane(cpTopology *clusterv1.ControlPlaneTopology, cp, cpInfrastructu } } + // syself change // Construct builtin variable. builtin := runtimehooksv1.Builtins{ ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ - Name: cp.GetName(), + Name: cp.GetName(), + Class: controlPlaneClass, }, } diff --git a/internal/controllers/topology/cluster/patches/variables/variables_test.go b/internal/controllers/topology/cluster/patches/variables/variables_test.go index ffbf79d65034..d50088ea4315 100644 --- a/internal/controllers/topology/cluster/patches/variables/variables_test.go +++ b/internal/controllers/topology/cluster/patches/variables/variables_test.go @@ -623,7 +623,7 @@ func TestControlPlane(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := ControlPlane(tt.controlPlaneTopology, tt.controlPlane, tt.controlPlaneInfrastructureMachineTemplate, tt.variableDefinitionsForPatch) + got, err := ControlPlane(tt.controlPlaneTopology, tt.controlPlane, tt.controlPlaneInfrastructureMachineTemplate, "", tt.variableDefinitionsForPatch) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(BeComparableTo(tt.want)) }) diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go index b7626623e8d9..b9c1aebf0f98 100644 --- a/internal/controllers/topology/cluster/reconcile_state.go +++ b/internal/controllers/topology/cluster/reconcile_state.go 
@@ -311,13 +311,36 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope.Scope) return false, errors.Wrapf(err, "failed to reconcile %s %s", s.Desired.ControlPlane.InfrastructureMachineTemplate.GetKind(), klog.KObj(s.Desired.ControlPlane.InfrastructureMachineTemplate)) } - // Create or update the MachineInfrastructureTemplate of the control plane. + // syself change: determine if control plane class has changed. + currentCPInfraMachineTemplate := s.Current.ControlPlane.InfrastructureMachineTemplate + cpInfraKindChanged := false + if s.Current.ControlPlane.InfrastructureMachineTemplate != nil && + s.Desired.ControlPlane.InfrastructureMachineTemplate != nil && + s.Current.ControlPlane.InfrastructureMachineTemplate.GetKind() != s.Desired.ControlPlane.InfrastructureMachineTemplate.GetKind() { + cpInfraKindChanged = true + log.Info( + "Control plane infrastructure kind changed", + "currentKind", s.Current.ControlPlane.InfrastructureMachineTemplate.GetKind(), + "desiredKind", s.Desired.ControlPlane.InfrastructureMachineTemplate.GetKind(), + ) + + // Setting currentCPInfraMachineTemplate as nil so that method reconcileReferencedTemplate do not + // try to patch the existing template. Otherwise patching will fail as we cannot patch the `Kind` + // of an object. 
+ currentCPInfraMachineTemplate = nil + } + + compatibilityChecker := check.ObjectsAreCompatible + if cpInfraKindChanged { + compatibilityChecker = check.ObjectsAreInTheSameNamespace + } + createdInfrastructureTemplate, err := r.reconcileReferencedTemplate(ctx, reconcileReferencedTemplateInput{ cluster: s.Current.Cluster, ref: cpInfraRef, - current: s.Current.ControlPlane.InfrastructureMachineTemplate, + current: currentCPInfraMachineTemplate, desired: s.Desired.ControlPlane.InfrastructureMachineTemplate, - compatibilityChecker: check.ObjectsAreCompatible, + compatibilityChecker: compatibilityChecker, templateNamePrefix: topologynames.ControlPlaneInfrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name), }) if err != nil { @@ -1177,10 +1200,12 @@ func (r *Reconciler) reconcileReferencedObject(ctx context.Context, in reconcile return true, nil } + // syself change + // Commenting out the strictly compatible check to allow changes in controlplane InfrastructureMachine template. // Check if the current and desired referenced object are compatible. - if allErrs := check.ObjectsAreStrictlyCompatible(in.current, in.desired); len(allErrs) > 0 { - return false, allErrs.ToAggregate() - } + // if allErrs := check.ObjectsAreStrictlyCompatible(in.current, in.desired); len(allErrs) > 0 { + // return false, allErrs.ToAggregate() + // } log = log.WithValues(in.current.GetKind(), klog.KObj(in.current)) ctx = ctrl.LoggerInto(ctx, log) diff --git a/internal/topology/check/compatibility.go b/internal/topology/check/compatibility.go index ead9f4b130a0..4445623edc36 100644 --- a/internal/topology/check/compatibility.go +++ b/internal/topology/check/compatibility.go @@ -197,14 +197,29 @@ func ClusterClassesAreCompatible(current, desired *clusterv1.ClusterClass) field allErrs = append(allErrs, LocalObjectTemplatesAreCompatible(current.Spec.Infrastructure, desired.Spec.Infrastructure, field.NewPath("spec", "infrastructure"))...) 
- // Validate control plane changes desired a compatible way. - allErrs = append(allErrs, LocalObjectTemplatesAreCompatible(current.Spec.ControlPlane.LocalObjectTemplate, desired.Spec.ControlPlane.LocalObjectTemplate, - field.NewPath("spec", "controlPlane"))...) - if desired.Spec.ControlPlane.MachineInfrastructure != nil && current.Spec.ControlPlane.MachineInfrastructure != nil { - allErrs = append(allErrs, LocalObjectTemplatesAreCompatible(*current.Spec.ControlPlane.MachineInfrastructure, *desired.Spec.ControlPlane.MachineInfrastructure, - field.NewPath("spec", "controlPlane", "machineInfrastructure"))...) + // Validate inline control plane changes in a compatible way (only if both have refs set). + if current.Spec.ControlPlane.LocalObjectTemplate.Ref != nil && desired.Spec.ControlPlane.LocalObjectTemplate.Ref != nil { + allErrs = append(allErrs, LocalObjectTemplatesAreCompatible(current.Spec.ControlPlane.LocalObjectTemplate, desired.Spec.ControlPlane.LocalObjectTemplate, + field.NewPath("spec", "controlPlane"))...) + if desired.Spec.ControlPlane.MachineInfrastructure != nil && current.Spec.ControlPlane.MachineInfrastructure != nil { + allErrs = append(allErrs, LocalObjectTemplatesAreCompatible(*current.Spec.ControlPlane.MachineInfrastructure, *desired.Spec.ControlPlane.MachineInfrastructure, + field.NewPath("spec", "controlPlane", "machineInfrastructure"))...) + } } + // Validate named control plane class changes in a compatible way. + for _, desiredClass := range desired.Spec.ControlPlaneClasses { + for i, currentClass := range current.Spec.ControlPlaneClasses { + if desiredClass.Class == currentClass.Class { + classPath := field.NewPath("spec", "controlPlaneClasses").Index(i) + allErrs = append(allErrs, LocalObjectTemplatesAreCompatible(currentClass.LocalObjectTemplate, desiredClass.LocalObjectTemplate, classPath)...) 
+ if desiredClass.MachineInfrastructure != nil && currentClass.MachineInfrastructure != nil { + allErrs = append(allErrs, LocalObjectTemplatesAreCompatible(*currentClass.MachineInfrastructure, *desiredClass.MachineInfrastructure, + classPath.Child("machineInfrastructure"))...) + } + } + } + } // Validate changes to MachineDeployments. allErrs = append(allErrs, MachineDeploymentClassesAreCompatible(current, desired)...) @@ -236,6 +251,25 @@ func MachineDeploymentClassesAreCompatible(current, desired *clusterv1.ClusterCl return allErrs } +// ControlPlaneClassesAreUnique checks that no two ControlPlaneClasses in a ClusterClass share a name. +func ControlPlaneClassesAreUnique(clusterClass *clusterv1.ClusterClass) field.ErrorList { + var allErrs field.ErrorList + classes := sets.Set[string]{} + for i, class := range clusterClass.Spec.ControlPlaneClasses { + if classes.Has(class.Class) { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "controlplane", "classes").Index(i).Child("class"), + class.Class, + fmt.Sprintf("ControlPlane class must be unique. ControlPlane with class %q is defined more than once", class.Class), + ), + ) + } + classes.Insert(class.Class) + } + return allErrs +} + // MachineDeploymentClassesAreUnique checks that no two MachineDeploymentClasses in a ClusterClass share a name. func MachineDeploymentClassesAreUnique(clusterClass *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList @@ -424,6 +458,33 @@ func MachinePoolTopologiesAreValidAndDefinedInClusterClass(desired *clusterv1.Cl return allErrs } +// ControlPlaneTopologyClassIsDefinedInClusterClass checks that the control plane class referenced +// in the Cluster topology (if set) is defined in the ClusterClass. +// syself change. 
+func ControlPlaneTopologyClassIsDefinedInClusterClass(desired *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) field.ErrorList { + var allErrs field.ErrorList + cpClass := desired.Spec.Topology.ControlPlane.Class + if cpClass == "" { + return nil + } + + for _, class := range clusterClass.Spec.ControlPlaneClasses { + if class.Class == cpClass { + return nil + } + } + + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "topology", "controlPlane", "class"), + cpClass, + fmt.Sprintf("ControlPlaneClass with name %q does not exist in ClusterClass %q", + cpClass, clusterClass.Name), + ), + ) + return allErrs +} + // ClusterClassReferencesAreValid checks that each template reference in the ClusterClass is valid . func ClusterClassReferencesAreValid(clusterClass *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList @@ -432,8 +493,24 @@ func ClusterClassReferencesAreValid(clusterClass *clusterv1.ClusterClass) field. field.NewPath("spec", "infrastructure"))...) allErrs = append(allErrs, LocalObjectTemplateIsValid(&clusterClass.Spec.ControlPlane.LocalObjectTemplate, clusterClass.Namespace, field.NewPath("spec", "controlPlane"))...) - if clusterClass.Spec.ControlPlane.MachineInfrastructure != nil { - allErrs = append(allErrs, LocalObjectTemplateIsValid(clusterClass.Spec.ControlPlane.MachineInfrastructure, clusterClass.Namespace, field.NewPath("spec", "controlPlane", "machineInfrastructure"))...) + + // validate the inline control plane definition. + cpPath := field.NewPath("spec", "controlPlane") + if clusterClass.Spec.ControlPlane.LocalObjectTemplate.Ref != nil { + allErrs = append(allErrs, LocalObjectTemplateIsValid(&clusterClass.Spec.ControlPlane.LocalObjectTemplate, clusterClass.Namespace, cpPath)...) 
+ if clusterClass.Spec.ControlPlane.MachineInfrastructure != nil { + allErrs = append(allErrs, LocalObjectTemplateIsValid(clusterClass.Spec.ControlPlane.MachineInfrastructure, clusterClass.Namespace, cpPath.Child("machineInfrastructure"))...) + } + } + + // validate each named control plane class. + for i := range clusterClass.Spec.ControlPlaneClasses { + cpc := clusterClass.Spec.ControlPlaneClasses[i] + classPath := cpPath.Child("controlPlaneClasses").Index(i) + allErrs = append(allErrs, LocalObjectTemplateIsValid(&cpc.LocalObjectTemplate, clusterClass.Namespace, classPath)...) + if cpc.MachineInfrastructure != nil { + allErrs = append(allErrs, LocalObjectTemplateIsValid(cpc.MachineInfrastructure, clusterClass.Namespace, classPath.Child("machineInfrastructure"))...) + } } for i := range clusterClass.Spec.Workers.MachineDeployments { diff --git a/internal/topology/check/compatibility_test.go b/internal/topology/check/compatibility_test.go index 9b22f7c0aba3..96e9c1cee41d 100644 --- a/internal/topology/check/compatibility_test.go +++ b/internal/topology/check/compatibility_test.go @@ -993,6 +993,73 @@ func TestMachinePoolClassesAreCompatible(t *testing.T) { } } +func TestControlPlaneClassesAreUnique(t *testing.T) { + tests := []struct { + name string + clusterClass *clusterv1.ClusterClass + wantErr bool + }{ + { + name: "pass if ControlPlaneClasses are unique", + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + ControlPlaneClasses: []clusterv1.ControlPlaneClass{ + {Class: "aa"}, + {Class: "bb"}, + }, + }, + }, + wantErr: false, + }, + { + name: "pass if no ControlPlaneClasses are defined", + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + ControlPlane: clusterv1.ControlPlaneClass{}, + }, + }, + wantErr: false, + }, + { + name: "fail if ControlPlaneClasses are duplicated", + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + ControlPlaneClasses: []clusterv1.ControlPlaneClass{ + {Class: 
"aa"}, + {Class: "aa"}, + }, + }, + }, + wantErr: true, + }, + { + name: "fail if multiple ControlPlaneClasses are identical", + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + ControlPlaneClasses: []clusterv1.ControlPlaneClass{ + {Class: "aa"}, + {Class: "aa"}, + {Class: "aa"}, + {Class: "aa"}, + }, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + allErrs := ControlPlaneClassesAreUnique(tt.clusterClass) + if tt.wantErr { + g.Expect(allErrs).ToNot(BeEmpty()) + return + } + g.Expect(allErrs).To(BeEmpty()) + }) + } +} + func TestMachineDeploymentClassesAreUnique(t *testing.T) { tests := []struct { name string diff --git a/internal/webhooks/cluster.go b/internal/webhooks/cluster.go index 627411c0f659..30879baa34a3 100644 --- a/internal/webhooks/cluster.go +++ b/internal/webhooks/cluster.go @@ -77,8 +77,10 @@ type Cluster struct { decoder admission.Decoder } -var _ webhook.CustomDefaulter = &Cluster{} -var _ webhook.CustomValidator = &Cluster{} +var ( + _ webhook.CustomDefaulter = &Cluster{} + _ webhook.CustomValidator = &Cluster{} +) var errClusterClassNotReconciled = errors.New("ClusterClass is not successfully reconciled") @@ -673,10 +675,18 @@ func validateMachineHealthChecks(cluster *clusterv1.Cluster, clusterClass *clust if cluster.Spec.Topology.ControlPlane.MachineHealthCheck != nil { fldPath := field.NewPath("spec", "topology", "controlPlane", "machineHealthCheck") + // syself change + // Resolve the control plane class to use for validation. + cpClass, err := resolveControlPlaneClassForValidation(cluster, clusterClass) + if err != nil { + allErrs = append(allErrs, field.InternalError(fldPath, err)) + return allErrs + } + // Validate ControlPlane MachineHealthCheck if defined. 
if !cluster.Spec.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.IsZero() { - // Ensure ControlPlane does not define a MachineHealthCheck if the ClusterClass does not define MachineInfrastructure. - if clusterClass.Spec.ControlPlane.MachineInfrastructure == nil { + // Ensure ControlPlane does not define a MachineHealthCheck if the ControlPlaneClass does not define MachineInfrastructure. + if cpClass.MachineInfrastructure == nil { allErrs = append(allErrs, field.Forbidden( fldPath, "can be set only if spec.controlPlane.machineInfrastructure is set in ClusterClass", @@ -688,12 +698,8 @@ func validateMachineHealthChecks(cluster *clusterv1.Cluster, clusterClass *clust // If MachineHealthCheck is explicitly enabled then make sure that a MachineHealthCheck definition is // available either in the Cluster topology or in the ClusterClass. - // (One of these definitions will be used in the controller to create the MachineHealthCheck) - - // Check if the machineHealthCheck is explicitly enabled in the ControlPlaneTopology. if cluster.Spec.Topology.ControlPlane.MachineHealthCheck.Enable != nil && *cluster.Spec.Topology.ControlPlane.MachineHealthCheck.Enable { - // Ensure the MHC is defined in at least one of the ControlPlaneTopology of the Cluster or the ControlPlaneClass of the ClusterClass. 
- if cluster.Spec.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.IsZero() && clusterClass.Spec.ControlPlane.MachineHealthCheck == nil { + if cluster.Spec.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.IsZero() && cpClass.MachineHealthCheck == nil { allErrs = append(allErrs, field.Forbidden( fldPath.Child("enable"), fmt.Sprintf("cannot be set to %t as MachineHealthCheck definition is not available in the Cluster topology or the ClusterClass", *cluster.Spec.Topology.ControlPlane.MachineHealthCheck.Enable), @@ -749,6 +755,15 @@ func machineDeploymentClassOfName(clusterClass *clusterv1.ClusterClass, name str return nil } +func controlPlaneClassOfName(clusterClass *clusterv1.ClusterClass, name string) *clusterv1.ControlPlaneClass { + for _, cpClass := range clusterClass.Spec.ControlPlaneClasses { + if cpClass.Class == name { + return &cpClass + } + } + return nil +} + // validateCIDRBlocks ensures the passed CIDR is valid. func validateCIDRBlocks(fldPath *field.Path, cidrs []string) field.ErrorList { var allErrs field.ErrorList @@ -928,6 +943,10 @@ func ValidateClusterForClusterClass(cluster *clusterv1.Cluster, clusterClass *cl if clusterClass == nil { return field.ErrorList{field.InternalError(field.NewPath(""), errors.New("ClusterClass can not be nil"))} } + + // syself change + allErrs = append(allErrs, check.ControlPlaneTopologyClassIsDefinedInClusterClass(cluster, clusterClass)...) + allErrs = append(allErrs, check.MachineDeploymentTopologiesAreValidAndDefinedInClusterClass(cluster, clusterClass)...) allErrs = append(allErrs, check.MachinePoolTopologiesAreValidAndDefinedInClusterClass(cluster, clusterClass)...) @@ -1138,3 +1157,24 @@ func validateAutoscalerAnnotationsForCluster(cluster *clusterv1.Cluster, cluster } return allErrs } + +// resolveControlPlaneClassForValidation returns the ControlPlaneClass to use for validation. 
+// If the Cluster topology specifies a control plane class, it is looked up from ClusterClass.spec.controlPlane.classes. +// Otherwise, the inline ClusterClass.spec.controlPlane definition is used. +func resolveControlPlaneClassForValidation(cluster *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) (*clusterv1.ControlPlaneClass, error) { + if cluster.Spec.Topology.ControlPlane.Class == "" { + return &clusterClass.Spec.ControlPlane, nil + } + + for i := range clusterClass.Spec.ControlPlaneClasses { + if clusterClass.Spec.ControlPlaneClasses[i].Class == cluster.Spec.Topology.ControlPlane.Class { + return &clusterClass.Spec.ControlPlaneClasses[i], nil + } + } + + return nil, fmt.Errorf("control plane class %q not found in ClusterClass %s/%s", + cluster.Spec.Topology.ControlPlane.Class, + clusterClass.Namespace, + clusterClass.Name, + ) +} diff --git a/internal/webhooks/clusterclass.go b/internal/webhooks/clusterclass.go index 1b190c9425a7..d8739d3ea745 100644 --- a/internal/webhooks/clusterclass.go +++ b/internal/webhooks/clusterclass.go @@ -60,8 +60,10 @@ type ClusterClass struct { Client client.Reader } -var _ webhook.CustomDefaulter = &ClusterClass{} -var _ webhook.CustomValidator = &ClusterClass{} +var ( + _ webhook.CustomDefaulter = &ClusterClass{} + _ webhook.CustomValidator = &ClusterClass{} +) // Default implements defaulting for ClusterClass create and update. func (webhook *ClusterClass) Default(_ context.Context, obj runtime.Object) error { @@ -71,16 +73,28 @@ func (webhook *ClusterClass) Default(_ context.Context, obj runtime.Object) erro } // Default all namespaces in the references to the object namespace. defaultNamespace(in.Spec.Infrastructure.Ref, in.Namespace) - defaultNamespace(in.Spec.ControlPlane.Ref, in.Namespace) + // syself change + // Default inline control plane refs. 
+ defaultNamespace(in.Spec.ControlPlane.Ref, in.Namespace) if in.Spec.ControlPlane.MachineInfrastructure != nil { defaultNamespace(in.Spec.ControlPlane.MachineInfrastructure.Ref, in.Namespace) } - if in.Spec.ControlPlane.MachineHealthCheck != nil { defaultNamespace(in.Spec.ControlPlane.MachineHealthCheck.RemediationTemplate, in.Namespace) } + // Default named control plane class refs. + for i := range in.Spec.ControlPlaneClasses { + defaultNamespace(in.Spec.ControlPlaneClasses[i].Ref, in.Namespace) + if in.Spec.ControlPlaneClasses[i].MachineInfrastructure != nil { + defaultNamespace(in.Spec.ControlPlaneClasses[i].MachineInfrastructure.Ref, in.Namespace) + } + if in.Spec.ControlPlaneClasses[i].MachineHealthCheck != nil { + defaultNamespace(in.Spec.ControlPlaneClasses[i].MachineHealthCheck.RemediationTemplate, in.Namespace) + } + } + for i := range in.Spec.Workers.MachineDeployments { defaultNamespace(in.Spec.Workers.MachineDeployments[i].Template.Bootstrap.Ref, in.Namespace) defaultNamespace(in.Spec.Workers.MachineDeployments[i].Template.Infrastructure.Ref, in.Namespace) @@ -252,6 +266,49 @@ func validateUpdatesToMachineHealthCheckClasses(clusters []clusterv1.Cluster, ol } } + // syself change + // For each ControlPlaneClass check if the MachineHealthCheck definition is dropped. + // For each ControlPlaneClass check if the MachineHealthCheck definition is dropped. + for _, newCPClass := range newClusterClass.Spec.ControlPlaneClasses { + oldCPClass := controlPlaneClassOfName(oldClusterClass, newCPClass.Class) + if oldCPClass == nil { + // New ControlPlaneClass. Nothing to validate. + continue + } + + // If the MachineHealthCheck was dropped then check that no cluster is using it. 
+ if oldCPClass.MachineHealthCheck != nil && newCPClass.MachineHealthCheck == nil { + clustersUsingMHC := []string{} + + for _, cluster := range clusters { + if cluster.Spec.Topology == nil { + continue + } + if cluster.Spec.Topology.ControlPlane.Class != newCPClass.Class { + continue + } + + if cluster.Spec.Topology.ControlPlane.MachineHealthCheck != nil && + cluster.Spec.Topology.ControlPlane.MachineHealthCheck.Enable != nil && + *cluster.Spec.Topology.ControlPlane.MachineHealthCheck.Enable && + cluster.Spec.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.IsZero() { + + clustersUsingMHC = append(clustersUsingMHC, cluster.Name) + } + } + + if len(clustersUsingMHC) != 0 { + allErrs = append(allErrs, field.Forbidden( + field.NewPath("spec", "controlPlaneClasses").Key(newCPClass.Class).Child("machineHealthCheck"), + fmt.Sprintf( + "MachineHealthCheck cannot be deleted because it is used by Cluster(s) %q", + strings.Join(clustersUsingMHC, ","), + ), + )) + } + } + } + // For each MachineDeploymentClass check if the MachineHealthCheck definition is dropped. 
for _, newMdClass := range newClusterClass.Spec.Workers.MachineDeployments { oldMdClass := machineDeploymentClassOfName(oldClusterClass, newMdClass.Class) @@ -473,12 +530,34 @@ func validateNamingStrategies(clusterClass *clusterv1.ClusterClass) field.ErrorL } } - for _, md := range clusterClass.Spec.Workers.MachineDeployments { + // syself change + // Validate naming strategies for each control plane class + for i, cp := range clusterClass.Spec.ControlPlaneClasses { + if cp.NamingStrategy == nil || cp.NamingStrategy.Template == nil { + continue + } + name, err := topologynames.ControlPlaneNameGenerator(*cp.NamingStrategy.Template, "cluster").GenerateName() + templateFldPath := field.NewPath("spec", "controlPlaneClasses").Index(i).Child("namingStrategy", "template") + if err != nil { + allErrs = append(allErrs, + field.Invalid( + templateFldPath, + *cp.NamingStrategy.Template, + fmt.Sprintf("invalid ControlPlaneClass name template: %v", err), + )) + } else { + for _, err := range validation.IsDNS1123Subdomain(name) { + allErrs = append(allErrs, field.Invalid(templateFldPath, *cp.NamingStrategy.Template, err)) + } + } + } + + for i, md := range clusterClass.Spec.Workers.MachineDeployments { if md.NamingStrategy == nil || md.NamingStrategy.Template == nil { continue } name, err := topologynames.MachineDeploymentNameGenerator(*md.NamingStrategy.Template, "cluster", "mdtopology").GenerateName() - templateFldPath := field.NewPath("spec", "workers", "machineDeployments").Key(md.Class).Child("namingStrategy", "template") + templateFldPath := field.NewPath("spec", "workers", "machineDeployments").Index(i).Child("namingStrategy", "template") if err != nil { allErrs = append(allErrs, field.Invalid( @@ -493,12 +572,12 @@ func validateNamingStrategies(clusterClass *clusterv1.ClusterClass) field.ErrorL } } - for _, mp := range clusterClass.Spec.Workers.MachinePools { + for i, mp := range clusterClass.Spec.Workers.MachinePools { if mp.NamingStrategy == nil || 
mp.NamingStrategy.Template == nil { continue } name, err := topologynames.MachinePoolNameGenerator(*mp.NamingStrategy.Template, "cluster", "mptopology").GenerateName() - templateFldPath := field.NewPath("spec", "workers", "machinePools").Key(mp.Class).Child("namingStrategy", "template") + templateFldPath := field.NewPath("spec", "workers", "machinePools").Index(i).Child("namingStrategy", "template") if err != nil { allErrs = append(allErrs, field.Invalid( @@ -528,7 +607,8 @@ func validateMachineHealthCheckClass(fldPath *field.Path, namepace string, m *cl UnhealthyConditions: m.UnhealthyConditions, UnhealthyRange: m.UnhealthyRange, RemediationTemplate: m.RemediationTemplate, - }} + }, + } return (&MachineHealthCheck{}).validateCommonFields(&mhc, fldPath) } @@ -539,6 +619,15 @@ func validateClusterClassMetadata(clusterClass *clusterv1.ClusterClass) field.Er for _, m := range clusterClass.Spec.Workers.MachineDeployments { allErrs = append(allErrs, m.Template.Metadata.Validate(field.NewPath("spec", "workers", "machineDeployments").Key(m.Class).Child("template", "metadata"))...) } + + // syself change + // Validate metadata for each control plane class + for i, cp := range clusterClass.Spec.ControlPlaneClasses { + allErrs = append(allErrs, + cp.Metadata.Validate( + field.NewPath("spec", "controlPlaneClasses").Index(i).Child("metadata"))...) + } + for _, m := range clusterClass.Spec.Workers.MachinePools { allErrs = append(allErrs, m.Template.Metadata.Validate(field.NewPath("spec", "workers", "machinePools").Key(m.Class).Child("template", "metadata"))...) } diff --git a/internal/webhooks/patch_validation.go b/internal/webhooks/patch_validation.go index ee1fc561da0e..3a0f6659b2e8 100644 --- a/internal/webhooks/patch_validation.go +++ b/internal/webhooks/patch_validation.go @@ -167,6 +167,7 @@ func validateSelectors(selector clusterv1.PatchSelector, class *clusterv1.Cluste // Return an error if none of the possible selectors are enabled. 
if !(selector.MatchResources.InfrastructureCluster || selector.MatchResources.ControlPlane || + (selector.MatchResources.ControlPlaneClass != nil && len(selector.MatchResources.ControlPlaneClass.Names) > 0) || (selector.MatchResources.MachineDeploymentClass != nil && len(selector.MatchResources.MachineDeploymentClass.Names) > 0) || (selector.MatchResources.MachinePoolClass != nil && len(selector.MatchResources.MachinePoolClass.Names) > 0)) { return append(allErrs, @@ -205,6 +206,43 @@ func validateSelectors(selector clusterv1.PatchSelector, class *clusterv1.Cluste } } + // Validate selectors for control plane classes + if selector.MatchResources.ControlPlaneClass != nil && len(selector.MatchResources.ControlPlaneClass.Names) > 0 { + for i, name := range selector.MatchResources.ControlPlaneClass.Names { + match := false + err := validateSelectorName(name, path, "controlPlaneClass", i) + if err != nil { + allErrs = append(allErrs, err) + break + } + for _, cp := range class.Spec.ControlPlaneClasses { + var matches bool + if cp.Class == name || name == "*" { + matches = true + } else if strings.HasPrefix(name, "*") && strings.HasSuffix(cp.Class, strings.TrimPrefix(name, "*")) { + matches = true + } else if strings.HasSuffix(name, "*") && strings.HasPrefix(cp.Class, strings.TrimSuffix(name, "*")) { + matches = true + } + + if matches { + if selectorMatchTemplate(selector, cp.Ref) || + (cp.MachineInfrastructure != nil && selectorMatchTemplate(selector, cp.MachineInfrastructure.Ref)) { + match = true + break + } + } + } + if !match { + allErrs = append(allErrs, field.Invalid( + path.Child("matchResources", "controlPlaneClass", "names").Index(i), + name, + "selector is enabled but matches neither the controlPlane ref nor the controlPlane machineInfrastructure ref of a ControlPlane class", + )) + } + } + } + if selector.MatchResources.MachineDeploymentClass != nil && len(selector.MatchResources.MachineDeploymentClass.Names) > 0 { for i, name := range 
selector.MatchResources.MachineDeploymentClass.Names { match := false