diff --git a/pkg/apis/apis.go b/pkg/apis/apis.go index dee36b2a9..0d0c35ff1 100644 --- a/pkg/apis/apis.go +++ b/pkg/apis/apis.go @@ -22,7 +22,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/aws/karpenter-core/pkg/apis/settings" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/utils/functional" ) @@ -30,7 +29,6 @@ import ( var ( // Builder includes all types within the apis package Builder = runtime.NewSchemeBuilder( - v1alpha5.SchemeBuilder.AddToScheme, v1beta1.SchemeBuilder.AddToScheme, ) // AddToScheme may be used to add all resources defined in the project to a Scheme @@ -38,19 +36,13 @@ var ( Settings = []settings.Injectable{&settings.Settings{}} ) -//go:generate controller-gen crd:generateEmbeddedObjectMeta=true object:headerFile="../../hack/boilerplate.go.txt" paths="./..." output:crd:artifacts:config=crds +//go:generate controller-gen crd object:headerFile="../../hack/boilerplate.go.txt" paths="./..." output:crd:artifacts:config=crds var ( - //go:embed crds/karpenter.sh_provisioners.yaml - ProvisionerCRD []byte - //go:embed crds/karpenter.sh_machines.yaml - MachineCRD []byte //go:embed crds/karpenter.sh_nodepools.yaml NodePoolCRD []byte //go:embed crds/karpenter.sh_nodeclaims.yaml NodeClaimCRD []byte CRDs = []*v1.CustomResourceDefinition{ - lo.Must(functional.Unmarshal[v1.CustomResourceDefinition](ProvisionerCRD)), - lo.Must(functional.Unmarshal[v1.CustomResourceDefinition](MachineCRD)), lo.Must(functional.Unmarshal[v1.CustomResourceDefinition](NodePoolCRD)), lo.Must(functional.Unmarshal[v1.CustomResourceDefinition](NodeClaimCRD)), } diff --git a/pkg/apis/crds/karpenter.sh_machines.yaml b/pkg/apis/crds/karpenter.sh_machines.yaml deleted file mode 100644 index fa753deb5..000000000 --- a/pkg/apis/crds/karpenter.sh_machines.yaml +++ /dev/null @@ -1,354 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.13.0 - name: machines.karpenter.sh -spec: - group: karpenter.sh - names: - categories: - - karpenter - kind: Machine - listKind: MachineList - plural: machines - singular: machine - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.labels.node\.kubernetes\.io/instance-type - name: Type - type: string - - jsonPath: .metadata.labels.topology\.kubernetes\.io/zone - name: Zone - type: string - - jsonPath: .status.nodeName - name: Node - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .metadata.labels.karpenter\.sh/capacity-type - name: Capacity - priority: 1 - type: string - - jsonPath: .metadata.labels.karpenter\.sh/provisioner-name - name: Provisioner - priority: 1 - type: string - - jsonPath: .spec.machineTemplateRef.name - name: Template - priority: 1 - type: string - name: v1alpha5 - schema: - openAPIV3Schema: - description: Machine is the Schema for the Machines API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. 
Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: MachineSpec describes the desired state of the Machine - properties: - kubelet: - description: Kubelet are options passed to the kubelet when provisioning - nodes - properties: - clusterDNS: - description: clusterDNS is a list of IP addresses for the cluster - DNS server. Note that not all providers may use all addresses. - items: - type: string - type: array - containerRuntime: - description: ContainerRuntime is the container runtime to be used - with your worker nodes. - type: string - cpuCFSQuota: - description: CPUCFSQuota enables CPU CFS quota enforcement for - containers that specify CPU limits. - type: boolean - evictionHard: - additionalProperties: - type: string - description: EvictionHard is the map of signal names to quantities - that define hard eviction thresholds - type: object - evictionMaxPodGracePeriod: - description: EvictionMaxPodGracePeriod is the maximum allowed - grace period (in seconds) to use when terminating pods in response - to soft eviction thresholds being met. - format: int32 - type: integer - evictionSoft: - additionalProperties: - type: string - description: EvictionSoft is the map of signal names to quantities - that define soft eviction thresholds - type: object - evictionSoftGracePeriod: - additionalProperties: - type: string - description: EvictionSoftGracePeriod is the map of signal names - to quantities that define grace periods for each eviction signal - type: object - imageGCHighThresholdPercent: - description: ImageGCHighThresholdPercent is the percent of disk - usage after which image garbage collection is always run. The - percent is calculated by dividing this field value by 100, so - this field must be between 0 and 100, inclusive. When specified, - the value must be greater than ImageGCLowThresholdPercent. - format: int32 - maximum: 100 - minimum: 0 - type: integer - imageGCLowThresholdPercent: - description: ImageGCLowThresholdPercent is the percent of disk - usage before which image garbage collection is never run. Lowest - disk usage to garbage collect to. The percent is calculated - by dividing this field value by 100, so the field value must - be between 0 and 100, inclusive. When specified, the value must - be less than imageGCHighThresholdPercent - format: int32 - maximum: 100 - minimum: 0 - type: integer - kubeReserved: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: KubeReserved contains resources reserved for Kubernetes - system components. - type: object - maxPods: - description: MaxPods is an override for the maximum number of - pods that can run on a worker node instance. - format: int32 - minimum: 0 - type: integer - podsPerCore: - description: PodsPerCore is an override for the number of pods - that can run on a worker node instance based on the number of - cpu cores. This value cannot exceed MaxPods, so, if MaxPods - is a lower value, that value will be used. 
- format: int32 - minimum: 0 - type: integer - systemReserved: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: SystemReserved contains resources reserved for OS - system daemons and kernel memory. - type: object - type: object - machineTemplateRef: - description: MachineTemplateRef is a reference to an object that defines - provider specific configuration - properties: - apiVersion: - description: API version of the referent - type: string - kind: - description: 'Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' - type: string - name: - description: 'Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - required: - - name - type: object - requirements: - description: Requirements are layered with Labels and applied to every - node. - items: - description: A node selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If the operator is In - or NotIn, the values array must be non-empty. If the operator - is Exists or DoesNotExist, the values array must be empty. - If the operator is Gt or Lt, the values array must have a - single element, which will be interpreted as an integer. This - array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - resources: - description: Resources models the resource requirements for the Machine - to launch - properties: - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Requests describes the minimum required resources - for the Machine to launch - type: object - type: object - startupTaints: - description: StartupTaints are taints that are applied to nodes upon - startup which are expected to be removed automatically within a - short period of time, typically by a DaemonSet that tolerates the - taint. These are commonly used by daemonsets to allow initialization - and enforce startup ordering. StartupTaints are ignored for provisioning - purposes in that pods are not required to tolerate a StartupTaint - in order to have nodes provisioned for them. - items: - description: The node this Taint is attached to has the "effect" - on any pod that does not tolerate the Taint. - properties: - effect: - description: Required. The effect of the taint on pods that - do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule - and NoExecute. - type: string - key: - description: Required. The taint key to be applied to a node. - type: string - timeAdded: - description: TimeAdded represents the time at which the taint - was added. It is only written for NoExecute taints. - format: date-time - type: string - value: - description: The taint value corresponding to the taint key. 
- type: string - required: - - effect - - key - type: object - type: array - taints: - description: Taints will be applied to the machine's node. - items: - description: The node this Taint is attached to has the "effect" - on any pod that does not tolerate the Taint. - properties: - effect: - description: Required. The effect of the taint on pods that - do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule - and NoExecute. - type: string - key: - description: Required. The taint key to be applied to a node. - type: string - timeAdded: - description: TimeAdded represents the time at which the taint - was added. It is only written for NoExecute taints. - format: date-time - type: string - value: - description: The taint value corresponding to the taint key. - type: string - required: - - effect - - key - type: object - type: array - type: object - status: - description: MachineStatus defines the observed state of Machine - properties: - allocatable: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Allocatable is the estimated allocatable capacity of - the machine - type: object - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Capacity is the estimated full capacity of the machine - type: object - conditions: - description: Conditions contains signals for health and readiness - items: - description: 'Condition defines a readiness condition for a Knative - resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' - properties: - lastTransitionTime: - description: LastTransitionTime is the last time the condition - transitioned from one status to another. We use VolatileTime - in place of metav1.Time to exclude this from creating equality.Semantic - differences (all other things held constant). - type: string - message: - description: A human readable message indicating details about - the transition. - type: string - reason: - description: The reason for the condition's last transition. - type: string - severity: - description: Severity with which to treat failures of this type - of condition. When this is not specified, it defaults to Error. - type: string - status: - description: Status of the condition, one of True, False, Unknown. - type: string - type: - description: Type of condition. 
- type: string - required: - - status - - type - type: object - type: array - nodeName: - description: NodeName is the name of the corresponding node object - type: string - providerID: - description: ProviderID of the corresponding node object - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/pkg/apis/crds/karpenter.sh_provisioners.yaml b/pkg/apis/crds/karpenter.sh_provisioners.yaml deleted file mode 100644 index 192412474..000000000 --- a/pkg/apis/crds/karpenter.sh_provisioners.yaml +++ /dev/null @@ -1,381 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.13.0 - name: provisioners.karpenter.sh -spec: - group: karpenter.sh - names: - categories: - - karpenter - kind: Provisioner - listKind: ProvisionerList - plural: provisioners - singular: provisioner - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.providerRef.name - name: Template - type: string - - jsonPath: .spec.weight - name: Weight - priority: 1 - type: string - name: v1alpha5 - schema: - openAPIV3Schema: - description: Provisioner is the Schema for the Provisioners API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ProvisionerSpec is the top level provisioner specification. - Provisioners launch nodes in response to pods that are unschedulable. - A single provisioner is capable of managing a diverse set of nodes. - Node properties are determined from a combination of provisioner and - pod scheduling constraints. - properties: - annotations: - additionalProperties: - type: string - description: Annotations are applied to every node. - type: object - consolidation: - description: Consolidation are the consolidation parameters - properties: - enabled: - description: Enabled enables consolidation if it has been set - type: boolean - type: object - kubeletConfiguration: - description: KubeletConfiguration are options passed to the kubelet - when provisioning nodes - properties: - clusterDNS: - description: clusterDNS is a list of IP addresses for the cluster - DNS server. Note that not all providers may use all addresses. - items: - type: string - type: array - containerRuntime: - description: ContainerRuntime is the container runtime to be used - with your worker nodes. - type: string - cpuCFSQuota: - description: CPUCFSQuota enables CPU CFS quota enforcement for - containers that specify CPU limits. 
- type: boolean - evictionHard: - additionalProperties: - type: string - description: EvictionHard is the map of signal names to quantities - that define hard eviction thresholds - type: object - evictionMaxPodGracePeriod: - description: EvictionMaxPodGracePeriod is the maximum allowed - grace period (in seconds) to use when terminating pods in response - to soft eviction thresholds being met. - format: int32 - type: integer - evictionSoft: - additionalProperties: - type: string - description: EvictionSoft is the map of signal names to quantities - that define soft eviction thresholds - type: object - evictionSoftGracePeriod: - additionalProperties: - type: string - description: EvictionSoftGracePeriod is the map of signal names - to quantities that define grace periods for each eviction signal - type: object - imageGCHighThresholdPercent: - description: ImageGCHighThresholdPercent is the percent of disk - usage after which image garbage collection is always run. The - percent is calculated by dividing this field value by 100, so - this field must be between 0 and 100, inclusive. When specified, - the value must be greater than ImageGCLowThresholdPercent. - format: int32 - maximum: 100 - minimum: 0 - type: integer - imageGCLowThresholdPercent: - description: ImageGCLowThresholdPercent is the percent of disk - usage before which image garbage collection is never run. Lowest - disk usage to garbage collect to. The percent is calculated - by dividing this field value by 100, so the field value must - be between 0 and 100, inclusive. When specified, the value must - be less than imageGCHighThresholdPercent - format: int32 - maximum: 100 - minimum: 0 - type: integer - kubeReserved: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: KubeReserved contains resources reserved for Kubernetes - system components. - type: object - maxPods: - description: MaxPods is an override for the maximum number of - pods that can run on a worker node instance. - format: int32 - minimum: 0 - type: integer - podsPerCore: - description: PodsPerCore is an override for the number of pods - that can run on a worker node instance based on the number of - cpu cores. This value cannot exceed MaxPods, so, if MaxPods - is a lower value, that value will be used. - format: int32 - minimum: 0 - type: integer - systemReserved: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: SystemReserved contains resources reserved for OS - system daemons and kernel memory. - type: object - type: object - labels: - additionalProperties: - type: string - description: Labels are layered with Requirements and applied to every - node. - type: object - limits: - description: Limits define a set of bounds for provisioning capacity. - properties: - resources: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Resources contains all the allocatable resources - that Karpenter supports for limiting. 
- type: object - type: object - provider: - description: Provider contains fields specific to your cloudprovider. - type: object - x-kubernetes-preserve-unknown-fields: true - providerRef: - description: ProviderRef is a reference to a dedicated CRD for the - chosen provider, that holds additional configuration options - properties: - apiVersion: - description: API version of the referent - type: string - kind: - description: 'Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' - type: string - name: - description: 'Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - required: - - name - type: object - requirements: - description: Requirements are layered with Labels and applied to every - node. - items: - description: A node selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If the operator is In - or NotIn, the values array must be non-empty. If the operator - is Exists or DoesNotExist, the values array must be empty. - If the operator is Gt or Lt, the values array must have a - single element, which will be interpreted as an integer. This - array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - startupTaints: - description: StartupTaints are taints that are applied to nodes upon - startup which are expected to be removed automatically within a - short period of time, typically by a DaemonSet that tolerates the - taint. These are commonly used by daemonsets to allow initialization - and enforce startup ordering. StartupTaints are ignored for provisioning - purposes in that pods are not required to tolerate a StartupTaint - in order to have nodes provisioned for them. - items: - description: The node this Taint is attached to has the "effect" - on any pod that does not tolerate the Taint. - properties: - effect: - description: Required. The effect of the taint on pods that - do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule - and NoExecute. - type: string - key: - description: Required. The taint key to be applied to a node. - type: string - timeAdded: - description: TimeAdded represents the time at which the taint - was added. It is only written for NoExecute taints. - format: date-time - type: string - value: - description: The taint value corresponding to the taint key. - type: string - required: - - effect - - key - type: object - type: array - taints: - description: Taints will be applied to every node launched by the - Provisioner. If specified, the provisioner will not provision nodes - for pods that do not have matching tolerations. Additional taints - will be created that match pod tolerations on a per-node basis. - items: - description: The node this Taint is attached to has the "effect" - on any pod that does not tolerate the Taint. - properties: - effect: - description: Required. The effect of the taint on pods that - do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule - and NoExecute. - type: string - key: - description: Required. 
The taint key to be applied to a node. - type: string - timeAdded: - description: TimeAdded represents the time at which the taint - was added. It is only written for NoExecute taints. - format: date-time - type: string - value: - description: The taint value corresponding to the taint key. - type: string - required: - - effect - - key - type: object - type: array - ttlSecondsAfterEmpty: - description: "TTLSecondsAfterEmpty is the number of seconds the controller - will wait before attempting to delete a node, measured from when - the node is detected to be empty. A Node is considered to be empty - when it does not have pods scheduled to it, excluding daemonsets. - \n Termination due to no utilization is disabled if this field is - not set." - format: int64 - type: integer - ttlSecondsUntilExpired: - description: "TTLSecondsUntilExpired is the number of seconds the - controller will wait before terminating a node, measured from when - the node is created. This is useful to implement features like eventually - consistent node upgrade, memory leak protection, and disruption - testing. \n Termination due to expiration is disabled if this field - is not set." - format: int64 - type: integer - weight: - description: Weight is the priority given to the provisioner during - scheduling. A higher numerical weight indicates that this provisioner - will be ordered ahead of other provisioners with lower weights. - A provisioner with no weight will be treated as if it is a provisioner - with a weight of 0. - format: int32 - maximum: 100 - minimum: 1 - type: integer - type: object - status: - description: ProvisionerStatus defines the observed state of Provisioner - properties: - conditions: - description: Conditions is the set of conditions required for this - provisioner to scale its target, and indicates whether or not those - conditions are met. - items: - description: 'Condition defines a readiness condition for a Knative - resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' - properties: - lastTransitionTime: - description: LastTransitionTime is the last time the condition - transitioned from one status to another. We use VolatileTime - in place of metav1.Time to exclude this from creating equality.Semantic - differences (all other things held constant). - type: string - message: - description: A human readable message indicating details about - the transition. - type: string - reason: - description: The reason for the condition's last transition. - type: string - severity: - description: Severity with which to treat failures of this type - of condition. When this is not specified, it defaults to Error. - type: string - status: - description: Status of the condition, one of True, False, Unknown. - type: string - type: - description: Type of condition. - type: string - required: - - status - - type - type: object - type: array - lastScaleTime: - description: LastScaleTime is the last time the Provisioner scaled - the number of nodes - format: date-time - type: string - resources: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Resources is the list of resources that have been provisioned. 
- type: object - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/pkg/apis/v1alpha5/doc.go b/pkg/apis/v1alpha5/doc.go index ce68d0b7a..02496f2fc 100644 --- a/pkg/apis/v1alpha5/doc.go +++ b/pkg/apis/v1alpha5/doc.go @@ -12,6 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +kubebuilder:skip // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register // +k8s:defaulter-gen=TypeMeta diff --git a/pkg/apis/v1alpha5/provisioner.go b/pkg/apis/v1alpha5/provisioner.go index b6063d876..507494b13 100644 --- a/pkg/apis/v1alpha5/provisioner.go +++ b/pkg/apis/v1alpha5/provisioner.go @@ -118,8 +118,6 @@ func ProviderAnnotation(p *Provider) map[string]string { return map[string]string{ProviderCompatabilityAnnotationKey: string(raw)} } -// TODO @joinnis: Mark this version as deprecated when v1beta1 APIs are formally released - // Provisioner is the Schema for the Provisioners API // +kubebuilder:object:root=true // +kubebuilder:resource:path=provisioners,scope=Cluster,categories=karpenter diff --git a/pkg/apis/v1beta1/nodeclaim.go b/pkg/apis/v1beta1/nodeclaim.go index 6fcba52b9..055f3cef5 100644 --- a/pkg/apis/v1beta1/nodeclaim.go +++ b/pkg/apis/v1beta1/nodeclaim.go @@ -50,9 +50,6 @@ type NodeClaimSpec struct { // NodeClassRef is a reference to an object that defines provider specific configuration // +required NodeClassRef *NodeClassReference `json:"nodeClassRef"` - // Provider stores CloudProvider-specific details from a conversion from a v1alpha5.Provisioner - // TODO @joinnis: Remove this field when v1alpha5 is unsupported in a future version of Karpenter - Provider *Provider `json:"-"` } // ResourceRequirements models the required resources for the NodeClaim to launch @@ -73,10 +70,6 @@ type KubeletConfiguration struct { // Note that not all providers may use all addresses. //+optional ClusterDNS []string `json:"clusterDNS,omitempty"` - // TODO @joinnis: Remove this field when v1alpha5 is unsupported in a future version of Karpenter - // ContainerRuntime is the container runtime to be used with your worker nodes. - // +optional - ContainerRuntime *string `json:"-"` // MaxPods is an override for the maximum number of pods that can run on // a worker node instance. // +kubebuilder:validation:Minimum:=0 @@ -170,11 +163,6 @@ type NodeClaim struct { Spec NodeClaimSpec `json:"spec,omitempty"` Status NodeClaimStatus `json:"status,omitempty"` - - // IsMachine tells Karpenter whether the in-memory representation of this object - // is actually referring to a NodeClaim object. 
This value is not actually part of the v1beta1 public-facing API - // TODO @joinnis: Remove this field when v1alpha5 is unsupported in a future version of Karpenter - IsMachine bool `json:"-"` } // NodeClaimList contains a list of NodeClaims diff --git a/pkg/apis/v1beta1/zz_generated.deepcopy.go b/pkg/apis/v1beta1/zz_generated.deepcopy.go index 425c1a9b9..4edf64622 100644 --- a/pkg/apis/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/v1beta1/zz_generated.deepcopy.go @@ -55,11 +55,6 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { *out = make([]string, len(*in)) copy(*out, *in) } - if in.ContainerRuntime != nil { - in, out := &in.ContainerRuntime, &out.ContainerRuntime - *out = new(string) - **out = **in - } if in.MaxPods != nil { in, out := &in.MaxPods, &out.MaxPods *out = new(int32) @@ -272,11 +267,6 @@ func (in *NodeClaimSpec) DeepCopyInto(out *NodeClaimSpec) { *out = new(NodeClassReference) **out = **in } - if in.Provider != nil { - in, out := &in.Provider, &out.Provider - *out = new(runtime.RawExtension) - (*in).DeepCopyInto(*out) - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeClaimSpec. diff --git a/pkg/cloudprovider/fake/cloudprovider.go b/pkg/cloudprovider/fake/cloudprovider.go index 152a43bc5..e9789548e 100644 --- a/pkg/cloudprovider/fake/cloudprovider.go +++ b/pkg/cloudprovider/fake/cloudprovider.go @@ -135,7 +135,6 @@ func (c *CloudProvider) Create(ctx context.Context, nodeClaim *v1beta1.NodeClaim Capacity: functional.FilterMap(instanceType.Capacity, func(_ v1.ResourceName, v resource.Quantity) bool { return !resources.IsZero(v) }), Allocatable: functional.FilterMap(instanceType.Allocatable(), func(_ v1.ResourceName, v resource.Quantity) bool { return !resources.IsZero(v) }), }, - IsMachine: nodeClaim.IsMachine, } c.CreatedNodeClaims[created.Status.ProviderID] = created return created, nil diff --git a/pkg/controllers/disruption/consolidation.go b/pkg/controllers/disruption/consolidation.go index b8d8986a1..cec55ffd7 100644 --- a/pkg/controllers/disruption/consolidation.go +++ b/pkg/controllers/disruption/consolidation.go @@ -180,9 +180,9 @@ func (c *consolidation) computeConsolidation(ctx context.Context, candidates ... // assumption, that the spot variant will launch. We also need to add a requirement to the node to ensure that if // spot capacity is insufficient we don't replace the node with a more expensive on-demand node. Instead the launch // should fail and we'll just leave the node alone. 
- ctReq := results.NewNodeClaims[0].Requirements.Get(v1alpha5.LabelCapacityType) - if ctReq.Has(v1alpha5.CapacityTypeSpot) && ctReq.Has(v1alpha5.CapacityTypeOnDemand) { - results.NewNodeClaims[0].Requirements.Add(scheduling.NewRequirement(v1alpha5.LabelCapacityType, v1.NodeSelectorOpIn, v1alpha5.CapacityTypeSpot)) + ctReq := results.NewNodeClaims[0].Requirements.Get(v1beta1.CapacityTypeLabelKey) + if ctReq.Has(v1beta1.CapacityTypeSpot) && ctReq.Has(v1beta1.CapacityTypeOnDemand) { + results.NewNodeClaims[0].Requirements.Add(scheduling.NewRequirement(v1beta1.CapacityTypeLabelKey, v1.NodeSelectorOpIn, v1beta1.CapacityTypeSpot)) } return Command{ diff --git a/pkg/controllers/disruption/suite_test.go b/pkg/controllers/disruption/suite_test.go index 67bf77081..e3c68bc76 100644 --- a/pkg/controllers/disruption/suite_test.go +++ b/pkg/controllers/disruption/suite_test.go @@ -37,7 +37,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" coreapis "github.com/aws/karpenter-core/pkg/apis" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/cloudprovider" "github.com/aws/karpenter-core/pkg/cloudprovider/fake" @@ -114,7 +113,7 @@ var _ = BeforeEach(func() { onDemandInstances = lo.Filter(cloudProvider.InstanceTypes, func(i *cloudprovider.InstanceType, _ int) bool { for _, o := range i.Offerings.Available() { - if o.CapacityType == v1alpha5.CapacityTypeOnDemand { + if o.CapacityType == v1beta1.CapacityTypeOnDemand { return true } } @@ -279,7 +278,7 @@ var _ = Describe("Disruption Taints", func() { Name: "current-on-demand", Offerings: []cloudprovider.Offering{ { - CapacityType: v1alpha5.CapacityTypeOnDemand, + CapacityType: v1beta1.CapacityTypeOnDemand, Zone: "test-zone-1a", Price: 1.5, Available: false, @@ -290,19 +289,19 @@ var _ = Describe("Disruption Taints", func() { Name: "spot-replacement", Offerings: []cloudprovider.Offering{ { - CapacityType: v1alpha5.CapacityTypeSpot, + CapacityType: v1beta1.CapacityTypeSpot, Zone: "test-zone-1a", Price: 1.0, Available: true, }, { - CapacityType: v1alpha5.CapacityTypeSpot, + CapacityType: v1beta1.CapacityTypeSpot, Zone: "test-zone-1b", Price: 0.2, Available: true, }, { - CapacityType: v1alpha5.CapacityTypeSpot, + CapacityType: v1beta1.CapacityTypeSpot, Zone: "test-zone-1c", Price: 0.4, Available: true, @@ -313,10 +312,10 @@ var _ = Describe("Disruption Taints", func() { nodeClaim, node = test.NodeClaimAndNode(v1beta1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - v1.LabelInstanceTypeStable: currentInstance.Name, - v1alpha5.LabelCapacityType: currentInstance.Offerings[0].CapacityType, - v1.LabelTopologyZone: currentInstance.Offerings[0].Zone, - v1beta1.NodePoolLabelKey: nodePool.Name, + v1.LabelInstanceTypeStable: currentInstance.Name, + v1beta1.CapacityTypeLabelKey: currentInstance.Offerings[0].CapacityType, + v1.LabelTopologyZone: currentInstance.Offerings[0].Zone, + v1beta1.NodePoolLabelKey: nodePool.Name, }, }, Status: v1beta1.NodeClaimStatus{ diff --git a/pkg/controllers/node/termination/controller.go b/pkg/controllers/node/termination/controller.go index c61aa9570..a0c88daea 100644 --- a/pkg/controllers/node/termination/controller.go +++ b/pkg/controllers/node/termination/controller.go @@ -32,7 +32,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" 
"github.com/aws/karpenter-core/pkg/cloudprovider" "github.com/aws/karpenter-core/pkg/controllers/node/termination/terminator" @@ -126,13 +125,11 @@ func (c *Controller) removeFinalizer(ctx context.Context, n *v1.Node) error { return client.IgnoreNotFound(fmt.Errorf("patching node, %w", err)) } metrics.NodesTerminatedCounter.With(prometheus.Labels{ - metrics.NodePoolLabel: n.Labels[v1beta1.NodePoolLabelKey], - metrics.ProvisionerLabel: n.Labels[v1alpha5.ProvisionerNameLabelKey], + metrics.NodePoolLabel: n.Labels[v1beta1.NodePoolLabelKey], }).Inc() // We use stored.DeletionTimestamp since the api-server may give back a node after the patch without a deletionTimestamp TerminationSummary.With(prometheus.Labels{ - metrics.ProvisionerLabel: n.Labels[v1alpha5.ProvisionerNameLabelKey], - metrics.NodePoolLabel: n.Labels[v1beta1.NodePoolLabelKey], + metrics.NodePoolLabel: n.Labels[v1beta1.NodePoolLabelKey], }).Observe(time.Since(stored.DeletionTimestamp.Time).Seconds()) logging.FromContext(ctx).Infof("deleted node") } diff --git a/pkg/controllers/nodeclaim/disruption/drift.go b/pkg/controllers/nodeclaim/disruption/drift.go index a76d82e65..8db171c8e 100644 --- a/pkg/controllers/nodeclaim/disruption/drift.go +++ b/pkg/controllers/nodeclaim/disruption/drift.go @@ -26,7 +26,6 @@ import ( "github.com/samber/lo" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/cloudprovider" "github.com/aws/karpenter-core/pkg/metrics" @@ -112,24 +111,12 @@ func (d *Drift) isDrifted(ctx context.Context, nodePool *v1beta1.NodePool, nodeC // Eligible fields for static drift are described in the docs // https://karpenter.sh/docs/concepts/deprovisioning/#drift func areStaticFieldsDrifted(nodePool *v1beta1.NodePool, nodeClaim *v1beta1.NodeClaim) cloudprovider.DriftReason { - var ownerHashKey string - if nodeClaim.IsMachine { - ownerHashKey = v1alpha5.ProvisionerHashAnnotationKey - } else { - ownerHashKey = v1beta1.NodePoolHashAnnotationKey - } - nodePoolHash, foundHashNodePool := nodePool.Annotations[ownerHashKey] - nodeClaimHash, foundHashNodeClaim := nodeClaim.Annotations[ownerHashKey] + nodePoolHash, foundHashNodePool := nodePool.Annotations[v1beta1.NodePoolHashAnnotationKey] + nodeClaimHash, foundHashNodeClaim := nodeClaim.Annotations[v1beta1.NodePoolHashAnnotationKey] if !foundHashNodePool || !foundHashNodeClaim { return "" } - if nodePoolHash != nodeClaimHash { - if nodeClaim.IsMachine { - return ProvisionerDrifted - } - return NodePoolDrifted - } - return "" + return lo.Ternary(nodePoolHash != nodeClaimHash, NodePoolDrifted, "") } func areRequirementsDrifted(nodePool *v1beta1.NodePool, nodeClaim *v1beta1.NodeClaim) cloudprovider.DriftReason { diff --git a/pkg/controllers/nodeclaim/garbagecollection/controller.go b/pkg/controllers/nodeclaim/garbagecollection/controller.go index 72850d9ed..d52d27545 100644 --- a/pkg/controllers/nodeclaim/garbagecollection/controller.go +++ b/pkg/controllers/nodeclaim/garbagecollection/controller.go @@ -86,7 +86,7 @@ func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconc "provider-id", nodeClaims[i].Status.ProviderID, "nodepool", nodeClaims[i].Labels[v1beta1.NodePoolLabelKey], ). 
- Debugf("garbage collecting %s with no cloudprovider representation", lo.Ternary(nodeClaims[i].IsMachine, "machine", "nodeclaim")) + Debugf("garbage collecting nodeclaim with no cloudprovider representation") nodeclaimutil.TerminatedCounter(nodeClaims[i], "garbage_collected").Inc() }) if err = multierr.Combine(errs...); err != nil { diff --git a/pkg/controllers/nodeclaim/lifecycle/initialization.go b/pkg/controllers/nodeclaim/lifecycle/initialization.go index 7bce11834..2c76e4c0a 100644 --- a/pkg/controllers/nodeclaim/lifecycle/initialization.go +++ b/pkg/controllers/nodeclaim/lifecycle/initialization.go @@ -79,7 +79,7 @@ func (i *Initialization) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeC return reconcile.Result{}, err } } - logging.FromContext(ctx).Infof("initialized %s", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim")) + logging.FromContext(ctx).Infof("initialized nodeclaim") nodeClaim.StatusConditions().MarkTrue(v1beta1.Initialized) nodeclaimutil.InitializedCounter(nodeClaim).Inc() return reconcile.Result{}, nil diff --git a/pkg/controllers/nodeclaim/lifecycle/launch.go b/pkg/controllers/nodeclaim/lifecycle/launch.go index 649db0a79..09664728c 100644 --- a/pkg/controllers/nodeclaim/lifecycle/launch.go +++ b/pkg/controllers/nodeclaim/lifecycle/launch.go @@ -25,7 +25,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/cloudprovider" "github.com/aws/karpenter-core/pkg/events" @@ -51,14 +50,10 @@ func (l *Launch) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeClaim) (r // One of the following scenarios can happen with a NodeClaim that isn't marked as launched: // 1. It was already launched by the CloudProvider but the client-go cache wasn't updated quickly enough or // patching failed on the status. In this case, we use the in-memory cached value for the created NodeClaim. - // 2. It is a "linked" NodeClaim, which implies that the CloudProvider NodeClaim already exists for the NodeClaim CR, but we - // need to grab info from the CloudProvider to get details on the NodeClaim. - // 3. It is a standard NodeClaim launch where we should call CloudProvider Create() and fill in details of the launched + // 2. It is a standard NodeClaim launch where we should call CloudProvider Create() and fill in details of the launched // NodeClaim into the NodeClaim CR. 
if ret, ok := l.cache.Get(string(nodeClaim.UID)); ok { created = ret.(*v1beta1.NodeClaim) - } else if _, ok := nodeClaim.Annotations[v1alpha5.MachineLinkedAnnotationKey]; ok { - created, err = l.linkNodeClaim(ctx, nodeClaim) } else { created, err = l.launchNodeClaim(ctx, nodeClaim) } @@ -77,30 +72,6 @@ func (l *Launch) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeClaim) (r return reconcile.Result{}, nil } -func (l *Launch) linkNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeClaim) (*v1beta1.NodeClaim, error) { - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("provider-id", nodeClaim.Annotations[v1alpha5.MachineLinkedAnnotationKey])) - created, err := l.cloudProvider.Get(ctx, nodeClaim.Annotations[v1alpha5.MachineLinkedAnnotationKey]) - if err != nil { - if !cloudprovider.IsNodeClaimNotFoundError(err) { - nodeClaim.StatusConditions().MarkFalse(v1beta1.Launched, "LinkFailed", truncateMessage(err.Error())) - return nil, fmt.Errorf("linking, %w", err) - } - if err = nodeclaimutil.Delete(ctx, l.kubeClient, nodeClaim); err != nil { - return nil, client.IgnoreNotFound(err) - } - logging.FromContext(ctx).Debugf("garbage collected with no cloudprovider representation") - nodeclaimutil.TerminatedCounter(nodeClaim, "garbage_collected").Inc() - return nil, nil - } - logging.FromContext(ctx).With( - "provider-id", created.Status.ProviderID, - "instance-type", created.Labels[v1.LabelInstanceTypeStable], - "zone", created.Labels[v1.LabelTopologyZone], - "capacity-type", created.Labels[v1alpha5.LabelCapacityType], - "allocatable", created.Status.Allocatable).Infof("linked %s", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim")) - return created, nil -} - func (l *Launch) launchNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeClaim) (*v1beta1.NodeClaim, error) { created, err := l.cloudProvider.Create(ctx, nodeClaim) if err != nil { @@ -116,10 +87,10 @@ func (l *Launch) launchNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeCla case cloudprovider.IsNodeClassNotReadyError(err): l.recorder.Publish(NodeClassNotReadyEvent(nodeClaim, err)) nodeClaim.StatusConditions().MarkFalse(v1beta1.Launched, "LaunchFailed", truncateMessage(err.Error())) - return nil, fmt.Errorf("launching %s, %w", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim"), err) + return nil, fmt.Errorf("launching nodeclaim, %w", err) default: nodeClaim.StatusConditions().MarkFalse(v1beta1.Launched, "LaunchFailed", truncateMessage(err.Error())) - return nil, fmt.Errorf("launching %s, %w", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim"), err) + return nil, fmt.Errorf("launching nodeclaim, %w", err) } } logging.FromContext(ctx).With( @@ -127,7 +98,7 @@ func (l *Launch) launchNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeCla "instance-type", created.Labels[v1.LabelInstanceTypeStable], "zone", created.Labels[v1.LabelTopologyZone], "capacity-type", created.Labels[v1beta1.CapacityTypeLabelKey], - "allocatable", created.Status.Allocatable).Infof("launched %s", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim")) + "allocatable", created.Status.Allocatable).Infof("launched nodeclaim") return created, nil } diff --git a/pkg/controllers/nodeclaim/lifecycle/registration.go b/pkg/controllers/nodeclaim/lifecycle/registration.go index 4ffae2a4b..7b57c4be0 100644 --- a/pkg/controllers/nodeclaim/lifecycle/registration.go +++ b/pkg/controllers/nodeclaim/lifecycle/registration.go @@ -27,7 +27,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 
"sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/metrics" "github.com/aws/karpenter-core/pkg/scheduling" @@ -40,9 +39,7 @@ type Registration struct { func (r *Registration) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeClaim) (reconcile.Result, error) { if nodeClaim.StatusConditions().GetCondition(v1beta1.Registered).IsTrue() { - // TODO @joinnis: Remove the back-propagation of this label onto the Node once all Nodes are guaranteed to have this label - // We can assume that all nodes will have this label and no back-propagation will be required once we hit v1 - return reconcile.Result{}, r.backPropagateRegistrationLabel(ctx, nodeClaim) + return reconcile.Result{}, nil } if !nodeClaim.StatusConditions().GetCondition(v1beta1.Launched).IsTrue() { nodeClaim.StatusConditions().MarkFalse(v1beta1.Registered, "NotLaunched", "Node not launched") @@ -66,18 +63,14 @@ func (r *Registration) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeCla if err = r.syncNode(ctx, nodeClaim, node); err != nil { return reconcile.Result{}, fmt.Errorf("syncing node, %w", err) } - logging.FromContext(ctx).Debugf("registered %s", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim")) + logging.FromContext(ctx).Debugf("registered nodeclaim") nodeClaim.StatusConditions().MarkTrue(v1beta1.Registered) nodeClaim.Status.NodeName = node.Name nodeclaimutil.RegisteredCounter(nodeClaim).Inc() - // If the NodeClaim is linked, then the node already existed, so we don't mark it as created - if _, ok := nodeClaim.Annotations[v1alpha5.MachineLinkedAnnotationKey]; !ok { - metrics.NodesCreatedCounter.With(prometheus.Labels{ - metrics.NodePoolLabel: nodeClaim.Labels[v1beta1.NodePoolLabelKey], - metrics.ProvisionerLabel: nodeClaim.Labels[v1alpha5.ProvisionerNameLabelKey], - }).Inc() - } + metrics.NodesCreatedCounter.With(prometheus.Labels{ + metrics.NodePoolLabel: nodeClaim.Labels[v1beta1.NodePoolLabelKey], + }).Inc() return reconcile.Result{}, nil } @@ -86,15 +79,11 @@ func (r *Registration) syncNode(ctx context.Context, nodeClaim *v1beta1.NodeClai controllerutil.AddFinalizer(node, v1beta1.TerminationFinalizer) node = nodeclaimutil.UpdateNodeOwnerReferences(nodeClaim, node) - // If the NodeClaim isn't registered as linked, then sync it - // This prevents us from messing with nodes that already exist and are scheduled - if _, ok := nodeClaim.Annotations[v1alpha5.MachineLinkedAnnotationKey]; !ok { - node.Labels = lo.Assign(node.Labels, nodeClaim.Labels) - node.Annotations = lo.Assign(node.Annotations, nodeClaim.Annotations) - // Sync all taints inside NodeClaim into the Node taints - node.Spec.Taints = scheduling.Taints(node.Spec.Taints).Merge(nodeClaim.Spec.Taints) - node.Spec.Taints = scheduling.Taints(node.Spec.Taints).Merge(nodeClaim.Spec.StartupTaints) - } + node.Labels = lo.Assign(node.Labels, nodeClaim.Labels) + node.Annotations = lo.Assign(node.Annotations, nodeClaim.Annotations) + // Sync all taints inside NodeClaim into the Node taints + node.Spec.Taints = scheduling.Taints(node.Spec.Taints).Merge(nodeClaim.Spec.Taints) + node.Spec.Taints = scheduling.Taints(node.Spec.Taints).Merge(nodeClaim.Spec.StartupTaints) node.Labels = lo.Assign(node.Labels, nodeClaim.Labels, map[string]string{ v1beta1.NodeRegisteredLabelKey: "true", }) @@ -105,22 +94,3 @@ func (r *Registration) syncNode(ctx context.Context, nodeClaim *v1beta1.NodeClai } return nil } - -// backPropagateRegistrationLabel ports 
the `karpenter.sh/registered` label onto nodes that are registered by the Machine -// but don't have this label on the Node yet -func (r *Registration) backPropagateRegistrationLabel(ctx context.Context, nodeClaim *v1beta1.NodeClaim) error { - node, err := nodeclaimutil.NodeForNodeClaim(ctx, r.kubeClient, nodeClaim) - stored := node.DeepCopy() - if err != nil { - return nodeclaimutil.IgnoreDuplicateNodeError(nodeclaimutil.IgnoreNodeNotFoundError(err)) - } - node.Labels = lo.Assign(node.Labels, map[string]string{ - v1alpha5.LabelNodeRegistered: "true", - }) - if !equality.Semantic.DeepEqual(stored, node) { - if err := r.kubeClient.Patch(ctx, node, client.MergeFrom(stored)); err != nil { - return fmt.Errorf("syncing node registration label, %w", err) - } - } - return nil -} diff --git a/pkg/controllers/nodeclaim/termination/controller.go b/pkg/controllers/nodeclaim/termination/controller.go index f4e6dae0c..fe7ac5487 100644 --- a/pkg/controllers/nodeclaim/termination/controller.go +++ b/pkg/controllers/nodeclaim/termination/controller.go @@ -19,7 +19,6 @@ import ( "fmt" "time" - "github.com/samber/lo" "golang.org/x/time/rate" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" @@ -95,7 +94,7 @@ func (c *Controller) Finalize(ctx context.Context, nodeClaim *v1beta1.NodeClaim) if err = nodeclaimutil.Patch(ctx, c.kubeClient, stored, nodeClaim); err != nil { return reconcile.Result{}, client.IgnoreNotFound(fmt.Errorf("removing termination finalizer, %w", err)) } - logging.FromContext(ctx).Infof("deleted %s", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim")) + logging.FromContext(ctx).Infof("deleted nodeclaim") } return reconcile.Result{}, nil } diff --git a/pkg/controllers/nodeclaim/termination/suite_test.go b/pkg/controllers/nodeclaim/termination/suite_test.go index 7a373a5df..47b207eb5 100644 --- a/pkg/controllers/nodeclaim/termination/suite_test.go +++ b/pkg/controllers/nodeclaim/termination/suite_test.go @@ -32,7 +32,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/aws/karpenter-core/pkg/apis" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/cloudprovider" "github.com/aws/karpenter-core/pkg/cloudprovider/fake" @@ -92,10 +91,10 @@ var _ = Describe("Termination", func() { nodeClaim = test.NodeClaim(v1beta1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - v1alpha5.ProvisionerNameLabelKey: nodePool.Name, + v1beta1.NodePoolLabelKey: nodePool.Name, }, Finalizers: []string{ - v1alpha5.TerminationFinalizer, + v1beta1.TerminationFinalizer, }, }, Spec: v1beta1.NodeClaimSpec{ @@ -160,22 +159,6 @@ var _ = Describe("Termination", func() { _, err = cloudProvider.Get(ctx, nodeClaim.Status.ProviderID) Expect(cloudprovider.IsNodeClaimNotFoundError(err)).To(BeTrue()) }) - It("should delete the Instance if the NodeClaim is linked but doesn't have its providerID resolved yet", func() { - node := test.NodeClaimLinkedNode(nodeClaim) - - nodeClaim.Annotations = lo.Assign(nodeClaim.Annotations, map[string]string{v1alpha5.MachineLinkedAnnotationKey: nodeClaim.Status.ProviderID}) - nodeClaim.Status.ProviderID = "" - ExpectApplied(ctx, env.Client, nodePool, nodeClaim, node) - - // Expect the nodeClaim to be gone - Expect(env.Client.Delete(ctx, nodeClaim)).To(Succeed()) - ExpectReconcileSucceeded(ctx, nodeClaimTerminationController, client.ObjectKeyFromObject(nodeClaim)) // triggers the nodeClaim deletion - ExpectNotFound(ctx, env.Client, nodeClaim) - - // Expect the 
nodeClaim to be gone from the cloudprovider - _, err := cloudProvider.Get(ctx, nodeClaim.Annotations[v1alpha5.MachineLinkedAnnotationKey]) - Expect(cloudprovider.IsNodeClaimNotFoundError(err)).To(BeTrue()) - }) It("should not delete the NodeClaim until all the Nodes are removed", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClaim) ExpectReconcileSucceeded(ctx, nodeClaimController, client.ObjectKeyFromObject(nodeClaim)) diff --git a/pkg/controllers/provisioning/provisioner.go b/pkg/controllers/provisioning/provisioner.go index 24777fdb0..63734c9cf 100644 --- a/pkg/controllers/provisioning/provisioner.go +++ b/pkg/controllers/provisioning/provisioner.go @@ -35,7 +35,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/operator/controller" "github.com/aws/karpenter-core/pkg/scheduling" @@ -419,17 +418,10 @@ func (p *Provisioner) Validate(ctx context.Context, pod *v1.Pod) error { // validateKarpenterManagedLabelCanExist provides a more clear error message in the event of scheduling a pod that specifically doesn't // want to run on a Karpenter node (e.g. a Karpenter controller replica). func validateKarpenterManagedLabelCanExist(p *v1.Pod) error { - hasProvisionerNameLabel, hasNodePoolLabel := false, false for _, req := range scheduling.NewPodRequirements(p) { - if req.Key == v1alpha5.ProvisionerNameLabelKey && req.Operator() == v1.NodeSelectorOpDoesNotExist { - hasProvisionerNameLabel = true - } if req.Key == v1beta1.NodePoolLabelKey && req.Operator() == v1.NodeSelectorOpDoesNotExist { - hasNodePoolLabel = true - } - if hasProvisionerNameLabel && hasNodePoolLabel { - return fmt.Errorf("configured to not run on a Karpenter provisioned node via %s %s and %s %s requirements", - v1alpha5.ProvisionerNameLabelKey, v1.NodeSelectorOpDoesNotExist, v1beta1.NodePoolLabelKey, v1.NodeSelectorOpDoesNotExist) + return fmt.Errorf("configured to not run on a Karpenter provisioned node via the %s %s requirement", + v1beta1.NodePoolLabelKey, v1.NodeSelectorOpDoesNotExist) } } return nil @@ -488,11 +480,7 @@ func validateNodeSelectorTerm(term v1.NodeSelectorTerm) (errs error) { } if term.MatchExpressions != nil { for _, requirement := range term.MatchExpressions { - alphaErr := v1alpha5.ValidateRequirement(requirement) - betaErr := v1beta1.ValidateRequirement(requirement) - if alphaErr != nil && betaErr != nil { - errs = multierr.Append(errs, betaErr) - } + errs = multierr.Append(errs, v1beta1.ValidateRequirement(requirement)) } } return errs diff --git a/pkg/controllers/provisioning/scheduling/events.go b/pkg/controllers/provisioning/scheduling/events.go index a1c88d0b0..6f7b5c2a6 100644 --- a/pkg/controllers/provisioning/scheduling/events.go +++ b/pkg/controllers/provisioning/scheduling/events.go @@ -19,7 +19,6 @@ import ( "strings" "time" - "github.com/samber/lo" v1 "k8s.io/api/core/v1" "k8s.io/client-go/util/flowcontrol" @@ -33,7 +32,7 @@ var PodNominationRateLimiter = flowcontrol.NewTokenBucketRateLimiter(5, 10) func NominatePodEvent(pod *v1.Pod, node *v1.Node, nodeClaim *v1beta1.NodeClaim) events.Event { var info []string if nodeClaim != nil { - info = append(info, fmt.Sprintf("%s/%s", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim"), nodeClaim.GetName())) + info = append(info, fmt.Sprintf("nodeclaim/%s", nodeClaim.GetName())) } if node != nil { info = append(info, fmt.Sprintf("node/%s", node.Name)) diff 
--git a/pkg/controllers/provisioning/scheduling/nodeclaim.go b/pkg/controllers/provisioning/scheduling/nodeclaim.go index 6e0798410..4c27f3861 100644 --- a/pkg/controllers/provisioning/scheduling/nodeclaim.go +++ b/pkg/controllers/provisioning/scheduling/nodeclaim.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" + "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/cloudprovider" "github.com/aws/karpenter-core/pkg/scheduling" "github.com/aws/karpenter-core/pkg/utils/resources" @@ -268,7 +268,7 @@ func fits(instanceType *cloudprovider.InstanceType, requests v1.ResourceList) bo func hasOffering(instanceType *cloudprovider.InstanceType, requirements scheduling.Requirements) bool { for _, offering := range instanceType.Offerings.Available() { if (!requirements.Has(v1.LabelTopologyZone) || requirements.Get(v1.LabelTopologyZone).Has(offering.Zone)) && - (!requirements.Has(v1alpha5.LabelCapacityType) || requirements.Get(v1alpha5.LabelCapacityType).Has(offering.CapacityType)) { + (!requirements.Has(v1beta1.CapacityTypeLabelKey) || requirements.Get(v1beta1.CapacityTypeLabelKey).Has(offering.CapacityType)) { return true } } diff --git a/pkg/controllers/provisioning/scheduling/suite_test.go b/pkg/controllers/provisioning/scheduling/suite_test.go index 8c2d0411e..862e136ca 100644 --- a/pkg/controllers/provisioning/scheduling/suite_test.go +++ b/pkg/controllers/provisioning/scheduling/suite_test.go @@ -37,7 +37,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/aws/karpenter-core/pkg/apis" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/cloudprovider" "github.com/aws/karpenter-core/pkg/cloudprovider/fake" @@ -3300,7 +3299,7 @@ func ExpectMaxSkew(ctx context.Context, c client.Client, namespace string, const skew[key]++ } } - if constraint.TopologyKey == v1alpha5.LabelCapacityType { + if constraint.TopologyKey == v1beta1.CapacityTypeLabelKey { if key, ok := node.Labels[constraint.TopologyKey]; ok { skew[key]++ } diff --git a/pkg/controllers/state/cluster.go b/pkg/controllers/state/cluster.go index af40c2e72..d25ce6eff 100644 --- a/pkg/controllers/state/cluster.go +++ b/pkg/controllers/state/cluster.go @@ -36,7 +36,6 @@ import ( "knative.dev/pkg/ptr" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/cloudprovider" "github.com/aws/karpenter-core/pkg/scheduling" @@ -239,7 +238,7 @@ func (c *Cluster) UpdateNode(ctx context.Context, node *v1.Node) error { c.mu.Lock() defer c.mu.Unlock() - managed := node.Labels[v1alpha5.ProvisionerNameLabelKey] != "" || node.Labels[v1beta1.NodePoolLabelKey] != "" + managed := node.Labels[v1beta1.NodePoolLabelKey] != "" initialized := node.Labels[v1beta1.NodeInitializedLabelKey] != "" if node.Spec.ProviderID == "" { // If we know that we own this node, we shouldn't allow the providerID to be empty diff --git a/pkg/controllers/state/statenode.go b/pkg/controllers/state/statenode.go index 7fec2628a..f89dec085 100644 --- a/pkg/controllers/state/statenode.go +++ b/pkg/controllers/state/statenode.go @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" 
"github.com/aws/karpenter-core/pkg/operator/options" "github.com/aws/karpenter-core/pkg/scheduling" @@ -123,9 +122,7 @@ func (in *StateNode) Name() string { if in.NodeClaim == nil { return in.Node.Name } - // TODO @joinnis: The !in.Initialized() check can be removed when we can assume that all nodes have the v1alpha5.NodeRegisteredLabel on them - // We can assume that all nodes will have this label and no back-propagation will be required once we hit v1 - if !in.Registered() && !in.Initialized() { + if !in.Registered() { return in.NodeClaim.Name } return in.Node.Name @@ -165,9 +162,7 @@ func (in *StateNode) Annotations() map[string]string { if in.NodeClaim == nil { return in.Node.Annotations } - // TODO @joinnis: The !in.Initialized() check can be removed when we can assume that all nodes have the v1alpha5.NodeRegisteredLabel on them - // We can assume that all nodes will have this label and no back-propagation will be required once we hit v1 - if !in.Registered() && !in.Initialized() { + if !in.Registered() { return in.NodeClaim.Annotations } return in.Node.Annotations @@ -187,9 +182,7 @@ func (in *StateNode) Labels() map[string]string { if in.NodeClaim == nil { return in.Node.Labels } - // TODO @joinnis: The !in.Initialized() check can be removed when we can assume that all nodes have the v1alpha5.NodeRegisteredLabel on them - // We can assume that all nodes will have this label and no back-propagation will be required once we hit v1 - if !in.Registered() && !in.Initialized() { + if !in.Registered() { return in.NodeClaim.Labels } return in.Node.Labels @@ -209,9 +202,7 @@ func (in *StateNode) Taints() []v1.Taint { } var taints []v1.Taint - // TODO @joinnis: The !in.Initialized() check can be removed when we can assume that all nodes have the v1alpha5.NodeRegisteredLabel on them - // We can assume that all nodes will have this label and no back-propagation will be required once we hit v1 - if (!in.Registered() && !in.Initialized() && in.NodeClaim != nil) || in.Node == nil { + if (!in.Registered() && in.NodeClaim != nil) || in.Node == nil { taints = in.NodeClaim.Spec.Taints } else { taints = in.Node.Spec.Taints @@ -336,7 +327,6 @@ func (in *StateNode) MarkedForDeletion() bool { // 1. The Node has MarkedForDeletion set // 2. The Node has a NodeClaim counterpart and is actively deleting // 3. The Node has no NodeClaim counterpart and is actively deleting - // TODO remove check for machine after v1alpha5 APIs are dropped. 
return in.markedForDeletion || (in.NodeClaim != nil && !in.NodeClaim.DeletionTimestamp.IsZero()) || (in.Node != nil && in.NodeClaim == nil && !in.Node.DeletionTimestamp.IsZero()) @@ -352,7 +342,6 @@ func (in *StateNode) Nominated() bool { func (in *StateNode) Managed() bool { return in.NodeClaim != nil || - (in.Node != nil && in.Node.Labels[v1alpha5.ProvisionerNameLabelKey] != "") || (in.Node != nil && in.Node.Labels[v1beta1.NodePoolLabelKey] != "") } diff --git a/pkg/controllers/state/suite_test.go b/pkg/controllers/state/suite_test.go index 5d8a7d787..29d43a1d9 100644 --- a/pkg/controllers/state/suite_test.go +++ b/pkg/controllers/state/suite_test.go @@ -806,7 +806,7 @@ var _ = Describe("Node Resource Level", func() { v1beta1.NodePoolLabelKey: nodePool.Name, v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name, }, - Finalizers: []string{v1alpha5.TerminationFinalizer}, + Finalizers: []string{v1beta1.TerminationFinalizer}, }, Allocatable: map[v1.ResourceName]resource.Quantity{ v1.ResourceCPU: resource.MustParse("4"), @@ -827,7 +827,7 @@ var _ = Describe("Node Resource Level", func() { It("should mark node for deletion when nodeclaim is deleted", func() { nodeClaim := test.NodeClaim(v1beta1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{v1alpha5.TerminationFinalizer}, + Finalizers: []string{v1beta1.TerminationFinalizer}, }, Spec: v1beta1.NodeClaimSpec{ Requirements: []v1.NodeSelectorRequirement{ @@ -866,7 +866,7 @@ var _ = Describe("Node Resource Level", func() { v1beta1.NodePoolLabelKey: nodePool.Name, v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name, }, - Finalizers: []string{v1alpha5.TerminationFinalizer}, + Finalizers: []string{v1beta1.TerminationFinalizer}, }, ProviderID: nodeClaim.Status.ProviderID, }) diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 3fd4d7f1c..ea6fa4cc8 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -21,7 +21,6 @@ import ( const ( NodeSubsystem = "nodes" - machineSubsystem = "machines" nodeClaimSubsystem = "nodeclaims" ) @@ -116,7 +115,6 @@ var ( }, []string{ NodePoolLabel, - ProvisionerLabel, }, ) NodesTerminatedCounter = prometheus.NewCounterVec( @@ -128,89 +126,6 @@ var ( }, []string{ NodePoolLabel, - ProvisionerLabel, - }, - ) - // TODO @joinnis: Remove these metrics when dropping v1alpha5 and no longer supporting Machines - MachinesCreatedCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: machineSubsystem, - Name: "created", - Help: "Number of machines created in total by Karpenter. Labeled by reason the machine was created and the owning provisioner.", - }, - []string{ - ReasonLabel, - ProvisionerLabel, - }, - ) - MachinesTerminatedCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: machineSubsystem, - Name: "terminated", - Help: "Number of machines terminated in total by Karpenter. Labeled by reason the machine was terminated and the owning provisioner.", - }, - []string{ - ReasonLabel, - ProvisionerLabel, - }, - ) - MachinesLaunchedCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: machineSubsystem, - Name: "launched", - Help: "Number of machines launched in total by Karpenter. 
Labeled by the owning provisioner.", - }, - []string{ - ProvisionerLabel, - }, - ) - MachinesRegisteredCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: machineSubsystem, - Name: "registered", - Help: "Number of machines registered in total by Karpenter. Labeled by the owning provisioner.", - }, - []string{ - ProvisionerLabel, - }, - ) - MachinesInitializedCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: machineSubsystem, - Name: "initialized", - Help: "Number of machines initialized in total by Karpenter. Labeled by the owning provisioner.", - }, - []string{ - ProvisionerLabel, - }, - ) - MachinesDisruptedCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: machineSubsystem, - Name: "disrupted", - Help: "Number of machines disrupted in total by Karpenter. Labeled by disruption type of the machine and the owning provisioner.", - }, - []string{ - TypeLabel, - ProvisionerLabel, - }, - ) - MachinesDriftedCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: machineSubsystem, - Name: "drifted", - Help: "Number of machine drifted reasons in total by Karpenter. Labeled by drift type of the machine and the owning provisioner..", - }, - []string{ - TypeLabel, - ProvisionerLabel, }, ) ) @@ -218,6 +133,5 @@ var ( func init() { crmetrics.Registry.MustRegister(NodeClaimsCreatedCounter, NodeClaimsTerminatedCounter, NodeClaimsLaunchedCounter, NodeClaimsRegisteredCounter, NodeClaimsInitializedCounter, NodeClaimsDisruptedCounter, NodeClaimsDriftedCounter, - MachinesCreatedCounter, MachinesTerminatedCounter, MachinesLaunchedCounter, MachinesRegisteredCounter, MachinesInitializedCounter, - MachinesDisruptedCounter, MachinesDriftedCounter, NodesCreatedCounter, NodesTerminatedCounter) + NodesCreatedCounter, NodesTerminatedCounter) } diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index e72a7c042..31ac01693 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -46,7 +46,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics/server" "github.com/aws/karpenter-core/pkg/apis" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/events" "github.com/aws/karpenter-core/pkg/operator/controller" @@ -179,9 +178,6 @@ func NewOperator() (context.Context, *Operator) { lo.Must0(mgr.GetFieldIndexer().IndexField(ctx, &v1.Node{}, "spec.providerID", func(o client.Object) []string { return []string{o.(*v1.Node).Spec.ProviderID} }), "failed to setup node provider id indexer") - lo.Must0(mgr.GetFieldIndexer().IndexField(ctx, &v1alpha5.Machine{}, "status.providerID", func(o client.Object) []string { - return []string{o.(*v1alpha5.Machine).Status.ProviderID} - }), "failed to setup machine provider id indexer") lo.Must0(mgr.GetFieldIndexer().IndexField(ctx, &v1beta1.NodeClaim{}, "status.providerID", func(o client.Object) []string { return []string{o.(*v1beta1.NodeClaim).Status.ProviderID} }), "failed to setup nodeclaim provider id indexer") diff --git a/pkg/scheduling/requirement.go b/pkg/scheduling/requirement.go index 3e1ebaa51..1f2bbef49 100644 --- a/pkg/scheduling/requirement.go +++ b/pkg/scheduling/requirement.go @@ -24,7 +24,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" ) @@ -38,9 +37,6 @@ type 
Requirement struct { } func NewRequirement(key string, operator v1.NodeSelectorOperator, values ...string) *Requirement { - if normalized, ok := v1alpha5.NormalizedLabels[key]; ok { - key = normalized - } if normalized, ok := v1beta1.NormalizedLabels[key]; ok { key = normalized } diff --git a/pkg/test/expectations/expectations.go b/pkg/test/expectations/expectations.go index 99afc11b0..8a712d46d 100644 --- a/pkg/test/expectations/expectations.go +++ b/pkg/test/expectations/expectations.go @@ -46,7 +46,6 @@ import ( crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/cloudprovider" "github.com/aws/karpenter-core/pkg/controllers/nodeclaim/lifecycle" @@ -68,7 +67,6 @@ const ( type Bindings map[*v1.Pod]*Binding type Binding struct { - Machine *v1alpha5.Machine NodeClaim *v1beta1.NodeClaim Node *v1.Node } @@ -188,7 +186,7 @@ func ExpectCleanedUp(ctx context.Context, c client.Client) { wg := sync.WaitGroup{} namespaces := &v1.NamespaceList{} Expect(c.List(ctx, namespaces)).To(Succeed()) - ExpectFinalizersRemovedFromList(ctx, c, &v1.NodeList{}, &v1alpha5.MachineList{}, &v1beta1.NodeClaimList{}, &v1.PersistentVolumeClaimList{}) + ExpectFinalizersRemovedFromList(ctx, c, &v1.NodeList{}, &v1beta1.NodeClaimList{}, &v1.PersistentVolumeClaimList{}) for _, object := range []client.Object{ &v1.Pod{}, &v1.Node{}, @@ -198,8 +196,6 @@ func ExpectCleanedUp(ctx context.Context, c client.Client) { &v1.PersistentVolumeClaim{}, &v1.PersistentVolume{}, &storagev1.StorageClass{}, - &v1alpha5.Provisioner{}, - &v1alpha5.Machine{}, &v1beta1.NodePool{}, &v1beta1.NodeClaim{}, } { @@ -309,7 +305,7 @@ func ExpectNodeClaimDeployedNoNode(ctx context.Context, c client.Client, cluster } Expect(err).To(Succeed()) - // Make the machine ready in the status conditions + // Make the nodeclaim ready in the status conditions nc = lifecycle.PopulateNodeClaimDetails(nc, resolved) nc.StatusConditions().MarkTrue(v1beta1.Launched) ExpectApplied(ctx, c, nc) @@ -325,7 +321,7 @@ func ExpectNodeClaimDeployed(ctx context.Context, c client.Client, cluster *stat } nc.StatusConditions().MarkTrue(v1beta1.Registered) - // Mock the machine launch and node joining at the apiserver + // Mock the nodeclaim launch and node joining at the apiserver node := test.NodeClaimLinkedNode(nc) node.Labels = lo.Assign(node.Labels, map[string]string{v1beta1.NodeRegisteredLabelKey: "true"}) ExpectApplied(ctx, c, nc, node) @@ -521,13 +517,6 @@ func ExpectNodes(ctx context.Context, c client.Client) []*v1.Node { return lo.ToSlicePtr(nodeList.Items) } -func ExpectMachines(ctx context.Context, c client.Client) []*v1alpha5.Machine { - GinkgoHelper() - machineList := &v1alpha5.MachineList{} - Expect(c.List(ctx, machineList)).To(Succeed()) - return lo.ToSlicePtr(machineList.Items) -} - func ExpectNodeClaims(ctx context.Context, c client.Client) []*v1beta1.NodeClaim { GinkgoHelper() nodeClaims := &v1beta1.NodeClaimList{} @@ -569,7 +558,7 @@ func ExpectMakeNodesAndNodeClaimsInitializedAndStateUpdated(ctx context.Context, ExpectMakeNodesInitialized(ctx, c, nodes...) ExpectMakeNodeClaimsInitialized(ctx, c, nodeClaims...) 
- // Inform cluster state about node and machine readiness + // Inform cluster state about node and nodeclaim readiness for _, n := range nodes { ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(n)) } diff --git a/pkg/utils/nodeclaim/nodeclaim.go b/pkg/utils/nodeclaim/nodeclaim.go index c7d8c696f..c29c3ee25 100644 --- a/pkg/utils/nodeclaim/nodeclaim.go +++ b/pkg/utils/nodeclaim/nodeclaim.go @@ -29,7 +29,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/apis/v1beta1" "github.com/aws/karpenter-core/pkg/metrics" "github.com/aws/karpenter-core/pkg/scheduling" @@ -171,27 +170,6 @@ func AllNodesForNodeClaim(ctx context.Context, c client.Client, nodeClaim *v1bet return lo.ToSlicePtr(nodeList.Items), nil } -func NewKubeletConfiguration(kc *v1alpha5.KubeletConfiguration) *v1beta1.KubeletConfiguration { - if kc == nil { - return nil - } - return &v1beta1.KubeletConfiguration{ - ClusterDNS: kc.ClusterDNS, - ContainerRuntime: kc.ContainerRuntime, - MaxPods: kc.MaxPods, - PodsPerCore: kc.PodsPerCore, - SystemReserved: kc.SystemReserved, - KubeReserved: kc.KubeReserved, - EvictionHard: kc.EvictionHard, - EvictionSoft: kc.EvictionSoft, - EvictionSoftGracePeriod: kc.EvictionSoftGracePeriod, - EvictionMaxPodGracePeriod: kc.EvictionMaxPodGracePeriod, - ImageGCHighThresholdPercent: kc.ImageGCHighThresholdPercent, - ImageGCLowThresholdPercent: kc.ImageGCLowThresholdPercent, - CPUCFSQuota: kc.CPUCFSQuota, - } -} - // NewFromNode converts a node into a pseudo-NodeClaim using known values from the node // Deprecated: This NodeClaim generator function can be removed when v1beta1 migration has completed. func NewFromNode(node *v1.Node) *v1beta1.NodeClaim { @@ -200,7 +178,7 @@ func NewFromNode(node *v1.Node) *v1beta1.NodeClaim { Name: node.Name, Annotations: node.Annotations, Labels: node.Labels, - Finalizers: []string{v1alpha5.TerminationFinalizer}, + Finalizers: []string{v1beta1.TerminationFinalizer}, }, Spec: v1beta1.NodeClaimSpec{ Taints: node.Spec.Taints, diff --git a/pkg/utils/nodepool/nodepool.go b/pkg/utils/nodepool/nodepool.go index 1817d5f59..45cb73605 100644 --- a/pkg/utils/nodepool/nodepool.go +++ b/pkg/utils/nodepool/nodepool.go @@ -43,7 +43,6 @@ func New(provisioner *v1alpha5.Provisioner) *v1beta1.NodePool { Requirements: provisioner.Spec.Requirements, Kubelet: NewKubeletConfiguration(provisioner.Spec.KubeletConfiguration), NodeClassRef: NewNodeClassReference(provisioner.Spec.ProviderRef), - Provider: provisioner.Spec.Provider, }, }, Weight: provisioner.Spec.Weight, @@ -73,7 +72,6 @@ func NewKubeletConfiguration(kc *v1alpha5.KubeletConfiguration) *v1beta1.Kubelet } return &v1beta1.KubeletConfiguration{ ClusterDNS: kc.ClusterDNS, - ContainerRuntime: kc.ContainerRuntime, MaxPods: kc.MaxPods, PodsPerCore: kc.PodsPerCore, SystemReserved: kc.SystemReserved, diff --git a/pkg/utils/nodepool/suite_test.go b/pkg/utils/nodepool/suite_test.go index b8e7f0d46..c547ba2b4 100644 --- a/pkg/utils/nodepool/suite_test.go +++ b/pkg/utils/nodepool/suite_test.go @@ -16,7 +16,6 @@ package nodepool_test import ( "context" - "encoding/json" "testing" "time" @@ -26,7 +25,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "knative.dev/pkg/apis" . 
"knative.dev/pkg/logging/testing" "knative.dev/pkg/ptr" @@ -205,7 +203,6 @@ var _ = Describe("NodePoolUtils", func() { Expect(nodePool.Spec.Template.Spec.Requirements).To(Equal(provisioner.Spec.Requirements)) Expect(nodePool.Spec.Template.Spec.Kubelet.ClusterDNS).To(Equal(provisioner.Spec.KubeletConfiguration.ClusterDNS)) - Expect(nodePool.Spec.Template.Spec.Kubelet.ContainerRuntime).To(Equal(provisioner.Spec.KubeletConfiguration.ContainerRuntime)) Expect(nodePool.Spec.Template.Spec.Kubelet.MaxPods).To(Equal(provisioner.Spec.KubeletConfiguration.MaxPods)) Expect(nodePool.Spec.Template.Spec.Kubelet.PodsPerCore).To(Equal(provisioner.Spec.KubeletConfiguration.PodsPerCore)) Expect(nodePool.Spec.Template.Spec.Kubelet.SystemReserved).To(Equal(provisioner.Spec.KubeletConfiguration.SystemReserved)) @@ -231,16 +228,6 @@ var _ = Describe("NodePoolUtils", func() { ExpectResources(nodePool.Status.Resources, provisioner.Status.Resources) }) - It("should convert a Provisioner to a NodePool (with Provider)", func() { - provisioner.Spec.Provider = &runtime.RawExtension{Raw: lo.Must(json.Marshal(map[string]string{ - "test-key": "test-value", - "test-key2": "test-value2", - }))} - provisioner.Spec.ProviderRef = nil - - nodePool := nodepoolutil.New(provisioner) - Expect(nodePool.Spec.Template.Spec.Provider).To(Equal(provisioner.Spec.Provider)) - }) It("should patch the status on a NodePool", func() { nodePool := test.NodePool(v1beta1.NodePool{ Spec: v1beta1.NodePoolSpec{ diff --git a/pkg/utils/provisioner/provisioner.go b/pkg/utils/provisioner/provisioner.go deleted file mode 100644 index 22e0da8ec..000000000 --- a/pkg/utils/provisioner/provisioner.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package provisioner - -import ( - "github.com/samber/lo" - v1 "k8s.io/api/core/v1" - - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" - "github.com/aws/karpenter-core/pkg/apis/v1beta1" -) - -func New(nodePool *v1beta1.NodePool) *v1alpha5.Provisioner { - p := &v1alpha5.Provisioner{ - TypeMeta: nodePool.TypeMeta, - ObjectMeta: nodePool.ObjectMeta, - Spec: v1alpha5.ProvisionerSpec{ - Annotations: nodePool.Spec.Template.Annotations, - Labels: nodePool.Spec.Template.Labels, - Taints: nodePool.Spec.Template.Spec.Taints, - StartupTaints: nodePool.Spec.Template.Spec.StartupTaints, - Requirements: nodePool.Spec.Template.Spec.Requirements, - KubeletConfiguration: NewKubeletConfiguration(nodePool.Spec.Template.Spec.Kubelet), - Provider: nodePool.Spec.Template.Spec.Provider, - ProviderRef: NewProviderRef(nodePool.Spec.Template.Spec.NodeClassRef), - Limits: NewLimits(v1.ResourceList(nodePool.Spec.Limits)), - Weight: nodePool.Spec.Weight, - }, - Status: v1alpha5.ProvisionerStatus{ - Resources: nodePool.Status.Resources, - }, - } - if nodePool.Spec.Disruption.ExpireAfter.Duration != nil { - p.Spec.TTLSecondsUntilExpired = lo.ToPtr(int64(nodePool.Spec.Disruption.ExpireAfter.Seconds())) - } - if nodePool.Spec.Disruption.ConsolidationPolicy == v1beta1.ConsolidationPolicyWhenEmpty { - p.Spec.TTLSecondsAfterEmpty = lo.ToPtr(int64(nodePool.Spec.Disruption.ConsolidateAfter.Seconds())) - } - if nodePool.Spec.Disruption.ConsolidationPolicy == v1beta1.ConsolidationPolicyWhenUnderutilized { - p.Spec.Consolidation = &v1alpha5.Consolidation{ - Enabled: lo.ToPtr(true), - } - } - return p -} - -func NewKubeletConfiguration(kc *v1beta1.KubeletConfiguration) *v1alpha5.KubeletConfiguration { - if kc == nil { - return nil - } - return &v1alpha5.KubeletConfiguration{ - ClusterDNS: kc.ClusterDNS, - ContainerRuntime: kc.ContainerRuntime, - MaxPods: kc.MaxPods, - PodsPerCore: kc.PodsPerCore, - SystemReserved: kc.SystemReserved, - KubeReserved: kc.KubeReserved, - EvictionHard: kc.EvictionHard, - EvictionSoft: kc.EvictionSoft, - EvictionSoftGracePeriod: kc.EvictionSoftGracePeriod, - EvictionMaxPodGracePeriod: kc.EvictionMaxPodGracePeriod, - ImageGCHighThresholdPercent: kc.ImageGCHighThresholdPercent, - ImageGCLowThresholdPercent: kc.ImageGCLowThresholdPercent, - CPUCFSQuota: kc.CPUCFSQuota, - } -} - -func NewProviderRef(nc *v1beta1.NodeClassReference) *v1alpha5.MachineTemplateRef { - if nc == nil { - return nil - } - return &v1alpha5.MachineTemplateRef{ - Kind: nc.Kind, - Name: nc.Name, - APIVersion: nc.APIVersion, - } -} - -func NewLimits(limits v1.ResourceList) *v1alpha5.Limits { - return &v1alpha5.Limits{ - Resources: limits, - } -} diff --git a/pkg/utils/provisioner/suite_test.go b/pkg/utils/provisioner/suite_test.go deleted file mode 100644 index 89815e25e..000000000 --- a/pkg/utils/provisioner/suite_test.go +++ /dev/null @@ -1,243 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package provisioner_test - -import ( - "context" - "encoding/json" - "testing" - "time" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - "github.com/samber/lo" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - . "knative.dev/pkg/logging/testing" - "knative.dev/pkg/ptr" - - . "github.com/aws/karpenter-core/pkg/test/expectations" - - "github.com/aws/karpenter-core/pkg/apis/v1alpha5" - "github.com/aws/karpenter-core/pkg/apis/v1beta1" - "github.com/aws/karpenter-core/pkg/test" - provisionerutil "github.com/aws/karpenter-core/pkg/utils/provisioner" -) - -var ctx context.Context - -func TestAPIs(t *testing.T) { - ctx = TestContextWithLogger(t) - RegisterFailHandler(Fail) - RunSpecs(t, "ProvisionerUtils") -} - -var _ = Describe("ProvisionerUtils", func() { - var nodePool *v1beta1.NodePool - BeforeEach(func() { - nodePool = test.NodePool(v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "top-level-annotation": "top-level-annotation-value", - }, - Labels: map[string]string{ - "top-level-label": "top-level-label-value", - }, - }, - Spec: v1beta1.NodePoolSpec{ - Template: v1beta1.NodeClaimTemplate{ - ObjectMeta: v1beta1.ObjectMeta{ - Annotations: map[string]string{ - "test-annotation-key": "test-annotation-value", - "test-annotation-key2": "test-annotation-value2", - }, - Labels: map[string]string{ - "test-label-key": "test-label-value", - "test-label-key2": "test-label-value2", - }, - }, - Spec: v1beta1.NodeClaimSpec{ - Taints: []v1.Taint{ - { - Key: "test-taint-key", - Effect: v1.TaintEffectNoSchedule, - Value: "test-taint-value", - }, - { - Key: "test-taint-key2", - Effect: v1.TaintEffectNoExecute, - Value: "test-taint-value2", - }, - }, - StartupTaints: []v1.Taint{ - { - Key: "test-startup-taint-key", - Effect: v1.TaintEffectNoSchedule, - Value: "test-startup-taint-value", - }, - { - Key: "test-startup-taint-key2", - Effect: v1.TaintEffectNoExecute, - Value: "test-startup-taint-value2", - }, - }, - Requirements: []v1.NodeSelectorRequirement{ - { - Key: v1.LabelTopologyZone, - Operator: v1.NodeSelectorOpIn, - Values: []string{"test-zone-1", "test-zone-2"}, - }, - { - Key: v1alpha5.LabelCapacityType, - Operator: v1.NodeSelectorOpIn, - Values: []string{v1alpha5.CapacityTypeOnDemand}, - }, - { - Key: v1.LabelHostname, - Operator: v1.NodeSelectorOpExists, - }, - }, - Resources: v1beta1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("10"), - v1.ResourceMemory: resource.MustParse("10Mi"), - v1.ResourceEphemeralStorage: resource.MustParse("100Gi"), - }, - }, - Kubelet: &v1beta1.KubeletConfiguration{ - ContainerRuntime: ptr.String("containerd"), - MaxPods: ptr.Int32(110), - PodsPerCore: ptr.Int32(10), - SystemReserved: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("200m"), - v1.ResourceMemory: resource.MustParse("200Mi"), - v1.ResourceEphemeralStorage: resource.MustParse("1Gi"), - }, - KubeReserved: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("200m"), - v1.ResourceMemory: resource.MustParse("200Mi"), - v1.ResourceEphemeralStorage: resource.MustParse("1Gi"), - }, - EvictionHard: map[string]string{ - "memory.available": "5%", - "nodefs.available": "5%", - "nodefs.inodesFree": "5%", - "imagefs.available": "5%", - "imagefs.inodesFree": "5%", - "pid.available": "3%", - }, - EvictionSoft: map[string]string{ - "memory.available": "10%", - "nodefs.available": "10%", - "nodefs.inodesFree": "10%", - "imagefs.available": "10%", - "imagefs.inodesFree": "10%", - "pid.available": "6%", - }, - EvictionSoftGracePeriod: 
map[string]metav1.Duration{ - "memory.available": {Duration: time.Minute * 2}, - "nodefs.available": {Duration: time.Minute * 2}, - "nodefs.inodesFree": {Duration: time.Minute * 2}, - "imagefs.available": {Duration: time.Minute * 2}, - "imagefs.inodesFree": {Duration: time.Minute * 2}, - "pid.available": {Duration: time.Minute * 2}, - }, - EvictionMaxPodGracePeriod: ptr.Int32(120), - ImageGCHighThresholdPercent: ptr.Int32(50), - ImageGCLowThresholdPercent: ptr.Int32(10), - CPUCFSQuota: ptr.Bool(false), - }, - NodeClassRef: &v1beta1.NodeClassReference{ - Kind: "NodeClassRef", - APIVersion: "test.cloudprovider/v1", - Name: "default", - }, - }, - }, - Disruption: v1beta1.Disruption{ - ConsolidationPolicy: v1beta1.ConsolidationPolicyWhenUnderutilized, - ExpireAfter: v1beta1.NillableDuration{Duration: lo.ToPtr(lo.Must(time.ParseDuration("2160h")))}, - }, - Limits: v1beta1.Limits(v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("10"), - v1.ResourceMemory: resource.MustParse("10Mi"), - v1.ResourceEphemeralStorage: resource.MustParse("1000Gi"), - }), - Weight: lo.ToPtr[int32](100), - }, - }) - }) - It("should convert a Provisioner to a NodePool", func() { - provisioner := provisionerutil.New(nodePool) - for k, v := range nodePool.Annotations { - Expect(provisioner.Annotations).To(HaveKeyWithValue(k, v)) - } - for k, v := range nodePool.Labels { - Expect(provisioner.Labels).To(HaveKeyWithValue(k, v)) - } - for k, v := range nodePool.Spec.Template.Annotations { - Expect(provisioner.Spec.Annotations).To(HaveKeyWithValue(k, v)) - } - for k, v := range nodePool.Spec.Template.Labels { - Expect(provisioner.Spec.Labels).To(HaveKeyWithValue(k, v)) - } - - Expect(provisioner.Spec.Taints).To(Equal(nodePool.Spec.Template.Spec.Taints)) - Expect(provisioner.Spec.StartupTaints).To(Equal(nodePool.Spec.Template.Spec.StartupTaints)) - Expect(provisioner.Spec.Requirements).To(Equal(nodePool.Spec.Template.Spec.Requirements)) - - Expect(provisioner.Spec.KubeletConfiguration.ClusterDNS).To(Equal(nodePool.Spec.Template.Spec.Kubelet.ClusterDNS)) - Expect(provisioner.Spec.KubeletConfiguration.ContainerRuntime).To(Equal(nodePool.Spec.Template.Spec.Kubelet.ContainerRuntime)) - Expect(provisioner.Spec.KubeletConfiguration.MaxPods).To(Equal(nodePool.Spec.Template.Spec.Kubelet.MaxPods)) - Expect(provisioner.Spec.KubeletConfiguration.PodsPerCore).To(Equal(nodePool.Spec.Template.Spec.Kubelet.PodsPerCore)) - Expect(provisioner.Spec.KubeletConfiguration.SystemReserved).To(Equal(nodePool.Spec.Template.Spec.Kubelet.SystemReserved)) - Expect(provisioner.Spec.KubeletConfiguration.KubeReserved).To(Equal(nodePool.Spec.Template.Spec.Kubelet.KubeReserved)) - Expect(provisioner.Spec.KubeletConfiguration.EvictionHard).To(Equal(nodePool.Spec.Template.Spec.Kubelet.EvictionHard)) - Expect(provisioner.Spec.KubeletConfiguration.EvictionSoft).To(Equal(nodePool.Spec.Template.Spec.Kubelet.EvictionSoft)) - Expect(provisioner.Spec.KubeletConfiguration.EvictionSoftGracePeriod).To(Equal(nodePool.Spec.Template.Spec.Kubelet.EvictionSoftGracePeriod)) - Expect(provisioner.Spec.KubeletConfiguration.EvictionMaxPodGracePeriod).To(Equal(nodePool.Spec.Template.Spec.Kubelet.EvictionMaxPodGracePeriod)) - Expect(provisioner.Spec.KubeletConfiguration.ImageGCHighThresholdPercent).To(Equal(nodePool.Spec.Template.Spec.Kubelet.ImageGCHighThresholdPercent)) - Expect(provisioner.Spec.KubeletConfiguration.ImageGCLowThresholdPercent).To(Equal(nodePool.Spec.Template.Spec.Kubelet.ImageGCLowThresholdPercent)) - 
Expect(provisioner.Spec.KubeletConfiguration.CPUCFSQuota).To(Equal(nodePool.Spec.Template.Spec.Kubelet.CPUCFSQuota)) - - Expect(provisioner.Spec.ProviderRef.Kind).To(Equal(nodePool.Spec.Template.Spec.NodeClassRef.Kind)) - Expect(provisioner.Spec.ProviderRef.APIVersion).To(Equal(nodePool.Spec.Template.Spec.NodeClassRef.APIVersion)) - Expect(provisioner.Spec.ProviderRef.Name).To(Equal(nodePool.Spec.Template.Spec.NodeClassRef.Name)) - - Expect(provisioner.Spec.Consolidation).ToNot(BeNil()) - Expect(provisioner.Spec.Consolidation.Enabled).ToNot(BeNil()) - Expect(lo.FromPtr(provisioner.Spec.Consolidation.Enabled)).To(BeTrue()) - - Expect(lo.FromPtr(provisioner.Spec.TTLSecondsUntilExpired)).To(BeNumerically("==", nodePool.Spec.Disruption.ExpireAfter.Duration.Seconds())) - Expect(provisioner.Spec.TTLSecondsAfterEmpty).To(BeNil()) - - ExpectResources(provisioner.Spec.Limits.Resources, v1.ResourceList(nodePool.Spec.Limits)) - Expect(lo.FromPtr(provisioner.Spec.Weight)).To(BeNumerically("==", lo.FromPtr(nodePool.Spec.Weight))) - - ExpectResources(provisioner.Status.Resources, nodePool.Status.Resources) - }) - It("should convert a Provisioner to a NodePool (with Provider)", func() { - nodePool.Spec.Template.Spec.Provider = &runtime.RawExtension{Raw: lo.Must(json.Marshal(map[string]string{ - "test-key": "test-value", - "test-key2": "test-value2", - }))} - nodePool.Spec.Template.Spec.NodeClassRef = nil - - provisioner := provisionerutil.New(nodePool) - Expect(provisioner.Spec.Provider).To(Equal(nodePool.Spec.Template.Spec.Provider)) - }) -})
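
For readers tracking the behavioral effect of the hunks above: with the v1alpha5 Provisioner/Machine fallbacks removed, a node counts as Karpenter-managed purely on the basis of a NodeClaim counterpart or the v1beta1 NodePool label, mirroring the simplified checks in cluster.go and statenode.go. The following is a minimal sketch, not part of the change set itself; the package and helper name (example, isKarpenterManaged) are hypothetical, while v1beta1.NodePoolLabelKey and v1beta1.NodeClaim are the identifiers used in the diff.

    package example

    import (
    	v1 "k8s.io/api/core/v1"

    	"github.com/aws/karpenter-core/pkg/apis/v1beta1"
    )

    // isKarpenterManaged sketches the post-v1alpha5 management check: the
    // ProvisionerNameLabelKey branch removed above is gone, so only a NodeClaim
    // counterpart or a non-empty v1beta1 NodePool label marks a node as managed.
    func isKarpenterManaged(node *v1.Node, nodeClaim *v1beta1.NodeClaim) bool {
    	if nodeClaim != nil {
    		return true
    	}
    	return node != nil && node.Labels[v1beta1.NodePoolLabelKey] != ""
    }

The same single-label collapse appears in validateKarpenterManagedLabelCanExist, which now reports only the v1beta1.NodePoolLabelKey DoesNotExist requirement, and in hasOffering, which reads the capacity type from v1beta1.CapacityTypeLabelKey rather than v1alpha5.LabelCapacityType.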