feat: CEL Validation for NodePool.Spec.Template Labels (#4906)
engedaam committed Oct 24, 2023
1 parent 3be36ee commit 206c130
Showing 6 changed files with 75 additions and 23 deletions.
1 change: 1 addition & 0 deletions Makefile
@@ -125,6 +125,7 @@ verify: tidy download ## Verify code. Includes dependencies, linting, formatting
hack/boilerplate.sh
cp $(KARPENTER_CORE_DIR)/pkg/apis/crds/* pkg/apis/crds
hack/validation/requirements.sh
hack/validation/labels.sh
$(foreach dir,$(MOD_DIRS),cd $(dir) && golangci-lint run $(newline))
@git diff --quiet ||\
{ echo "New file modification detected in the Git working tree. Please check in before commit."; git --no-pager diff --name-only | uniq | awk '{print " - " $$0}'; \
2 changes: 1 addition & 1 deletion go.mod
@@ -7,7 +7,7 @@ require (
github.com/PuerkitoBio/goquery v1.8.1
github.com/avast/retry-go v3.0.0+incompatible
github.com/aws/aws-sdk-go v1.46.2
github.com/aws/karpenter-core v0.31.1-0.20231024004605-223dcd0fda49
github.com/aws/karpenter-core v0.31.1-0.20231024212423-074467327555
github.com/aws/karpenter/tools/kompat v0.0.0-20231010173459-62c25a3ea85c
github.com/go-logr/zapr v1.2.4
github.com/imdario/mergo v0.3.16
4 changes: 2 additions & 2 deletions go.sum
@@ -57,8 +57,8 @@ github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHS
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/aws/aws-sdk-go v1.46.2 h1:XZbOmjtN1VCfEtQq7QNFsbxIqO+bB+bRhiOBjp6AzWc=
github.com/aws/aws-sdk-go v1.46.2/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/karpenter-core v0.31.1-0.20231024004605-223dcd0fda49 h1:xhotj4aEGeTH5MWM1vqdtxDQJlkoeAmYF3KeByrFef0=
github.com/aws/karpenter-core v0.31.1-0.20231024004605-223dcd0fda49/go.mod h1:liN81BwfVdlE5VHhgUnNZQdE+TEfO5cOYZXyR034T58=
github.com/aws/karpenter-core v0.31.1-0.20231024212423-074467327555 h1:Rr+u23lxqMaubLd8JLks3L9uO8ACLSNbzID3VXIm/B4=
github.com/aws/karpenter-core v0.31.1-0.20231024212423-074467327555/go.mod h1:liN81BwfVdlE5VHhgUnNZQdE+TEfO5cOYZXyR034T58=
github.com/aws/karpenter/tools/kompat v0.0.0-20231010173459-62c25a3ea85c h1:oXWwIttmjYLbBKhLazG21aQvpJ3NOOr8IXhCJ/p6e/M=
github.com/aws/karpenter/tools/kompat v0.0.0-20231010173459-62c25a3ea85c/go.mod h1:l/TIBsaCx/IrOr0Xvlj/cHLOf05QzuQKEZ1hx2XWmfU=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
7 changes: 7 additions & 0 deletions hack/validation/labels.sh
@@ -0,0 +1,7 @@
# Labels Validation

# # Adding validation for nodepool

# ## checking for restricted labels while filtering out well known labels
yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.metadata.properties.labels.x-kubernetes-validations += [
{"message": "label domain \"karpenter.k8s.aws\" is restricted", "rule": "self in [\"karpenter.k8s.aws/instance-encryption-in-transit-supported\", \"karpenter.k8s.aws/instance-category\", \"karpenter.k8s.aws/instance-hypervisor\", \"karpenter.k8s.aws/instance-family\", \"karpenter.k8s.aws/instance-generation\", \"karpenter.k8s.aws/instance-local-nvme\", \"karpenter.k8s.aws/instance-size\", \"karpenter.k8s.aws/instance-cpu\",\"karpenter.k8s.aws/instance-memory\", \"karpenter.k8s.aws/instance-network-bandwidth\", \"karpenter.k8s.aws/instance-gpu-name\", \"karpenter.k8s.aws/instance-gpu-manufacturer\", \"karpenter.k8s.aws/instance-gpu-count\", \"karpenter.k8s.aws/instance-gpu-memory\", \"karpenter.k8s.aws/instance-accelerator-name\", \"karpenter.k8s.aws/instance-accelerator-manufacturer\", \"karpenter.k8s.aws/instance-accelerator-count\"] || !self.find(\"^([^/]+)\").endsWith(\"karpenter.k8s.aws\")"}]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml
16 changes: 16 additions & 0 deletions pkg/apis/crds/karpenter.sh_nodepools.yaml
@@ -90,8 +90,24 @@ spec:
labels:
additionalProperties:
type: string
maxLength: 63
pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels'
type: object
maxProperties: 100
x-kubernetes-validations:
- message: label domain "kubernetes.io" is restricted
rule: self.all(x, x in ["beta.kubernetes.io/instance-type", "failure-domain.beta.kubernetes.io/region", "beta.kubernetes.io/os", "beta.kubernetes.io/arch", "failure-domain.beta.kubernetes.io/zone", "topology.kubernetes.io/zone", "topology.kubernetes.io/region", "kubernetes.io/arch", "kubernetes.io/os", "node.kubernetes.io/windows-build"] || x.startsWith("node.kubernetes.io") || x.startsWith("node-restriction.kubernetes.io") || !x.find("^([^/]+)").endsWith("kubernetes.io"))
- message: label domain "k8s.io" is restricted
rule: self.all(x, x.startsWith("kops.k8s.io") || !x.find("^([^/]+)").endsWith("k8s.io"))
- message: label domain "karpenter.sh" is restricted
rule: self.all(x, x in ["karpenter.sh/capacity-type", "karpenter.sh/nodepool"] || !x.find("^([^/]+)").endsWith("karpenter.sh"))
- message: label "karpenter.sh/nodepool" is restricted
rule: self.all(x, x != "karpenter.sh/nodepool")
- message: label "kubernetes.io/hostname" is restricted
rule: self.all(x, x != "kubernetes.io/hostname")
- message: label domain "karpenter.k8s.aws" is restricted
rule: self in ["karpenter.k8s.aws/instance-encryption-in-transit-supported", "karpenter.k8s.aws/instance-category", "karpenter.k8s.aws/instance-hypervisor", "karpenter.k8s.aws/instance-family", "karpenter.k8s.aws/instance-generation", "karpenter.k8s.aws/instance-local-nvme", "karpenter.k8s.aws/instance-size", "karpenter.k8s.aws/instance-cpu","karpenter.k8s.aws/instance-memory", "karpenter.k8s.aws/instance-network-bandwidth", "karpenter.k8s.aws/instance-gpu-name", "karpenter.k8s.aws/instance-gpu-manufacturer", "karpenter.k8s.aws/instance-gpu-count", "karpenter.k8s.aws/instance-gpu-memory", "karpenter.k8s.aws/instance-accelerator-name", "karpenter.k8s.aws/instance-accelerator-manufacturer", "karpenter.k8s.aws/instance-accelerator-count"] || !self.find("^([^/]+)").endsWith("karpenter.k8s.aws")
type: object
spec:
description: NodeClaimSpec describes the desired state of the NodeClaim
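Besides the CEL rules, the hunk above adds plain schema constraints on template labels: values are capped at 63 characters and must match the label-value pattern, and a template may carry at most 100 labels. The domain checks themselves all rely on the CEL idiom x.find("^([^/]+)").endsWith(...), which extracts the key's prefix before the first "/" and tests it against the restricted domain. A rough Go equivalent of that idiom (a sketch, not code from this commit) is:

// Sketch only: Go equivalent of the CEL check x.find("^([^/]+)").endsWith(domain).
import (
	"regexp"
	"strings"
)

// labelPrefix matches everything in a label key up to the first "/".
var labelPrefix = regexp.MustCompile(`^([^/]+)`)

// inRestrictedDomain reports whether the key's prefix ends with the given domain,
// e.g. inRestrictedDomain("karpenter.k8s.aws/my-key", "karpenter.k8s.aws") is true,
// while inRestrictedDomain("example.com/team", "karpenter.k8s.aws") is false.
func inRestrictedDomain(key, domain string) bool {
	return strings.HasSuffix(labelPrefix.FindString(key), domain)
}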
68 changes: 48 additions & 20 deletions pkg/apis/v1beta1/nodepool_validation_cel_test.go
@@ -54,28 +54,56 @@ var _ = Describe("CEL/Validation", func() {
},
}
})
It("should allow restricted domains exceptions", func() {
oldNodePool := nodePool.DeepCopy()
for label := range v1beta1.LabelDomainExceptions {
nodePool.Spec.Template.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}},
Context("Requirements", func() {
It("should allow restricted domains exceptions", func() {
oldNodePool := nodePool.DeepCopy()
for label := range v1beta1.LabelDomainExceptions {
nodePool.Spec.Template.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}},
}
Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
Expect(nodePool.RuntimeValidate()).To(Succeed())
Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
nodePool = oldNodePool.DeepCopy()
}
Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
Expect(nodePool.RuntimeValidate()).To(Succeed())
Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
nodePool = oldNodePool.DeepCopy()
}
})
It("should allow well known label exceptions", func() {
oldNodePool := nodePool.DeepCopy()
for label := range v1beta1.WellKnownLabels.Difference(sets.New(v1beta1.NodePoolLabelKey)) {
nodePool.Spec.Template.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: label, Operator: v1.NodeSelectorOpIn, Values: []string{"test"}},
}
Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
Expect(nodePool.RuntimeValidate()).To(Succeed())
Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
nodePool = oldNodePool.DeepCopy()
}
})
})
It("should allow well known label exceptions", func() {
oldNodePool := nodePool.DeepCopy()
for label := range v1beta1.WellKnownLabels.Difference(sets.New(v1beta1.NodePoolLabelKey)) {
nodePool.Spec.Template.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: label, Operator: v1.NodeSelectorOpIn, Values: []string{"test"}},
Context("Labels", func() {
It("should allow restricted domains exceptions", func() {
oldNodePool := nodePool.DeepCopy()
for label := range v1beta1.LabelDomainExceptions {
nodePool.Spec.Template.Labels = map[string]string{
label: "test",
}
Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
Expect(nodePool.RuntimeValidate()).To(Succeed())
Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
nodePool = oldNodePool.DeepCopy()
}
Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
Expect(nodePool.RuntimeValidate()).To(Succeed())
Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
nodePool = oldNodePool.DeepCopy()
}
})
It("should allow well known label exceptions", func() {
oldNodePool := nodePool.DeepCopy()
for label := range v1beta1.WellKnownLabels.Difference(sets.New(v1beta1.NodePoolLabelKey)) {
nodePool.Spec.Template.Labels = map[string]string{
label: "test",
}
Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
Expect(nodePool.RuntimeValidate()).To(Succeed())
Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
nodePool = oldNodePool.DeepCopy()
}
})
})
})
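
The new label tests only cover the allow paths (domain exceptions and well-known labels). A complementary negative case for one of the restricted keys, for example karpenter.sh/nodepool, could look like the following sketch (not part of this commit):

// Sketch only: v1beta1.NodePoolLabelKey ("karpenter.sh/nodepool") should be
// rejected by the label "karpenter.sh/nodepool" is restricted rule in the CRD.
It("should fail for the restricted nodepool label", func() {
	nodePool.Spec.Template.Labels = map[string]string{
		v1beta1.NodePoolLabelKey: "default",
	}
	Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
})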
