Merge branch 'main' into bump-go-version
rschalo committed Jul 2, 2024
2 parents 9b14f29 + c7fcb8b commit 1ab77ee
Showing 7 changed files with 164 additions and 170 deletions.
3 changes: 2 additions & 1 deletion pkg/apis/v1/suite_test.go
@@ -24,7 +24,8 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/samber/lo"
. "knative.dev/pkg/logging/testing"

. "sigs.k8s.io/karpenter/pkg/utils/testing"

"sigs.k8s.io/karpenter/pkg/apis"
"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
4 changes: 0 additions & 4 deletions pkg/controllers/metrics/pod/controller.go
@@ -112,10 +112,6 @@ func NewController(kubeClient client.Client) *Controller {
}
}

func (c *Controller) Name() string {
return "metrics.pod"
}

// Reconcile executes a termination control loop for the resource
func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
ctx = injection.WithControllerName(ctx, "metrics.pod")
7 changes: 0 additions & 7 deletions pkg/controllers/nodeclaim/termination/controller.go
@@ -21,8 +21,6 @@ import (
"fmt"
"time"

"knative.dev/pkg/logging"

"sigs.k8s.io/karpenter/pkg/utils/termination"

"sigs.k8s.io/karpenter/pkg/metrics"
@@ -136,15 +134,10 @@ func (c *Controller) finalize(ctx context.Context, nodeClaim *v1beta1.NodeClaim)
NodeClaimTerminationDuration.With(map[string]string{
metrics.NodePoolLabel: nodeClaim.Labels[v1beta1.NodePoolLabelKey],
}).Observe(time.Since(stored.DeletionTimestamp.Time).Seconds())
logging.FromContext(ctx).Infof("deleted nodeclaim")
}
return reconcile.Result{}, nil
}

func (*Controller) Name() string {
return ""
}

func (c *Controller) Register(_ context.Context, m manager.Manager) error {
return controllerruntime.NewControllerManagedBy(m).
Named("nodeclaim.termination").
307 changes: 154 additions & 153 deletions pkg/controllers/provisioning/suite_test.go
@@ -287,6 +287,160 @@ var _ = Describe("Provisioning", func() {
Expect(n.Node.Name).ToNot(Equal(node.Name))
}
})
It("should schedule based on the max resource requests of containers and initContainers with sidecar containers when initcontainer comes first", func() {
if env.Version.Minor() < 29 {
Skip("Native Sidecar containers is only on by default starting in K8s version >= 1.29.x")
}

ExpectApplied(ctx, env.Client, test.NodePool())

// Add three instance types, one that's what we want, one that's slightly smaller, one that's slightly bigger.
// If we miscalculate resources, we'll schedule to the smaller instance type rather than the larger one
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 10)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 4)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 11)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 5)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 12)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 6)),
})

pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("2Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("2Gi")},
},
InitContainers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10"), v1.ResourceMemory: resource.MustParse("4Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10"), v1.ResourceMemory: resource.MustParse("4Gi")},
},
},
{
RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.9"), v1.ResourceMemory: resource.MustParse("2.9Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.9"), v1.ResourceMemory: resource.MustParse("2.9Gi")},
},
},
},
})

ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("11"),
v1.ResourceMemory: resource.MustParse("5Gi"),
}, node.Status.Capacity)
})
It("should schedule based on the max resource requests of containers and initContainers with sidecar containers when sidecar container comes first and init container resources are smaller than container resources", func() {
if env.Version.Minor() < 29 {
Skip("Native Sidecar containers is only on by default starting in K8s version >= 1.29.x")
}

ExpectApplied(ctx, env.Client, test.NodePool())

// Add three instance types, one that's what we want, one that's slightly smaller, one that's slightly bigger.
// If we miscalculate resources, we'll schedule to the smaller instance type rather than the larger one
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 10)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 4)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 11)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 5)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 12)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 6)),
})

pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("2Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("2Gi")},
},
InitContainers: []v1.Container{
{
RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.9"), v1.ResourceMemory: resource.MustParse("2.9Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.9"), v1.ResourceMemory: resource.MustParse("2.9Gi")},
},
},
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("5"), v1.ResourceMemory: resource.MustParse("1Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("5"), v1.ResourceMemory: resource.MustParse("1Gi")},
},
},
},
})

ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("11"),
v1.ResourceMemory: resource.MustParse("5Gi"),
}, node.Status.Capacity)
})
It("should schedule based on the max resource requests of containers and initContainers with sidecar containers when sidecar container comes first and init container resources are bigger than container resources", func() {
if env.Version.Minor() < 29 {
Skip("Native Sidecar containers is only on by default starting in K8s version >= 1.29.x")
}

ExpectApplied(ctx, env.Client, test.NodePool())

// Add three instance types, one that's what we want, one that's slightly smaller, one that's slightly bigger.
// If we miscalculate resources, we'll schedule to the smaller instance type rather than the larger one
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 10)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 4)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 11)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 5)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 12)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 6)),
})

pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("5"), v1.ResourceMemory: resource.MustParse("1Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("5"), v1.ResourceMemory: resource.MustParse("1Gi")},
},
InitContainers: []v1.Container{
{
RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.9"), v1.ResourceMemory: resource.MustParse("2.9Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.9"), v1.ResourceMemory: resource.MustParse("2.9Gi")},
},
},
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("2Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("2Gi")},
},
},
},
})

ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("11"),
v1.ResourceMemory: resource.MustParse("5Gi"),
}, node.Status.Capacity)
})

Context("Resource Limits", func() {
It("should not schedule when limits are exceeded", func() {
ExpectApplied(ctx, env.Client, test.NodePool(v1beta1.NodePool{
@@ -563,159 +717,6 @@ var _ = Describe("Provisioning", func() {
Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("4")))
Expect(*allocatable.Memory()).To(Equal(resource.MustParse("4Gi")))
})
It("should schedule based on the max resource requests of containers and initContainers with sidecar containers when initcontainer comes first", func() {
if env.Version.Minor() < 29 {
Skip("Native Sidecar containers is only on by default starting in K8s version >= 1.29.x")
}

ExpectApplied(ctx, env.Client, test.NodePool())

// Add three instance types, one that's what we want, one that's slightly smaller, one that's slightly bigger.
// If we miscalculate resources, we'll schedule to the smaller instance type rather than the larger one
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 10)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 4)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 11)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 5)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 12)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 6)),
})

pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("2Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("2Gi")},
},
InitContainers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10"), v1.ResourceMemory: resource.MustParse("4Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10"), v1.ResourceMemory: resource.MustParse("4Gi")},
},
},
{
RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.9"), v1.ResourceMemory: resource.MustParse("2.9Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.9"), v1.ResourceMemory: resource.MustParse("2.9Gi")},
},
},
},
})

ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("11"),
v1.ResourceMemory: resource.MustParse("5Gi"),
}, node.Status.Capacity)
})
It("should schedule based on the max resource requests of containers and initContainers with sidecar containers when sidecar container comes first and init container resources are smaller than container resources", func() {
if env.Version.Minor() < 29 {
Skip("Native Sidecar containers is only on by default starting in K8s version >= 1.29.x")
}

ExpectApplied(ctx, env.Client, test.NodePool())

// Add three instance types, one that's what we want, one that's slightly smaller, one that's slightly bigger.
// If we miscalculate resources, we'll schedule to the smaller instance type rather than the larger one
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 10)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 4)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 11)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 5)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 12)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 6)),
})

pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("2Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("2Gi")},
},
InitContainers: []v1.Container{
{
RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.9"), v1.ResourceMemory: resource.MustParse("2.9Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.9"), v1.ResourceMemory: resource.MustParse("2.9Gi")},
},
},
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("5"), v1.ResourceMemory: resource.MustParse("1Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("5"), v1.ResourceMemory: resource.MustParse("1Gi")},
},
},
},
})

ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("11"),
v1.ResourceMemory: resource.MustParse("5Gi"),
}, node.Status.Capacity)
})
It("should schedule based on the max resource requests of containers and initContainers with sidecar containers when sidecar container comes first and init container resources are bigger than container resources", func() {
if env.Version.Minor() < 29 {
Skip("Native Sidecar containers is only on by default starting in K8s version >= 1.29.x")
}

ExpectApplied(ctx, env.Client, test.NodePool())

// Add three instance types, one that's what we want, one that's slightly smaller, one that's slightly bigger.
// If we miscalculate resources, we'll schedule to the smaller instance type rather than the larger one
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 10)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 4)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 11)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 5)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 12)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 6)),
})

pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("5"), v1.ResourceMemory: resource.MustParse("1Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("5"), v1.ResourceMemory: resource.MustParse("1Gi")},
},
InitContainers: []v1.Container{
{
RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.9"), v1.ResourceMemory: resource.MustParse("2.9Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.9"), v1.ResourceMemory: resource.MustParse("2.9Gi")},
},
},
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("2Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("2Gi")},
},
},
},
})

ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("11"),
v1.ResourceMemory: resource.MustParse("5Gi"),
}, node.Status.Capacity)
})
It("should not schedule if combined max resources are too large for any node", func() {
ExpectApplied(ctx, env.Client, test.NodePool(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
7 changes: 4 additions & 3 deletions pkg/utils/disruption/disruption.go
@@ -18,14 +18,15 @@ package disruption

import (
"context"
"fmt"
"math"
"strconv"

"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"

"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
)
@@ -51,8 +52,8 @@ func EvictionCost(ctx context.Context, p *v1.Pod) float64 {
if ok {
podDeletionCost, err := strconv.ParseFloat(podDeletionCostStr, 64)
if err != nil {
logging.FromContext(ctx).Errorf("parsing %s=%s from pod %s, %s",
v1.PodDeletionCost, podDeletionCostStr, client.ObjectKeyFromObject(p), err)
log.FromContext(ctx).Error(err, fmt.Sprintf("failed parsing %s=%s from pod %s",
v1.PodDeletionCost, podDeletionCostStr, client.ObjectKeyFromObject(p)))
} else {
// the pod deletion disruptionCost is in [-2147483647, 2147483647]
// the min pod disruptionCost makes one pod ~ -15 pods, and the max pod disruptionCost to ~ 17 pods.
