Overridden quantity must not exceed namespace Maximum
The overridden quantity must not exceed the namespace Maximum specified
via a LimitRange object.

Currently, the mutating admission webhook overrides a resource above
the namespace maximum and thus fails Pod creation; we see the
following error:
"pods "croe2e-f7c9h" is forbidden: maximum cpu usage per Container is 1, but limit is 2"

Query the Maximum limit for the CPU and Memory resources specified in
the LimitRange and ensure that the overridden resource quantity never
exceeds the namespace Maximum.
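
For reference, the namespace Maximum in question comes from a LimitRange
with a per-Container max. A minimal sketch of such an object, consistent
with the error above (the metadata names, the memory values, and the min
values are assumed for illustration only):

apiVersion: v1
kind: LimitRange
metadata:
  name: container-limits   # hypothetical name
  namespace: croe2e        # hypothetical namespace
spec:
  limits:
  - type: Container
    max:
      cpu: "1"        # the namespace maximum from the error message
      memory: 1Gi     # assumed value
    min:
      cpu: 100m       # assumed value
      memory: 128Mi   # assumed value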
tkashem committed Feb 11, 2020
1 parent 7767a17 commit 251b8e1
Showing 7 changed files with 201 additions and 47 deletions.
1 change: 1 addition & 0 deletions artifacts/manifests/300_rbac.yaml
@@ -59,6 +59,7 @@ rules:
- ""
resources:
- namespaces
- limitranges
verbs:
- get
- list
35 changes: 22 additions & 13 deletions pkg/clusterresourceoverride/admission.go
@@ -109,34 +109,42 @@ func NewAdmission(kubeClientConfig *restclient.Config, stopCh <-chan struct{}, c
}

factory := informers.NewSharedInformerFactory(client, defaultResyncPeriod)
informer := factory.Core().V1().Namespaces()
nsLister := informer.Lister()

nsInformer := informer.Informer()
namespaces := factory.Core().V1().Namespaces()
nsInformer := namespaces.Informer()
go nsInformer.Run(stopCh)

limitRanges := factory.Core().V1().LimitRanges()
limitRangeInformer := limitRanges.Informer()
go limitRangeInformer.Run(stopCh)

if !cache.WaitForCacheSync(stopCh, nsInformer.HasSynced) {
err = fmt.Errorf("name=%s failed to wait for cache to sync", Name)
err = fmt.Errorf("name=%s failed to wait for Namespace informer cache to sync", Name)

klog.V(1).Info(err.Error())
return
}

limitRangesLister := factory.Core().V1().LimitRanges().Lister()
if !cache.WaitForCacheSync(stopCh, limitRangeInformer.HasSynced) {
err = fmt.Errorf("name=%s failed to wait for LimitRange informer cache to sync", Name)

klog.V(1).Info(err.Error())
return
}

admission = &clusterResourceOverrideAdmission{
config: config,
nsLister: nsLister,
nsLister: namespaces.Lister(),
limitQuerier: &namespaceLimitQuerier{
limitRangesLister: limitRangesLister,
limitRangesLister: limitRanges.Lister(),
},
}

return
}

func setNamespaceFloor(nsMinimum *Floor) *Floor {
target := &Floor{
func setNamespaceFloor(nsMinimum *CPUMemory) *CPUMemory {
target := &CPUMemory{
Memory: &defaultMemoryFloor,
CPU: &defaultCPUFloor,
}
@@ -214,14 +222,15 @@ func (p *clusterResourceOverrideAdmission) Admit(request *admissionv1beta1.Admis

// Don't mutate resource requirements below the namespace
// limit minimums.
nsMinimum, err := p.limitQuerier.QueryMinimum(request.Namespace)
nsMinimum, nsMaximum, err := p.limitQuerier.QueryFloorAndCeiling(request.Namespace)
if err != nil {
return admissionresponse.WithForbidden(request, err)
}
klog.V(5).Infof("namespace=%s LimitRange query - minimum=%v maximum=%v", request.Namespace, nsMinimum, nsMaximum)

klog.V(5).Infof("namespace=%s initial pod limits are: %#v", request.Namespace, pod.Spec)
klog.V(5).Infof("namespace=%s initial pod: initContainers=%#v containers=%#v", request.Namespace, pod.Spec.InitContainers, pod.Spec.Containers)

mutator, err := NewMutator(p.config, setNamespaceFloor(nsMinimum), cpuBaseScaleFactor)
mutator, err := NewMutator(p.config, setNamespaceFloor(nsMinimum), nsMaximum, cpuBaseScaleFactor)
if err != nil {
return admissionresponse.WithInternalServerError(request, err)
}
@@ -231,7 +240,7 @@ func (p *clusterResourceOverrideAdmission) Admit(request *admissionv1beta1.Admis
return admissionresponse.WithInternalServerError(request, err)
}

klog.V(5).Infof("namespace=%s pod limits after overrides are: %#v", request.Namespace, current.Spec)
klog.V(5).Infof("namespace=%s pod limits after overrides are: initContainers=%#v containers=%#v", request.Namespace, current.Spec.InitContainers, current.Spec.Containers)

patch, patchErr := Patch(request.Object, current)
if patchErr != nil {
2 changes: 1 addition & 1 deletion pkg/clusterresourceoverride/admission_test.go
@@ -15,7 +15,7 @@ func TestSetNamespaceFloor(t *testing.T) {
memory := resource.MustParse("1Gi")
require.False(t, memory.Equal(defaultMemoryFloor), "bad test setup, default and namespace memory floor must not be equal")

namespaceFloor := &Floor{
namespaceFloor := &CPUMemory{
CPU: &cpu,
Memory: &memory,
}
78 changes: 56 additions & 22 deletions pkg/clusterresourceoverride/limitquerier.go
@@ -13,48 +13,66 @@ type namespaceLimitQuerier struct {
limitRangesLister corev1listers.LimitRangeLister
}

func (l *namespaceLimitQuerier) QueryMinimum(namespace string) (minimum *Floor, err error) {
limits, listErr := l.limitRangesLister.LimitRanges(namespace).List(labels.Everything())
func (l *namespaceLimitQuerier) QueryFloorAndCeiling(namespace string) (floor *CPUMemory, ceiling *CPUMemory, err error) {
limitRanges, listErr := l.limitRangesLister.LimitRanges(namespace).List(labels.Everything())
if listErr != nil {
err = fmt.Errorf("failed to query limitrange - %v", listErr)
return
}

nsCPUFloor := minResourceLimits(limits, corev1.ResourceCPU)
nsMemFloor := minResourceLimits(limits, corev1.ResourceMemory)
nsCPUMinimum, nsCPUMaximum := GetMinMax(limitRanges, corev1.ResourceCPU)
nsMemMinimum, nsMemMaximum := GetMinMax(limitRanges, corev1.ResourceMemory)

minimum = &Floor{
CPU: nsCPUFloor,
Memory: nsMemFloor,
floor = &CPUMemory{
CPU: nsCPUMinimum,
Memory: nsMemMinimum,
}
ceiling = &CPUMemory{
CPU: nsCPUMaximum,
Memory: nsMemMaximum,
}
return
}

// minResourceLimits finds the Min limit for resourceName. Nil is
// returned if limitRanges is empty or limits contains no resourceName
// limits.
func minResourceLimits(limitRanges []*corev1.LimitRange, resourceName corev1.ResourceName) *resource.Quantity {
limits := []*resource.Quantity{}
// GetMinMax finds the Minimum and Maximum limits, respectively, for the specified resource.
// Nil is returned if limitRanges is empty or limits contains no resourceName limits.
func GetMinMax(limitRanges []*corev1.LimitRange, resourceName corev1.ResourceName) (minimum *resource.Quantity, maximum *resource.Quantity) {
minList, maxList := findMinMaxLimits(limitRanges, resourceName)

minimum = minQuantity(minList)
maximum = maxQuantity(maxList)

return
}

func findMinMaxLimits(limitRanges []*corev1.LimitRange, resourceName corev1.ResourceName) (minimum []*resource.Quantity, maximum []*resource.Quantity) {
minimum = []*resource.Quantity{}
maximum = []*resource.Quantity{}

for _, limitRange := range limitRanges {
for _, limit := range limitRange.Spec.Limits {
if limit.Type == corev1.LimitTypeContainer {
if limit, found := limit.Min[resourceName]; found {
clone := limit.DeepCopy()
limits = append(limits, &clone)
for _, limits := range limitRange.Spec.Limits {
if limits.Type == corev1.LimitTypeContainer {
if min, found := limits.Min[resourceName]; found {
clone := min.DeepCopy()
minimum = append(minimum, &clone)
}

if max, found := limits.Max[resourceName]; found {
clone := max.DeepCopy()
maximum = append(maximum, &clone)
}
}
}
}

if len(limits) == 0 {
return nil
}

return minQuantity(limits)
return
}

func minQuantity(quantities []*resource.Quantity) *resource.Quantity {
if len(quantities) == 0 {
return nil
}

min := quantities[0].DeepCopy()

for i := range quantities {
@@ -65,3 +83,19 @@ func minQuantity(quantities []*resource.Quantity) *resource.Quantity {

return &min
}

func maxQuantity(quantities []*resource.Quantity) *resource.Quantity {
if len(quantities) == 0 {
return nil
}

max := quantities[0].DeepCopy()

for i := range quantities {
if quantities[i].Cmp(max) > 0 {
max = quantities[i].DeepCopy()
}
}

return &max
}
81 changes: 81 additions & 0 deletions pkg/clusterresourceoverride/limitquerier_test.go
@@ -0,0 +1,81 @@
package clusterresourceoverride

import (
"testing"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"

"github.com/stretchr/testify/assert"
)

func TestGetMinMax(t *testing.T) {
tests := []struct {
name string
limitRanges []*corev1.LimitRange
resourceName corev1.ResourceName
minimumWant resource.Quantity
maximumWant resource.Quantity
}{
{
name: "WithMaximumCPULimit",
limitRanges: []*corev1.LimitRange{
{
Spec: corev1.LimitRangeSpec{
Limits: []corev1.LimitRangeItem{
{
Type: corev1.LimitTypeContainer,
Max: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("1024Mi"),
corev1.ResourceCPU: resource.MustParse("1000m"),
},
Min: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("128Mi"),
corev1.ResourceCPU: resource.MustParse("100m"),
},
},
},
},
},
},
resourceName: corev1.ResourceCPU,
maximumWant: resource.MustParse("1000m"),
minimumWant: resource.MustParse("100m"),
},

{
name: "WithMaximumMemoryLimit",
limitRanges: []*corev1.LimitRange{
{
Spec: corev1.LimitRangeSpec{
Limits: []corev1.LimitRangeItem{
{
Type: corev1.LimitTypeContainer,
Max: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("1024Mi"),
corev1.ResourceCPU: resource.MustParse("1000m"),
},
Min: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("128Mi"),
corev1.ResourceCPU: resource.MustParse("100m"),
},
},
},
},
},
},
resourceName: corev1.ResourceMemory,
maximumWant: resource.MustParse("1024Mi"),
minimumWant: resource.MustParse("128Mi"),
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
minimumGot, maximumGot := GetMinMax(test.limitRanges, test.resourceName)

assert.True(t, test.minimumWant.Equal(*minimumGot))
assert.True(t, test.maximumWant.Equal(*maximumGot))
})
}
}
41 changes: 35 additions & 6 deletions pkg/clusterresourceoverride/mutator.go
@@ -8,28 +8,30 @@ import (
"k8s.io/klog"
)

type Floor struct {
type CPUMemory struct {
CPU *resource.Quantity
Memory *resource.Quantity
}

func NewMutator(config *Config, floor *Floor, cpuBaseScaleFactor float64) (mutator *podMutator, err error) {
if config == nil || floor == nil {
err = errors.New("invalid input")
func NewMutator(config *Config, minimum *CPUMemory, maximum *CPUMemory, cpuBaseScaleFactor float64) (mutator *podMutator, err error) {
if config == nil || minimum == nil || maximum == nil {
err = errors.New("NewMutator: invalid input")
return
}

mutator = &podMutator{
config: config,
floor: floor,
floor: minimum,
ceiling: maximum,
cpuBaseScaleFactor: cpuBaseScaleFactor,
}
return
}

type podMutator struct {
config *Config
floor *Floor
floor *CPUMemory
ceiling *CPUMemory
cpuBaseScaleFactor float64
}

@@ -94,6 +96,12 @@ func (m *podMutator) OverrideMemory(resources *corev1.ResourceRequirements) {
overridden = &copy
}

if m.IsMemoryCeilingSpecified() && overridden.Cmp(*m.ceiling.Memory) > 0 {
klog.V(5).Infof("%s pod limit %q above namespace limit; setting limit to %q", corev1.ResourceMemory, overridden.String(), m.ceiling.Memory.String())
copy := m.ceiling.Memory.DeepCopy()
overridden = &copy
}

ensureRequests(resources)
resources.Requests[corev1.ResourceMemory] = *overridden
}
@@ -121,6 +129,13 @@ func (m *podMutator) OverrideCPULimit(resources *corev1.ResourceRequirements) {
overridden = &clone
}

if m.IsCpuCeilingSpecified() && overridden.Cmp(*m.ceiling.CPU) > 0 {
klog.V(5).Infof("%s pod limit %q above namespace limit; setting limit to %q", corev1.ResourceCPU, overridden.String(), m.ceiling.CPU.String())

clone := m.ceiling.CPU.DeepCopy()
overridden = &clone
}

ensureLimits(resources)
resources.Limits[corev1.ResourceCPU] = *overridden
}
@@ -146,6 +161,12 @@ func (m *podMutator) OverrideCPU(resources *corev1.ResourceRequirements) {
overridden = &clone
}

if m.IsCpuCeilingSpecified() && overridden.Cmp(*m.ceiling.CPU) > 0 {
klog.V(5).Infof("%s pod limit %q above namespace limit; setting limit to %q", corev1.ResourceCPU, overridden.String(), m.ceiling.CPU.String())
clone := m.ceiling.CPU.DeepCopy()
overridden = &clone
}

ensureRequests(resources)
resources.Requests[corev1.ResourceCPU] = *overridden
}
@@ -158,6 +179,14 @@ func (m *podMutator) IsMemoryFloorSpecified() bool {
return m.floor != nil && m.floor.Memory != nil
}

func (m *podMutator) IsCpuCeilingSpecified() bool {
return m.ceiling != nil && m.ceiling.CPU != nil
}

func (m *podMutator) IsMemoryCeilingSpecified() bool {
return m.ceiling != nil && m.ceiling.Memory != nil
}

func ensureRequests(resources *corev1.ResourceRequirements) {
if len(resources.Requests) == 0 {
resources.Requests = corev1.ResourceList{}