Fix e2e resource limits #70203

Merged
6 changes: 4 additions & 2 deletions pkg/scheduler/algorithmprovider/defaults/defaults.go
@@ -207,14 +207,16 @@ func ApplyFeatureGates() {
        factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.PodToleratesNodeTaintsPred)
        factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.CheckNodeUnschedulablePred)

-        glog.Warningf("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
+        glog.Infof("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
    }

    // Prioritizes nodes that satisfy pod's resource limits
    if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
        glog.Infof("Registering resourcelimits priority function")
-        factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1)
+        // Register the priority function to specific provider too.
+        factory.InsertPriorityKeyToAlgorithmProviderMap(factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1))
    }
+
}

func registerAlgorithmProvider(predSet, priSet sets.String) {
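The chained call in the hunk above works because RegisterPriorityFunction2 returns the name under which the priority function was registered, so its result can be handed straight to InsertPriorityKeyToAlgorithmProviderMap and every registered algorithm provider picks up the new priority. A minimal, self-contained Go sketch of that register-then-fan-out pattern (the registries below are simplified stand-ins for illustration, not the real scheduler factory types):

    package main

    import (
        "fmt"
        "sync"
    )

    // Simplified stand-ins for the factory's registries.
    var (
        mu         sync.Mutex
        priorities = map[string]int{} // priority name -> weight
        providers  = map[string]map[string]bool{
            "DefaultProvider":           {},
            "ClusterAutoscalerProvider": {},
        }
    )

    // registerPriority stores a priority function and, like RegisterPriorityFunction2,
    // returns the key it was registered under.
    func registerPriority(name string, weight int) string {
        mu.Lock()
        defer mu.Unlock()
        priorities[name] = weight
        return name
    }

    // insertPriorityKeyToProviders mirrors InsertPriorityKeyToAlgorithmProviderMap:
    // it fans the key out to every provider under the same lock.
    func insertPriorityKeyToProviders(key string) {
        mu.Lock()
        defer mu.Unlock()
        for _, keys := range providers {
            keys[key] = true
        }
    }

    func main() {
        // The inner call runs first and releases the lock before the outer call
        // takes it, so the chained form used in the diff cannot deadlock.
        insertPriorityKeyToProviders(registerPriority("ResourceLimitsPriority", 1))
        fmt.Println(providers["DefaultProvider"]) // map[ResourceLimitsPriority:true]
    }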
1 change: 0 additions & 1 deletion pkg/scheduler/factory/factory.go
@@ -1096,7 +1096,6 @@ func (c *configFactory) CreateFromProvider(providerName string) (*Config, error)
    if err != nil {
        return nil, err
    }
-
    return c.CreateFromKeys(provider.FitPredicateKeys, provider.PriorityFunctionKeys, []algorithm.SchedulerExtender{})
}

11 changes: 11 additions & 0 deletions pkg/scheduler/factory/plugins.go
@@ -167,6 +167,17 @@ func InsertPredicateKeyToAlgorithmProviderMap(key string) {
    return
}

+// InsertPriorityKeyToAlgorithmProviderMap inserts a priority function to all algorithmProviders which are in algorithmProviderMap.
+func InsertPriorityKeyToAlgorithmProviderMap(key string) {
+    schedulerFactoryMutex.Lock()
+    defer schedulerFactoryMutex.Unlock()
+
+    for _, provider := range algorithmProviderMap {
+        provider.PriorityFunctionKeys.Insert(key)
+    }
+    return
@Huang-Wei (Member) commented on Oct 24, 2018:

Looks like there is no need to return here.

Suggested change: drop the trailing return.

Contributor (author) replied:

Just a coding style to indicate the end of the function; I don't have a preference, though.

+}
+
// RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by
// kubelet, DaemonSet; it is always included in configuration. Returns the name with which the predicate was
// registered.
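provider.PriorityFunctionKeys above is a sets.String from k8s.io/apimachinery/pkg/util/sets (the same type registerAlgorithmProvider takes for its key sets), so the Insert in the loop is idempotent: adding a key that is already present leaves the set unchanged, and calling ApplyFeatureGates more than once cannot duplicate entries. A quick standalone illustration:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/sets"
    )

    func main() {
        keys := sets.NewString("ImageLocalityPriority")

        // Inserting the same key twice is a no-op the second time.
        keys.Insert("ResourceLimitsPriority")
        keys.Insert("ResourceLimitsPriority")

        // List returns the members as a sorted slice.
        fmt.Println(keys.List()) // [ImageLocalityPriority ResourceLimitsPriority]
    }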
2 changes: 2 additions & 0 deletions test/e2e/scheduling/BUILD
@@ -22,6 +22,7 @@ go_library(
    "//pkg/apis/core:go_default_library",
    "//pkg/apis/extensions:go_default_library",
    "//pkg/apis/scheduling:go_default_library",
+    "//pkg/features:go_default_library",
    "//pkg/kubelet/apis:go_default_library",
    "//pkg/quota/v1/evaluator/core:go_default_library",
    "//pkg/scheduler/algorithm/priorities/util:go_default_library",
@@ -41,6 +42,7 @@ go_library(
    "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
+    "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
    "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
    "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
    "//test/e2e/common:go_default_library",
29 changes: 19 additions & 10 deletions test/e2e/scheduling/priorities.go
@@ -32,7 +32,9 @@ import (
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/strategicpatch"
    "k8s.io/apimachinery/pkg/util/uuid"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
    clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/kubernetes/pkg/features"
    priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
    "k8s.io/kubernetes/test/e2e/common"
    "k8s.io/kubernetes/test/e2e/framework"
@@ -83,7 +85,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
        Expect(err).NotTo(HaveOccurred())
    })

-    It("Pod should be schedule to node that don't match the PodAntiAffinity terms", func() {
+    It("Pod should be scheduled to node that don't match the PodAntiAffinity terms", func() {
        By("Trying to launch a pod with a label to get a node which can launch it.")
        pod := runPausePod(f, pausePodConfig{
            Name: "pod-with-label-security-s1",
@@ -144,7 +146,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
        Expect(labelPod.Spec.NodeName).NotTo(Equal(nodeName))
    })

-    It("Pod should avoid to schedule to node that have avoidPod annotation", func() {
+    It("Pod should avoid nodes that have avoidPod annotation", func() {
        nodeName := nodeList.Items[0].Name
        // make the nodes have balanced cpu,mem usage
        err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
@@ -207,7 +209,8 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
        }
    })

-    It("Pod should perfer to scheduled to nodes pod can tolerate", func() {
+    It("Pod should be preferably scheduled to nodes pod can tolerate", func() {
+
        // make the nodes have balanced cpu,mem usage ratio
        err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
        framework.ExpectNoError(err)
@@ -257,33 +260,38 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
        Expect(tolePod.Spec.NodeName).To(Equal(nodeName))
    })
    It("Pod should be preferably scheduled to nodes which satisfy its limits", func() {
+        if !utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
+            framework.Skipf("ResourceLimits Priority function is not enabled, so skipping this test")
+        }
        var podwithLargeRequestedResource *v1.ResourceRequirements = &v1.ResourceRequirements{
            Requests: v1.ResourceList{
                v1.ResourceMemory: resource.MustParse("100Mi"),
                v1.ResourceCPU:    resource.MustParse("100m"),
            },
            Limits: v1.ResourceList{
                v1.ResourceMemory: resource.MustParse("3000Mi"),
-                v1.ResourceCPU:    resource.MustParse("100m"),
+                v1.ResourceCPU:    resource.MustParse("5000m"),
            },
        }
        // Update one node to have large allocatable.
        lastNode := nodeList.Items[len(nodeList.Items)-1]
        nodeName := lastNode.Name
        nodeOriginalMemory, found := lastNode.Status.Allocatable[v1.ResourceMemory]
+        nodeOriginalCPU, found := lastNode.Status.Allocatable[v1.ResourceCPU]
        Expect(found).To(Equal(true))
        nodeOriginalMemoryVal := nodeOriginalMemory.Value()
-        err := updateMemoryOfNode(cs, nodeName, int64(10000))
+        nodeOriginalCPUVal := nodeOriginalCPU.MilliValue()
+        err := updateNodeAllocatable(cs, nodeName, int64(10000), int64(12000))
        Expect(err).NotTo(HaveOccurred())
        defer func() {
-            // Resize the node back to its original memory.
-            if err := updateMemoryOfNode(cs, nodeName, nodeOriginalMemoryVal); err != nil {
+            // Resize the node back to its original allocatable values.
+            if err := updateNodeAllocatable(cs, nodeName, nodeOriginalMemoryVal, nodeOriginalCPUVal); err != nil {
                framework.Logf("Failed to revert node memory with %v", err)
            }
        }()
        err = createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
        framework.ExpectNoError(err)
-        // After the above we should see 50% of node to be available which is 5000MiB for large node.
+        // After the above we should see 50% of node to be available which is 5000MiB memory, 6000m cpu for large node.
        By("Create a pod with unusual large limits")
        podWithLargeLimits := "with-large-limits"

@@ -445,15 +453,16 @@ func addRandomTaitToNode(cs clientset.Interface, nodeName string) *v1.Taint {
    return &testTaint
}

-// updateMemoryOfNode updates the memory of given node with the given value
-func updateMemoryOfNode(c clientset.Interface, nodeName string, memory int64) error {
+// updateNodeAllocatable updates the allocatable values of given node with the given values.
+func updateNodeAllocatable(c clientset.Interface, nodeName string, memory, cpu int64) error {
    node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
    framework.ExpectNoError(err)
    oldData, err := json.Marshal(node)
    if err != nil {
        return err
    }
    node.Status.Allocatable[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.BinarySI)
+    node.Status.Allocatable[v1.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
    newData, err := json.Marshal(node)
    if err != nil {
        return err
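The diff is truncated here, but given the oldData/newData marshaling and the strategicpatch and types imports earlier in this file, the remainder of updateNodeAllocatable presumably builds a two-way strategic merge patch and applies it to the node's status subresource, since Allocatable lives under Status. A hedged sketch of that tail, assuming the client-go Patch signature of this era rather than code actually shown in the diff:

    // Assumed continuation of updateNodeAllocatable; not shown in the truncated diff.
    patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
    if err != nil {
        return err
    }
    // Allocatable is part of NodeStatus, so patch the "status" subresource.
    _, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes, "status")
    return err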