diff --git a/pkg/scheduler/actions/allocate/allocate.go b/pkg/scheduler/actions/allocate/allocate.go index 913a7ffb26..70cb208f8d 100644 --- a/pkg/scheduler/actions/allocate/allocate.go +++ b/pkg/scheduler/actions/allocate/allocate.go @@ -110,7 +110,7 @@ func (alloc *Action) Execute(ssn *framework.Session) { } predicateFn := func(task *api.TaskInfo, node *api.NodeInfo) error { // Check for Resource Predicate - if !task.InitResreq.LessEqual(node.FutureIdle()) { + if !task.InitResreq.LessEqualInAllDimension(node.FutureIdle(), api.Zero) { return api.NewFitError(task, node, api.NodeResourceFitFailed) } @@ -211,7 +211,7 @@ func (alloc *Action) Execute(ssn *framework.Session) { var candidateNodes []*api.NodeInfo for _, n := range predicateNodes { - if task.InitResreq.LessEqual(n.Idle) || task.InitResreq.LessEqual(n.FutureIdle()) { + if task.InitResreq.LessEqualInAllDimension(n.Idle, api.Zero) || task.InitResreq.LessEqualInAllDimension(n.FutureIdle(), api.Zero) { candidateNodes = append(candidateNodes, n) } } @@ -229,7 +229,7 @@ func (alloc *Action) Execute(ssn *framework.Session) { } // Allocate idle resource to the task. - if task.InitResreq.LessEqual(node.Idle) { + if task.InitResreq.LessEqualInAllDimension(node.Idle, api.Zero) { klog.V(3).Infof("Binding Task <%v/%v> to node <%v>", task.Namespace, task.Name, node.Name) if err := stmt.Allocate(task, node); err != nil { @@ -243,7 +243,7 @@ func (alloc *Action) Execute(ssn *framework.Session) { task.Namespace, task.Name, node.Name) // Allocate releasing resource to the task if any. 
- if task.InitResreq.LessEqual(node.FutureIdle()) { + if task.InitResreq.LessEqualInAllDimension(node.FutureIdle(), api.Zero) { klog.V(3).Infof("Pipelining Task <%v/%v> to node <%v> for <%v> on <%v>", task.Namespace, task.Name, node.Name, task.InitResreq, node.Releasing) if err := stmt.Pipeline(task, node.Name); err != nil { diff --git a/pkg/scheduler/actions/preempt/preempt.go b/pkg/scheduler/actions/preempt/preempt.go index ca9f8209ae..608cffb902 100644 --- a/pkg/scheduler/actions/preempt/preempt.go +++ b/pkg/scheduler/actions/preempt/preempt.go @@ -236,7 +236,7 @@ func preempt( for !victimsQueue.Empty() { // If reclaimed enough resources, break loop to avoid Sub panic. - if preemptor.InitResreq.LessEqual(node.FutureIdle()) { + if preemptor.InitResreq.LessEqualInAllDimension(node.FutureIdle(), api.Zero) { break } preemptee := victimsQueue.Pop().(*api.TaskInfo) @@ -254,7 +254,7 @@ func preempt( klog.V(3).Infof("Preempted <%v> for Task <%s/%s> requested <%v>.", preempted, preemptor.Namespace, preemptor.Name, preemptor.InitResreq) - if preemptor.InitResreq.LessEqual(node.FutureIdle()) { + if preemptor.InitResreq.LessEqualInAllDimension(node.FutureIdle(), api.Zero) { if err := stmt.Pipeline(preemptor, node.Name); err != nil { klog.Errorf("Failed to pipeline Task <%s/%s> on Node <%s>", preemptor.Namespace, preemptor.Name, node.Name) diff --git a/pkg/scheduler/actions/reclaim/reclaim.go b/pkg/scheduler/actions/reclaim/reclaim.go index 503fc6af33..12faab095e 100644 --- a/pkg/scheduler/actions/reclaim/reclaim.go +++ b/pkg/scheduler/actions/reclaim/reclaim.go @@ -160,7 +160,7 @@ func (ra *Action) Execute(ssn *framework.Session) { } reclaimed.Add(reclaimee.Resreq) // If reclaimed enough resources, break loop to avoid Sub panic. 
- if resreq.LessEqual(reclaimed) { + if resreq.LessEqualInAllDimension(reclaimed, api.Zero) { break } } @@ -168,7 +168,7 @@ func (ra *Action) Execute(ssn *framework.Session) { klog.V(3).Infof("Reclaimed <%v> for task <%s/%s> requested <%v>.", reclaimed, task.Namespace, task.Name, task.InitResreq) - if task.InitResreq.LessEqual(reclaimed) { + if task.InitResreq.LessEqualInAllDimension(reclaimed, api.Zero) { if err := ssn.Pipeline(task, n.Name); err != nil { klog.Errorf("Failed to pipeline Task <%s/%s> on Node <%s>", task.Namespace, task.Name, n.Name) diff --git a/pkg/scheduler/api/node_info.go b/pkg/scheduler/api/node_info.go index 305384a772..fec7718c12 100644 --- a/pkg/scheduler/api/node_info.go +++ b/pkg/scheduler/api/node_info.go @@ -239,7 +239,7 @@ func (ni *NodeInfo) setNodeState(node *v1.Node) { } // set NodeState according to resources - if !ni.Used.LessEqual(ni.Allocatable) { + if !ni.Used.LessEqualInAllDimension(ni.Allocatable, Zero) { ni.State = NodeState{ Phase: NotReady, Reason: "OutOfSync", @@ -331,7 +331,7 @@ func (ni *NodeInfo) SetNode(node *v1.Node) { } func (ni *NodeInfo) allocateIdleResource(ti *TaskInfo) error { - if ti.Resreq.LessEqual(ni.Idle) { + if ti.Resreq.LessEqualInAllDimension(ni.Idle, Zero) { ni.Idle.Sub(ti.Resreq) return nil } diff --git a/pkg/scheduler/api/resource_info.go b/pkg/scheduler/api/resource_info.go index 2377832967..cd0fdfbf98 100644 --- a/pkg/scheduler/api/resource_info.go +++ b/pkg/scheduler/api/resource_info.go @@ -189,7 +189,7 @@ func (r *Resource) Add(rr *Resource) *Resource { //Sub subtracts two Resource objects. 
func (r *Resource) Sub(rr *Resource) *Resource { - assert.Assertf(rr.LessEqual(r), "resource is not sufficient to do operation: <%v> sub <%v>", r, rr) + assert.Assertf(rr.LessEqualInAllDimension(r, Zero), "resource is not sufficient to do operation: <%v> sub <%v>", r, rr) r.MilliCPU -= rr.MilliCPU r.Memory -= rr.Memory @@ -304,9 +304,10 @@ func (r *Resource) LessInAllDimension(rr *Resource, defaultValue DimensionDefaul return true } -// LessEqual works as the same as the LessEqualStrict. -// The difference is that function lessEqualFunc regards tiny value difference as Equal. -func (r *Resource) LessEqual(rr *Resource) bool { +// LessEqualInAllDimension returns true only on condition that all dimensions of resources in r are less than or equal to that of rr, +// otherwise returns false. +// @param defaultValue "default value for resource dimension not defined in ScalarResources. Its value can only be one of 'Zero' and 'Infinity'" +func (r *Resource) LessEqualInAllDimension(rr *Resource, defaultValue DimensionDefaultValue) bool { lessEqualFunc := func(l, r, diff float64) bool { if l < r || math.Abs(l-r) < diff { return true @@ -314,59 +315,27 @@ func (r *Resource) LessEqual(rr *Resource) bool { return false } - if !lessEqualFunc(r.MilliCPU, rr.MilliCPU, minResource) { + leftResource := r.Clone() + rightResource := rr.Clone() + + if !lessEqualFunc(leftResource.MilliCPU, rightResource.MilliCPU, minResource) { return false } - if !lessEqualFunc(r.Memory, rr.Memory, minResource) { + if !lessEqualFunc(leftResource.Memory, rightResource.Memory, minResource) { return false } - if r.ScalarResources == nil { - return true - } - - if rr.ScalarResources == nil { - return false - } + r.setDefaultValue(leftResource, rightResource, defaultValue) - for rName, rQuant := range r.ScalarResources { - if rQuant <= minResource { + for resourceName, leftValue := range leftResource.ScalarResources { + rightValue, _ := rightResource.ScalarResources[resourceName] + if rightValue == -1 {
continue } - rrQuant, ok := rr.ScalarResources[rName] - if !ok || !lessEqualFunc(rQuant, rrQuant, minResource) { - return false - } - } - - return true -} - -// LessEqualStrict returns true only on the following conditions: -// 1. All dimensions resources in r are less than that of rr -// 2. Part dimensions are equal while the others are less in r -// 3. All dimensions resources in r are equal with that of rr -// Otherwise returns false. -// Note: Any dimension of resource, which is not listed in resource object, is regarded as zero. -func (r *Resource) LessEqualStrict(rr *Resource) bool { - lessFunc := func(l, r float64) bool { - return l <= r - } - - if !lessFunc(r.MilliCPU, rr.MilliCPU) { - return false - } - if !lessFunc(r.Memory, rr.Memory) { - return false - } - - for rName, rQuant := range r.ScalarResources { - _, ok := rr.ScalarResources[rName] - if !ok || !lessFunc(rQuant, rr.ScalarResources[rName]) { + if leftValue == -1 || !lessEqualFunc(leftValue, rightValue, minResource) { return false } } - return true } diff --git a/pkg/scheduler/api/resource_info_test.go b/pkg/scheduler/api/resource_info_test.go index a5220e1719..cd7520f5f7 100644 --- a/pkg/scheduler/api/resource_info_test.go +++ b/pkg/scheduler/api/resource_info_test.go @@ -243,74 +243,6 @@ func TestAddResource(t *testing.T) { } } -func TestLessEqual(t *testing.T) { - tests := []struct { - resource1 *Resource - resource2 *Resource - expected bool - }{ - { - resource1: &Resource{}, - resource2: &Resource{ - MilliCPU: 4000, - Memory: 2000, - ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000}, - }, - expected: true, - }, - { - resource1: &Resource{ - MilliCPU: 4000, - Memory: 4000, - ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000}, - }, - resource2: &Resource{ - MilliCPU: 2000, - Memory: 2000, - ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 5000}, - }, - 
expected: false, - }, - { - resource1: &Resource{ - MilliCPU: 4, - Memory: 4000, - ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1}, - }, - resource2: &Resource{}, - expected: false, - }, - { - resource1: &Resource{ - MilliCPU: 0, - Memory: 0, - }, - resource2: &Resource{}, - expected: true, - }, - { - resource1: &Resource{ - MilliCPU: 4000, - Memory: 4000, - ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000}, - }, - resource2: &Resource{ - MilliCPU: 8000, - Memory: 8000, - ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 5000}, - }, - expected: true, - }, - } - - for _, test := range tests { - flag := test.resource1.LessEqual(test.resource2) - if !reflect.DeepEqual(test.expected, flag) { - t.Errorf("expected: %#v, got: %#v", test.expected, flag) - } - } -} - func TestSubResource(t *testing.T) { tests := []struct { resource1 *Resource @@ -465,207 +397,142 @@ func TestLessInAllDimension(t *testing.T) { } } -func TestLessEqualStrict(t *testing.T) { - tests := []struct { - name string - former *Resource - latter *Resource - expected bool +func TestLessEqualInAllDimension(t *testing.T) { + testsForDefaultZero := []struct { + resource1 *Resource + resource2 *Resource + expected bool }{ { - name: "same resource", - former: &Resource{ - MilliCPU: 1000, - Memory: 1 * 1024 * 1024, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000, - }, - }, - latter: &Resource{ - MilliCPU: 1000, - Memory: 1 * 1024 * 1024, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000, - }, - }, - expected: true, + resource1: &Resource{}, + resource2: &Resource{}, + expected: true, }, { - name: "cpu less", - former: &Resource{ - MilliCPU: 1000 - 1, - Memory: 1 * 1024 * 1024, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000, - }, - }, - latter: &Resource{ - MilliCPU: 1000, - 
Memory: 1 * 1024 * 1024, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000, - }, + resource1: &Resource{}, + resource2: &Resource{ + MilliCPU: 4000, + Memory: 2000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000}, }, expected: true, }, { - name: "memory less", - former: &Resource{ - MilliCPU: 1000, - Memory: 1*1024*1024 - 1, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000, - }, - }, - latter: &Resource{ - MilliCPU: 1000, - Memory: 1 * 1024 * 1024, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000, - }, + resource1: &Resource{ + MilliCPU: 4000, + Memory: 2000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000}, }, - expected: true, + resource2: &Resource{}, + expected: false, }, { - name: "scalar resource less", - former: &Resource{ - MilliCPU: 1000, - Memory: 1 * 1024 * 1024, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000 - 1, - }, + resource1: &Resource{ + MilliCPU: 4000, + Memory: 4000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000}, }, - latter: &Resource{ - MilliCPU: 1000, - Memory: 1 * 1024 * 1024, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000, - }, + resource2: &Resource{ + MilliCPU: 8000, + Memory: 8000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 5000}, }, expected: true, }, { - name: "memory larger", - former: &Resource{ - MilliCPU: 1000, - Memory: 1*1024*1024 + 1, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000, - }, + resource1: &Resource{ + MilliCPU: 4000, + Memory: 8000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000}, }, - latter: &Resource{ - MilliCPU: 1000, 
- Memory: 1 * 1024 * 1024, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000, - }, + resource2: &Resource{ + MilliCPU: 8000, + Memory: 8000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 5000}, }, - expected: false, + expected: true, }, { - name: "scalar larger", - former: &Resource{ - MilliCPU: 1000, - Memory: 1 * 1024 * 1024, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000 + 1, - }, + resource1: &Resource{ + MilliCPU: 4000, + Memory: 4000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 2000}, }, - latter: &Resource{ - MilliCPU: 1000, - Memory: 1 * 1024 * 1024, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000, - }, + resource2: &Resource{ + MilliCPU: 8000, + Memory: 8000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 5000}, }, - expected: false, + expected: true, }, { - name: "former does not have scalar resource", - former: &Resource{ - MilliCPU: 1000, - Memory: 1 * 1024 * 1024, + resource1: &Resource{ + MilliCPU: 4000, + Memory: 4000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 5000, "hugepages-test": 2000}, }, - latter: &Resource{ - MilliCPU: 1000, - Memory: 1 * 1024 * 1024, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000, - }, + resource2: &Resource{ + MilliCPU: 8000, + Memory: 8000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 5000}, }, - expected: true, + expected: false, }, { - name: "latter does not have scalar resource", - former: &Resource{ - MilliCPU: 1000, - Memory: 1 * 1024 * 1024, - ScalarResources: map[v1.ResourceName]float64{ - "nvidia.com/gpu-tesla-p100-16GB": 8000, - }, + resource1: &Resource{ + MilliCPU: 9000, + Memory: 4000, + ScalarResources: 
map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000}, }, - latter: &Resource{ - MilliCPU: 1000, - Memory: 1 * 1024 * 1024, + resource2: &Resource{ + MilliCPU: 8000, + Memory: 8000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 5000}, }, expected: false, }, } - for _, test := range tests { - result := test.former.LessEqualStrict(test.latter) - if !reflect.DeepEqual(test.expected, result) { - t.Errorf("case %s, expected: %#v, got: %#v", test.name, test.expected, result) - } - } -} - -func TestMinDimensionResource(t *testing.T) { - tests := []struct { + testsForDefaultInfinity := []struct { resource1 *Resource resource2 *Resource - expected *Resource + expected bool }{ { - resource1: &Resource{ - MilliCPU: 4000, - Memory: 2000, - ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1, "hugepages-test": 2}, - }, + resource1: &Resource{}, + resource2: &Resource{}, + expected: true, + }, + { + resource1: &Resource{}, resource2: &Resource{ - MilliCPU: 3000, - Memory: 2000, - ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 0, "hugepages-test": 0}, - }, - expected: &Resource{ - MilliCPU: 3000, + MilliCPU: 4000, Memory: 2000, - ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 0, "hugepages-test": 0}, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000}, }, + expected: false, }, { resource1: &Resource{ - MilliCPU: 4000, - Memory: 4000, - ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000}, - }, - resource2: &Resource{ - MilliCPU: 5000, - Memory: 2000, - ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 0, "hugepages-test": 3000}, - }, - expected: &Resource{ MilliCPU: 4000, Memory: 2000, - ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 0, "hugepages-test": 2000}, + ScalarResources: 
map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000}, }, + resource2: &Resource{}, + expected: false, }, } - for _, test := range tests { - test.resource1.MinDimensionResource(test.resource2) - if !reflect.DeepEqual(test.expected, test.resource1) { - t.Errorf("expected: %#v, got: %#v", test.expected, test.resource1) + for _, test := range testsForDefaultZero { + flag := test.resource1.LessEqualInAllDimension(test.resource2, Zero) + if !reflect.DeepEqual(test.expected, flag) { + t.Errorf("expected: %#v, got: %#v", test.expected, flag) + } + } + for _, test := range testsForDefaultInfinity { + flag := test.resource1.LessEqualInAllDimension(test.resource2, Infinity) + if !reflect.DeepEqual(test.expected, flag) { + t.Errorf("expected: %#v, got: %#v", test.expected, flag) } } } @@ -763,3 +630,53 @@ func TestLessInSomeDimension(t *testing.T) { } } } + +func TestMinDimensionResource(t *testing.T) { + tests := []struct { + resource1 *Resource + resource2 *Resource + expected *Resource + }{ + { + resource1: &Resource{ + MilliCPU: 4000, + Memory: 2000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1, "hugepages-test": 2}, + }, + resource2: &Resource{ + MilliCPU: 3000, + Memory: 2000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 0, "hugepages-test": 0}, + }, + expected: &Resource{ + MilliCPU: 3000, + Memory: 2000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 0, "hugepages-test": 0}, + }, + }, + { + resource1: &Resource{ + MilliCPU: 4000, + Memory: 4000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000}, + }, + resource2: &Resource{ + MilliCPU: 5000, + Memory: 2000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 0, "hugepages-test": 3000}, + }, + expected: &Resource{ + MilliCPU: 4000, + Memory: 2000, + ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 0, "hugepages-test": 2000}, + }, + 
}, + } + + for _, test := range tests { + test.resource1.MinDimensionResource(test.resource2) + if !reflect.DeepEqual(test.expected, test.resource1) { + t.Errorf("expected: %#v, got: %#v", test.expected, test.resource1) + } + } +} diff --git a/pkg/scheduler/plugins/overcommit/overcommit.go b/pkg/scheduler/plugins/overcommit/overcommit.go index 67f3089761..ba028ff6e4 100644 --- a/pkg/scheduler/plugins/overcommit/overcommit.go +++ b/pkg/scheduler/plugins/overcommit/overcommit.go @@ -107,7 +107,7 @@ func (op *overcommitPlugin) OnSessionOpen(ssn *framework.Session) { //TODO: if allow 1 more job to be inqueue beyond overcommit-factor, large job may be inqueue and create pods jobMinReq := api.NewResource(*job.PodGroup.Spec.MinResources) - if inqueue.Add(jobMinReq).LessEqual(idle) { + if inqueue.Add(jobMinReq).LessEqualInAllDimension(idle, api.Zero) { klog.V(4).Infof("Sufficient resources, permit job <%s/%s> to be inqueue", job.Namespace, job.Name) op.inqueueResource.Add(jobMinReq) return util.Permit diff --git a/pkg/scheduler/plugins/proportion/proportion.go b/pkg/scheduler/plugins/proportion/proportion.go index 08fc5b2a6d..d6fa7a0bec 100644 --- a/pkg/scheduler/plugins/proportion/proportion.go +++ b/pkg/scheduler/plugins/proportion/proportion.go @@ -161,12 +161,12 @@ func (pp *proportionPlugin) OnSessionOpen(ssn *framework.Session) { oldDeserved := attr.deserved.Clone() attr.deserved.Add(remaining.Clone().Multi(float64(attr.weight) / float64(totalWeight))) - if attr.capability != nil && !attr.deserved.LessEqualStrict(attr.capability) { + if attr.capability != nil && !attr.deserved.LessEqualInAllDimension(attr.capability, api.Infinity) { attr.deserved = helpers.Min(attr.deserved, attr.capability) attr.deserved = helpers.Min(attr.deserved, attr.request) meet[attr.queueID] = struct{}{} klog.V(4).Infof("queue <%s> is meet cause of the capability", attr.name) - } else if attr.request.LessEqualStrict(attr.deserved) { + } else if 
attr.request.LessEqualInAllDimension(attr.deserved, api.Zero) { attr.deserved = helpers.Min(attr.deserved, attr.request) meet[attr.queueID] = struct{}{} klog.V(4).Infof("queue <%s> is meet", attr.name) @@ -228,7 +228,7 @@ func (pp *proportionPlugin) OnSessionOpen(ssn *framework.Session) { continue } - if !allocated.LessEqualStrict(attr.deserved) { + if !allocated.LessEqualInAllDimension(attr.deserved, api.Zero) { allocated.Sub(reclaimee.Resreq) victims = append(victims, reclaimee) } @@ -241,7 +241,7 @@ func (pp *proportionPlugin) OnSessionOpen(ssn *framework.Session) { queue := obj.(*api.QueueInfo) attr := pp.queueOpts[queue.UID] - overused := !attr.allocated.LessEqual(attr.deserved) + overused := !attr.allocated.LessEqualInAllDimension(attr.deserved, api.Zero) metrics.UpdateQueueOverused(attr.name, overused) if overused { klog.V(3).Infof("Queue <%v>: deserved <%v>, allocated <%v>, share <%v>", @@ -269,7 +269,7 @@ func (pp *proportionPlugin) OnSessionOpen(ssn *framework.Session) { } minReq := job.GetMinResources() // The queue resource quota limit has not reached - inqueue := minReq.Add(attr.allocated).Add(attr.inqueue).LessEqual(api.NewResource(queue.Queue.Spec.Capability)) + inqueue := minReq.Add(attr.allocated).Add(attr.inqueue).LessEqualInAllDimension(api.NewResource(queue.Queue.Spec.Capability), api.Infinity) if inqueue { attr.inqueue.Add(job.GetMinResources()) return util.Permit diff --git a/pkg/scheduler/plugins/reservation/reservation.go b/pkg/scheduler/plugins/reservation/reservation.go index 9dcfdd5163..b03c1ba10d 100644 --- a/pkg/scheduler/plugins/reservation/reservation.go +++ b/pkg/scheduler/plugins/reservation/reservation.go @@ -127,7 +127,7 @@ func (rp *reservationPlugin) getUnlockedNodesWithMaxIdle(ssn *framework.Session) break } } - if !hasLocked && (maxIdleNode == nil || maxIdleNode.Idle.LessEqual(node.Idle)) { + if !hasLocked && (maxIdleNode == nil || maxIdleNode.Idle.LessEqualInAllDimension(node.Idle, api.Zero)) { maxIdleNode = node } } diff 
--git a/pkg/scheduler/plugins/task-topology/topology.go b/pkg/scheduler/plugins/task-topology/topology.go index 2797da0e26..0aa000c89a 100644 --- a/pkg/scheduler/plugins/task-topology/topology.go +++ b/pkg/scheduler/plugins/task-topology/topology.go @@ -165,7 +165,7 @@ func (p *taskTopologyPlugin) calcBucketScore(task *api.TaskInfo, node *api.NodeI // 3. the other tasks in bucket take into considering score += len(bucket.tasks) - if bucket.request == nil || bucket.request.LessEqual(maxResource) { + if bucket.request == nil || bucket.request.LessEqualInAllDimension(maxResource, api.Zero) { return score, jobManager, nil } @@ -178,7 +178,7 @@ func (p *taskTopologyPlugin) calcBucketScore(task *api.TaskInfo, node *api.NodeI } remains.Sub(bucketTask.Resreq) score-- - if remains.LessEqual(maxResource) { + if remains.LessEqualInAllDimension(maxResource, api.Zero) { break } } diff --git a/pkg/scheduler/util/scheduler_helper.go b/pkg/scheduler/util/scheduler_helper.go index 67eeab5c27..723e3ab42e 100644 --- a/pkg/scheduler/util/scheduler_helper.go +++ b/pkg/scheduler/util/scheduler_helper.go @@ -244,7 +244,7 @@ func ValidateVictims(preemptor *api.TaskInfo, node *api.NodeInfo, victims []*api } // Every resource of the preemptor needs to be less or equal than corresponding // idle resource after preemption. 
- if !preemptor.InitResreq.LessEqual(futureIdle) { + if !preemptor.InitResreq.LessEqualInAllDimension(futureIdle, api.Zero) { return fmt.Errorf("not enough resources: requested <%v>, but future idle <%v>", preemptor.InitResreq, futureIdle) } diff --git a/test/e2e/schedulingbase/job_scheduling.go b/test/e2e/schedulingbase/job_scheduling.go index 81c0b137f1..df067eb9bc 100644 --- a/test/e2e/schedulingbase/job_scheduling.go +++ b/test/e2e/schedulingbase/job_scheduling.go @@ -317,7 +317,7 @@ var _ = Describe("Job E2E Test", func() { need := schedulingapi.NewResource(v1.ResourceList{"cpu": resource.MustParse("500m")}) var count int32 - for need.LessEqual(alloc) { + for need.LessEqualInAllDimension(alloc, schedulingapi.Zero) { count++ alloc.Sub(need) } diff --git a/test/e2e/util/node.go b/test/e2e/util/node.go index afbe5bdc84..f73d61edc8 100644 --- a/test/e2e/util/node.go +++ b/test/e2e/util/node.go @@ -76,7 +76,7 @@ func ClusterSize(ctx *TestContext, req v1.ResourceList) int32 { alloc.Sub(res) } - for slot.LessEqual(alloc) { + for slot.LessEqualInAllDimension(alloc, schedulerapi.Zero) { alloc.Sub(slot) res++ } @@ -146,7 +146,7 @@ func ComputeNode(ctx *TestContext, req v1.ResourceList) (string, int32) { alloc.Sub(res) } - for slot.LessEqual(alloc) { + for slot.LessEqualInAllDimension(alloc, schedulerapi.Zero) { alloc.Sub(slot) res++ }