Commit
Change the error message.
When a pod cannot be scheduled, make the error message similar to kube-scheduler's.

Signed-off-by: gj199575 <409237405@qq.com>
gj199575 committed Apr 17, 2023
1 parent 31b47aa commit be8cff3
Showing 5 changed files with 175 additions and 4 deletions.
4 changes: 2 additions & 2 deletions pkg/scheduler/actions/allocate/allocate.go
@@ -98,8 +98,8 @@ func (alloc *Action) Execute(ssn *framework.Session) {
allNodes := ssn.NodeList
predicateFn := func(task *api.TaskInfo, node *api.NodeInfo) error {
// Check for Resource Predicate
- if !task.InitResreq.LessEqual(node.FutureIdle(), api.Zero) {
- return api.NewFitError(task, node, api.NodeResourceFitFailed)
+ if ok, reason := task.InitResreq.CheckResource(node.FutureIdle(), api.Zero); !ok {
+ return api.NewFitError(task, node, reason)
}

return ssn.PredicateFn(task, node)
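For context on the hunk above: the predicate used to report only the generic api.NodeResourceFitFailed reason, while CheckResource lets it name the exact shortage. Below is a minimal, self-contained sketch of that pattern; the resource type, the minResource value, and the checkResource helper are simplified stand-ins for illustration, not the real pkg/scheduler/api definitions.

package main

import (
	"fmt"
	"math"
)

// resource is a simplified stand-in for the scheduler's Resource type.
type resource struct {
	MilliCPU float64
	Memory   float64
}

// minResource approximates the float tolerance used in resource_info.go
// (the real constant's value is not shown in this diff).
const minResource = 0.1

// checkResource mirrors the shape of the new CheckResource: a bool plus
// the first insufficient dimension, instead of a bare bool.
func checkResource(req, idle resource) (bool, string) {
	lessEqual := func(l, r float64) bool {
		return l < r || math.Abs(l-r) < minResource
	}
	if !lessEqual(req.MilliCPU, idle.MilliCPU) {
		return false, "Insufficient cpu"
	}
	if !lessEqual(req.Memory, idle.Memory) {
		return false, "Insufficient memory"
	}
	return true, ""
}

func main() {
	// A task asking for more CPU than the node will have idle: the caller
	// can now put "Insufficient cpu" into the FitError it constructs.
	if ok, reason := checkResource(resource{MilliCPU: 9000, Memory: 4000}, resource{MilliCPU: 8000, Memory: 8000}); !ok {
		fmt.Println(reason) // Insufficient cpu
	}
}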
2 changes: 1 addition & 1 deletion pkg/scheduler/api/job_info_test.go
@@ -231,7 +231,7 @@ func TestTaskSchedulingReason(t *testing.T) {
t3.UID: "Pod ns1/task-3 can possibly be assigned to node1",
t4.UID: "Pod ns1/task-4 can possibly be assigned to node2",
t5.UID: "Pod ns1/task-5 can possibly be assigned to node3",
t6.UID: "all nodes are unavailable: 1 node(s) pod number exceeded, 2 node(s) resource fit failed.",
t6.UID: "0/3 nodes are unavailable: 1 node(s) pod number exceeded, 2 node(s) resource fit failed.",
},
},
}
31 changes: 31 additions & 0 deletions pkg/scheduler/api/resource_info.go
@@ -389,6 +389,37 @@ func (r *Resource) LessEqual(rr *Resource, defaultValue DimensionDefaultValue) bool {
return true
}

// CheckResource returns true and an empty string when every resource dimension in r is less than or equal to the corresponding dimension in rr.
// Otherwise it returns false and a message naming the insufficient resource.
// @param defaultValue "default value for resource dimension not defined in ScalarResources. Its value can only be one of 'Zero' and 'Infinity'"
func (r *Resource) CheckResource(rr *Resource, defaultValue DimensionDefaultValue) (bool, string) {
lessEqualFunc := func(l, r, diff float64) bool {
if l < r || math.Abs(l-r) < diff {
return true
}
return false
}

if !lessEqualFunc(r.MilliCPU, rr.MilliCPU, minResource) {
return false, "Insufficient cpu"
}
if !lessEqualFunc(r.Memory, rr.Memory, minResource) {
return false, "Insufficient memory"
}

for resourceName, leftValue := range r.ScalarResources {
rightValue, ok := rr.ScalarResources[resourceName]
if !ok && defaultValue == Infinity {
continue
}

if !lessEqualFunc(leftValue, rightValue, minResource) {
return false, "Insufficient " + string(resourceName)
}
}
return true, ""
}

// LessPartly returns true if there exists any dimension whose resource amount in r is less than that in rr.
// Otherwise returns false.
// @param defaultValue "default value for resource dimension not defined in ScalarResources. Its value can only be one of 'Zero' and 'Infinity'"
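One subtlety in the new function is the defaultValue parameter: a scalar resource that rr never declares is treated as zero capacity under Zero (so any request for it fails), but is skipped entirely under Infinity. The sketch below isolates just that branch; fitsScalars and dimensionDefault are hypothetical names invented for illustration and do not exist in the repository.

package main

import "fmt"

// dimensionDefault stands in for the real DimensionDefaultValue type.
type dimensionDefault int

const (
	zero     dimensionDefault = iota // undeclared dimension counts as 0
	infinity                         // undeclared dimension counts as unlimited
)

// fitsScalars mirrors the ScalarResources loop in CheckResource above:
// a dimension absent from idle either fails the request (zero) or is
// ignored (infinity).
func fitsScalars(req, idle map[string]float64, def dimensionDefault) (bool, string) {
	for name, want := range req {
		have, declared := idle[name]
		if !declared && def == infinity {
			continue // treat the missing dimension as unbounded
		}
		if want > have {
			return false, "Insufficient " + name
		}
	}
	return true, ""
}

func main() {
	req := map[string]float64{"nvidia.com/gpu": 1}
	idle := map[string]float64{} // node does not advertise GPUs

	fmt.Println(fitsScalars(req, idle, zero))     // false Insufficient nvidia.com/gpu
	fmt.Println(fitsScalars(req, idle, infinity)) // true
}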
140 changes: 140 additions & 0 deletions pkg/scheduler/api/resource_info_test.go
@@ -1227,3 +1227,143 @@ func TestMinDimensionResourceInfinity(t *testing.T) {
}
}
}

func TestResource_CheckResource(t *testing.T) {
testsForDefaultZero := []struct {
resource1 *Resource
resource2 *Resource
expected string
}{
{
resource1: &Resource{},
resource2: &Resource{},
expected: "",
},
{
resource1: &Resource{},
resource2: &Resource{
MilliCPU: 4000,
Memory: 2000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000},
},
expected: "",
},
{
resource1: &Resource{
MilliCPU: 4000,
Memory: 2000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000},
},
resource2: &Resource{},
expected: "Insufficient cpu",
},
{
resource1: &Resource{
MilliCPU: 4000,
Memory: 4000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000},
},
resource2: &Resource{
MilliCPU: 8000,
Memory: 8000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 5000},
},
expected: "",
},
{
resource1: &Resource{
MilliCPU: 4000,
Memory: 8000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000},
},
resource2: &Resource{
MilliCPU: 8000,
Memory: 8000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 5000},
},
expected: "",
},
{
resource1: &Resource{
MilliCPU: 4000,
Memory: 4000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 2000},
},
resource2: &Resource{
MilliCPU: 8000,
Memory: 8000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 5000},
},
expected: "",
},
{
resource1: &Resource{
MilliCPU: 4000,
Memory: 4000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 5000, "hugepages-test": 2000},
},
resource2: &Resource{
MilliCPU: 8000,
Memory: 8000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 5000},
},
expected: "Insufficient scalar.test/scalar1",
},
{
resource1: &Resource{
MilliCPU: 9000,
Memory: 4000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000},
},
resource2: &Resource{
MilliCPU: 8000,
Memory: 8000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 4000, "hugepages-test": 5000},
},
expected: "Insufficient cpu",
},
}

testsForDefaultInfinity := []struct {
resource1 *Resource
resource2 *Resource
expected string
}{
{
resource1: &Resource{},
resource2: &Resource{},
expected: "",
},
{
resource1: &Resource{},
resource2: &Resource{
MilliCPU: 4000,
Memory: 2000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000},
},
expected: "",
},
{
resource1: &Resource{
MilliCPU: 4000,
Memory: 2000,
ScalarResources: map[v1.ResourceName]float64{"scalar.test/scalar1": 1000, "hugepages-test": 2000},
},
resource2: &Resource{},
expected: "Insufficient cpu",
},
}

for caseID, test := range testsForDefaultZero {
_, reason := test.resource1.CheckResource(test.resource2, Zero)
if !reflect.DeepEqual(test.expected, reason) {
t.Errorf("caseID %d expected: %#v, got: %#v", caseID, test.expected, reason)
}
}
for caseID, test := range testsForDefaultInfinity {
_, reason := test.resource1.CheckResource(test.resource2, Infinity)
if !reflect.DeepEqual(test.expected, reason) {
t.Errorf("caseID %d expected: %#v, got: %#v", caseID, test.expected, reason)
}
}
}
2 changes: 1 addition & 1 deletion pkg/scheduler/api/unschedule_info.go
@@ -66,7 +66,7 @@ func (f *FitErrors) SetNodeError(nodeName string, err error) {
// Error returns the final error message
func (f *FitErrors) Error() string {
if f.err == "" {
- f.err = AllNodeUnavailableMsg
+ f.err = fmt.Sprintf("0/%v", len(f.nodes)) + " nodes are unavailable"
}
if len(f.nodes) == 0 {
return f.err
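Putting the pieces together, the per-node reasons collected during predicate evaluation are counted and folded into the new "0/N nodes are unavailable" prefix, which is exactly the string asserted in job_info_test.go above. The sketch below approximates that aggregation; summarize is a hypothetical helper, and the real FitErrors.Error() walks its recorded FitError values rather than a plain map.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// summarize mimics the shape of FitErrors.Error(): a "0/<total> nodes are
// unavailable" prefix followed by a count per distinct failure reason.
func summarize(total int, reasonsPerNode map[string]string) string {
	counts := map[string]int{}
	for _, reason := range reasonsPerNode {
		counts[reason]++
	}
	parts := make([]string, 0, len(counts))
	for reason, n := range counts {
		parts = append(parts, fmt.Sprintf("%d node(s) %s", n, reason))
	}
	sort.Strings(parts) // deterministic output for tests and logs
	return fmt.Sprintf("0/%d nodes are unavailable: %s.", total, strings.Join(parts, ", "))
}

func main() {
	fmt.Println(summarize(3, map[string]string{
		"node1": "pod number exceeded",
		"node2": "resource fit failed",
		"node3": "resource fit failed",
	}))
	// 0/3 nodes are unavailable: 1 node(s) pod number exceeded, 2 node(s) resource fit failed.
}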
