Skip to content

Commit 1227a32

Browse files
authored Jun 24, 2021
consider initContainers when calculating a pod's requests (#203)
This commit also: standardises uses of resource.Quantity, and adds expectedNodeDelta to scale-up tests. Fixes #202.
1 parent b2d8ed8 commit 1227a32

File tree

7 files changed

+306
-107
lines changed

7 files changed

+306
-107
lines changed
 

‎pkg/controller/controller_scale_node_group_test.go

+29-12
Original file line numberDiff line numberDiff line change
@@ -4,14 +4,14 @@ import (
44
"testing"
55
duration "time"
66

7+
"github.com/atlassian/escalator/pkg/k8s/resource"
78
"github.com/atlassian/escalator/pkg/test"
89
"github.com/pkg/errors"
910
log "github.com/sirupsen/logrus"
1011
time "github.com/stephanos/clock"
1112
"github.com/stretchr/testify/assert"
1213
"github.com/stretchr/testify/require"
1314
v1 "k8s.io/api/core/v1"
14-
"k8s.io/apimachinery/pkg/api/resource"
1515
)
1616

1717
type ListerOptions struct {
@@ -215,9 +215,10 @@ func TestScaleNodeGroup(t *testing.T) {
215215
}
216216

217217
tests := []struct {
218-
name string
219-
args args
220-
err error
218+
name string
219+
args args
220+
expectedNodeDelta int
221+
err error
221222
}{
222223
{
223224
"100% cpu, 50% threshold",
@@ -233,6 +234,7 @@ func TestScaleNodeGroup(t *testing.T) {
233234
},
234235
ListerOptions{},
235236
},
237+
10,
236238
nil,
237239
},
238240
{
@@ -249,6 +251,7 @@ func TestScaleNodeGroup(t *testing.T) {
249251
},
250252
ListerOptions{},
251253
},
254+
10,
252255
nil,
253256
},
254257
{
@@ -265,6 +268,7 @@ func TestScaleNodeGroup(t *testing.T) {
265268
},
266269
ListerOptions{},
267270
},
271+
5,
268272
nil,
269273
},
270274
{
@@ -281,6 +285,7 @@ func TestScaleNodeGroup(t *testing.T) {
281285
},
282286
ListerOptions{},
283287
},
288+
12,
284289
nil,
285290
},
286291
{
@@ -297,6 +302,7 @@ func TestScaleNodeGroup(t *testing.T) {
297302
},
298303
ListerOptions{},
299304
},
305+
0,
300306
nil,
301307
},
302308
{
@@ -313,6 +319,7 @@ func TestScaleNodeGroup(t *testing.T) {
313319
},
314320
ListerOptions{},
315321
},
322+
1,
316323
nil,
317324
},
318325
{
@@ -327,6 +334,7 @@ func TestScaleNodeGroup(t *testing.T) {
327334
},
328335
ListerOptions{},
329336
},
337+
0,
330338
errors.New("node count less than the minimum"),
331339
},
332340
{
@@ -341,6 +349,7 @@ func TestScaleNodeGroup(t *testing.T) {
341349
},
342350
ListerOptions{},
343351
},
352+
0,
344353
errors.New("node count larger than the maximum"),
345354
},
346355
{
@@ -356,6 +365,7 @@ func TestScaleNodeGroup(t *testing.T) {
356365
},
357366
ListerOptions{},
358367
},
368+
0,
359369
errors.New("cannot divide by zero in percent calculation"),
360370
},
361371
{
@@ -371,6 +381,7 @@ func TestScaleNodeGroup(t *testing.T) {
371381
},
372382
ListerOptions{},
373383
},
384+
0,
374385
errors.New("cannot divide by zero in percent calculation"),
375386
},
376387
{
@@ -386,6 +397,7 @@ func TestScaleNodeGroup(t *testing.T) {
386397
},
387398
ListerOptions{},
388399
},
400+
0,
389401
errors.New("cannot divide by zero in percent calculation"),
390402
},
391403
{
@@ -406,6 +418,7 @@ func TestScaleNodeGroup(t *testing.T) {
406418
},
407419
},
408420
},
421+
0,
409422
errors.New("unable to list pods"),
410423
},
411424
{
@@ -426,6 +439,7 @@ func TestScaleNodeGroup(t *testing.T) {
426439
},
427440
},
428441
},
442+
0,
429443
errors.New("unable to list nodes"),
430444
},
431445
{
@@ -442,6 +456,7 @@ func TestScaleNodeGroup(t *testing.T) {
442456
},
443457
ListerOptions{},
444458
},
459+
0,
445460
nil,
446461
},
447462
{
@@ -458,6 +473,7 @@ func TestScaleNodeGroup(t *testing.T) {
458473
},
459474
ListerOptions{},
460475
},
476+
38,
461477
nil,
462478
},
463479
}
@@ -507,6 +523,7 @@ func TestScaleNodeGroup(t *testing.T) {
507523
require.EqualError(t, tt.err, err.Error())
508524
}
509525

526+
assert.Equal(t, tt.expectedNodeDelta, nodesDelta)
510527
if nodesDelta <= 0 {
511528
return
512529
}
@@ -542,7 +559,7 @@ func TestScaleNodeGroup_MultipleRuns(t *testing.T) {
542559
nodeGroupOptions NodeGroupOptions
543560
listerOptions ListerOptions
544561
}
545-
var defaultNodeCPUCapaity int64 = 2000
562+
var defaultNodeCPUCapacity int64 = 2000
546563
var defaultNodeMemCapacity int64 = 8000
547564

548565
tests := []struct {
@@ -557,7 +574,7 @@ func TestScaleNodeGroup_MultipleRuns(t *testing.T) {
557574
{
558575
"10 nodes, 0 pods, min nodes 5, fast node removal",
559576
args{
560-
buildTestNodes(10, defaultNodeCPUCapaity, defaultNodeMemCapacity),
577+
buildTestNodes(10, defaultNodeCPUCapacity, defaultNodeMemCapacity),
561578
buildTestPods(0, 0, 0),
562579
NodeGroupOptions{
563580
Name: "default",
@@ -583,7 +600,7 @@ func TestScaleNodeGroup_MultipleRuns(t *testing.T) {
583600
{
584601
"10 nodes, 10 pods, slow node removal",
585602
args{
586-
buildTestNodes(10, defaultNodeCPUCapaity, defaultNodeMemCapacity),
603+
buildTestNodes(10, defaultNodeCPUCapacity, defaultNodeMemCapacity),
587604
buildTestPods(10, 1000, 1000),
588605
NodeGroupOptions{
589606
Name: "default",
@@ -609,7 +626,7 @@ func TestScaleNodeGroup_MultipleRuns(t *testing.T) {
609626
{
610627
"4 nodes, 0 pods, min nodes 0, fast node removal to scale down to 0",
611628
args{
612-
buildTestNodes(4, defaultNodeCPUCapaity, defaultNodeMemCapacity),
629+
buildTestNodes(4, defaultNodeCPUCapacity, defaultNodeMemCapacity),
613630
buildTestPods(0, 0, 0),
614631
NodeGroupOptions{
615632
Name: "default",
@@ -635,7 +652,7 @@ func TestScaleNodeGroup_MultipleRuns(t *testing.T) {
635652
{
636653
"0 nodes, 10 pods, min nodes 0, scale up from 0 without cache",
637654
args{
638-
buildTestNodes(0, defaultNodeCPUCapaity, defaultNodeMemCapacity),
655+
buildTestNodes(0, defaultNodeCPUCapacity, defaultNodeMemCapacity),
639656
buildTestPods(40, 200, 800),
640657
NodeGroupOptions{
641658
Name: "default",
@@ -662,7 +679,7 @@ func TestScaleNodeGroup_MultipleRuns(t *testing.T) {
662679
{
663680
"0 nodes, 10 pods, min nodes 0, scale up from 0 with cache",
664681
args{
665-
buildTestNodes(0, defaultNodeCPUCapaity, defaultNodeMemCapacity),
682+
buildTestNodes(0, defaultNodeCPUCapacity, defaultNodeMemCapacity),
666683
buildTestPods(40, 200, 800),
667684
NodeGroupOptions{
668685
Name: "default",
@@ -717,8 +734,8 @@ func TestScaleNodeGroup_MultipleRuns(t *testing.T) {
717734
// add cached node allocatable capacity when configured
718735
if tt.scaleUpWithCachedCapacity {
719736
defaultNodeGroupState := nodeGroupsState[tt.args.nodeGroupOptions.Name]
720-
defaultNodeGroupState.cpuCapacity = *resource.NewMilliQuantity(defaultNodeCPUCapaity, resource.DecimalSI)
721-
defaultNodeGroupState.memCapacity = *resource.NewQuantity(defaultNodeMemCapacity, resource.DecimalSI)
737+
defaultNodeGroupState.cpuCapacity = *resource.NewCPUQuantity(defaultNodeCPUCapacity)
738+
defaultNodeGroupState.memCapacity = *resource.NewMemoryQuantity(defaultNodeMemCapacity)
722739
nodeGroupsState[tt.args.nodeGroupOptions.Name] = defaultNodeGroupState
723740
}
724741

‎pkg/controller/util_test.go

+31-26
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,12 @@ import (
44
"testing"
55

66
"github.com/atlassian/escalator/pkg/k8s"
7+
"github.com/atlassian/escalator/pkg/k8s/resource"
78
"github.com/atlassian/escalator/pkg/test"
89
"github.com/pkg/errors"
910
"github.com/stretchr/testify/assert"
1011
"github.com/stretchr/testify/require"
1112
v1 "k8s.io/api/core/v1"
12-
"k8s.io/apimachinery/pkg/api/resource"
1313
)
1414

1515
func TestCalcScaleUpDeltaBelowThreshold(t *testing.T) {
@@ -203,10 +203,10 @@ func calculatePercentageUsage(pods []*v1.Pod, nodes []*v1.Node) (float64, float6
203203

204204
func TestCalcPercentUsage(t *testing.T) {
205205
type args struct {
206-
cpuRequest resource.Quantity
207-
memRequest resource.Quantity
208-
cpuCapacity resource.Quantity
209-
memCapacity resource.Quantity
206+
cpuRequest int64
207+
memRequest int64
208+
cpuCapacity int64
209+
memCapacity int64
210210
numberOfUntaintedNodes int64
211211
}
212212
tests := []struct {
@@ -219,10 +219,10 @@ func TestCalcPercentUsage(t *testing.T) {
219219
{
220220
"basic test",
221221
args{
222-
*resource.NewMilliQuantity(50, resource.DecimalSI),
223-
*resource.NewQuantity(50, resource.DecimalSI),
224-
*resource.NewMilliQuantity(100, resource.DecimalSI),
225-
*resource.NewQuantity(100, resource.DecimalSI),
222+
50,
223+
50,
224+
100,
225+
100,
226226
1,
227227
},
228228
50,
@@ -232,10 +232,10 @@ func TestCalcPercentUsage(t *testing.T) {
232232
{
233233
"divide by zero test",
234234
args{
235-
*resource.NewMilliQuantity(50, resource.DecimalSI),
236-
*resource.NewQuantity(50, resource.DecimalSI),
237-
*resource.NewMilliQuantity(0, resource.DecimalSI),
238-
*resource.NewQuantity(0, resource.DecimalSI),
235+
50,
236+
50,
237+
0,
238+
0,
239239
10,
240240
},
241241
0,
@@ -245,10 +245,10 @@ func TestCalcPercentUsage(t *testing.T) {
245245
{
246246
"no pods request while number of nodes is not 0",
247247
args{
248-
*resource.NewMilliQuantity(0, resource.DecimalSI),
249-
*resource.NewQuantity(0, resource.DecimalSI),
250-
*resource.NewMilliQuantity(0, resource.DecimalSI),
251-
*resource.NewQuantity(0, resource.DecimalSI),
248+
0,
249+
0,
250+
0,
251+
0,
252252
1,
253253
},
254254
0,
@@ -258,10 +258,10 @@ func TestCalcPercentUsage(t *testing.T) {
258258
{
259259
"zero numerator test",
260260
args{
261-
*resource.NewMilliQuantity(0, resource.DecimalSI),
262-
*resource.NewQuantity(0, resource.DecimalSI),
263-
*resource.NewMilliQuantity(66, resource.DecimalSI),
264-
*resource.NewQuantity(66, resource.DecimalSI),
261+
0,
262+
0,
263+
66,
264+
66,
265265
1,
266266
},
267267
0,
@@ -271,10 +271,10 @@ func TestCalcPercentUsage(t *testing.T) {
271271
{
272272
"zero all test",
273273
args{
274-
*resource.NewMilliQuantity(0, resource.DecimalSI),
275-
*resource.NewQuantity(0, resource.DecimalSI),
276-
*resource.NewMilliQuantity(0, resource.DecimalSI),
277-
*resource.NewQuantity(0, resource.DecimalSI),
274+
0,
275+
0,
276+
0,
277+
0,
278278
0,
279279
},
280280
0,
@@ -284,7 +284,12 @@ func TestCalcPercentUsage(t *testing.T) {
284284
}
285285
for _, tt := range tests {
286286
t.Run(tt.name, func(t *testing.T) {
287-
cpu, mem, err := calcPercentUsage(tt.args.cpuRequest, tt.args.memRequest, tt.args.cpuCapacity, tt.args.memCapacity, tt.args.numberOfUntaintedNodes)
287+
cpuRequest := *resource.NewCPUQuantity(tt.args.cpuRequest)
288+
cpuCapacity := *resource.NewCPUQuantity(tt.args.cpuCapacity)
289+
memRequest := *resource.NewMemoryQuantity(tt.args.memRequest)
290+
memCapacity := *resource.NewMemoryQuantity(tt.args.memCapacity)
291+
292+
cpu, mem, err := calcPercentUsage(cpuRequest, memRequest, cpuCapacity, memCapacity, tt.args.numberOfUntaintedNodes)
288293
if tt.err == nil {
289294
require.NoError(t, err)
290295
} else {

‎pkg/k8s/resource/quantity.go

+17
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
package resource
2+
3+
import (
4+
"k8s.io/apimachinery/pkg/api/resource"
5+
)
6+
7+
func NewMemoryQuantity(value int64) *resource.Quantity {
8+
return resource.NewQuantity(value, resource.BinarySI)
9+
}
10+
11+
func NewCPUQuantity(value int64) *resource.Quantity {
12+
return resource.NewMilliQuantity(value, resource.DecimalSI)
13+
}
14+
15+
func NewPodQuantity(value int64) *resource.Quantity {
16+
return resource.NewQuantity(value, resource.DecimalSI)
17+
}

0 commit comments

Comments
 (0)
Failed to load comments.