
Commit d524bd8
Author: k8s-merge-robot
Parents: d282974 + fa4f04e

Merge pull request kubernetes#17942 from gmarek/fix-test

Auto commit by PR queue bot

3 files changed: +78 -3 lines

test/e2e/density.go (34 additions & 0 deletions)

@@ -70,6 +70,39 @@ func extractLatencyMetrics(latencies []podLatencyData) LatencyMetric {
 	return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99}
 }
 
+func density30AddonResourceVerifier() map[string]resourceConstraint {
+	constraints := make(map[string]resourceConstraint)
+	constraints["fluentd-elasticsearch"] = resourceConstraint{
+		cpuConstraint:    0.03,
+		memoryConstraint: 150 * (1024 * 1024),
+	}
+	constraints["elasticsearch-logging"] = resourceConstraint{
+		cpuConstraint:    2,
+		memoryConstraint: 750 * (1024 * 1024),
+	}
+	constraints["heapster"] = resourceConstraint{
+		cpuConstraint:    2,
+		memoryConstraint: 1800 * (1024 * 1024),
+	}
+	constraints["kibana-logging"] = resourceConstraint{
+		cpuConstraint:    0.01,
+		memoryConstraint: 100 * (1024 * 1024),
+	}
+	constraints["kube-proxy"] = resourceConstraint{
+		cpuConstraint:    0.01,
+		memoryConstraint: 20 * (1024 * 1024),
+	}
+	constraints["l7-lb-controller"] = resourceConstraint{
+		cpuConstraint:    0.02,
+		memoryConstraint: 20 * (1024 * 1024),
+	}
+	constraints["influxdb"] = resourceConstraint{
+		cpuConstraint:    2,
+		memoryConstraint: 300 * (1024 * 1024),
+	}
+	return constraints
+}
+
 // This test suite can take a long time to run, and can affect or be affected by other tests.
 // So by default it is added to the ginkgo.skip list (see driver.go).
 // To run this suite you must explicitly ask for it by setting the
@@ -177,6 +210,7 @@ var _ = Describe("Density [Skipped]", func() {
 		name := fmt.Sprintf("should allow starting %d pods per node", testArg.podsPerNode)
 		if testArg.podsPerNode == 30 {
 			name = "[Performance] " + name
+			framework.addonResourceConstraints = density30AddonResourceVerifier()
 		}
 		itArg := testArg
 		It(name, func() {

test/e2e/framework.go (7 additions & 2 deletions)

@@ -43,6 +43,10 @@ type Framework struct {
 	NamespaceDeletionTimeout time.Duration
 
 	gatherer containerResourceGatherer
+	// Constraints that are passed to a check which is executed after data is gathered to
+	// see if 99% of results are within acceptable bounds. It has to be injected in the test,
+	// as expectations vary greatly. Constraints are grouped by container name.
+	addonResourceConstraints map[string]resourceConstraint
 
 	logsSizeWaitGroup    sync.WaitGroup
 	logsSizeCloseChannel chan bool
@@ -53,7 +57,8 @@ type Framework struct {
 // you (you can write additional before/after each functions).
 func NewFramework(baseName string) *Framework {
 	f := &Framework{
-		BaseName: baseName,
+		BaseName:                 baseName,
+		addonResourceConstraints: make(map[string]resourceConstraint),
 	}
 
 	BeforeEach(f.beforeEach)
@@ -140,7 +145,7 @@ func (f *Framework) afterEach() {
 	}
 
 	if testContext.GatherKubeSystemResourceUsageData {
-		f.gatherer.stopAndPrintData([]int{50, 90, 99, 100})
+		f.gatherer.stopAndPrintData([]int{50, 90, 99, 100}, f.addonResourceConstraints)
 	}
 
 	if testContext.GatherLogsSizes {
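
A test opts into these checks by populating the new map before the resource gatherer is stopped, exactly as the density.go change above does. A minimal sketch of that pattern (the "kube-proxy" limits are the ones defined in density30AddonResourceVerifier above; the base name and everything else is illustrative, not part of this commit):

// Illustrative sketch, not part of this commit: how a test injects constraints.
framework := NewFramework("density")
framework.addonResourceConstraints["kube-proxy"] = resourceConstraint{
	cpuConstraint:    0.01,
	memoryConstraint: 20 * (1024 * 1024),
}
// afterEach later forwards this map to stopAndPrintData, which fails the test
// if any listed container's 99th-percentile usage exceeds its constraint.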

test/e2e/kubelet_stats.go (37 additions & 1 deletion)

@@ -41,6 +41,8 @@ import (
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/sets"
+
+	. "github.com/onsi/gomega"
 )
 
 // KubeletMetric stores metrics scraped from the kubelet server's /metric endpoint.
@@ -403,6 +405,11 @@ func computePercentiles(timeSeries map[time.Time]resourceUsagePerContainer, perc
 	return result
 }
 
+type resourceConstraint struct {
+	cpuConstraint    float64
+	memoryConstraint int64
+}
+
 type containerResourceGatherer struct {
 	usageTimeseries map[time.Time]resourceUsagePerContainer
 	stopCh          chan struct{}
@@ -433,7 +440,7 @@ func (g *containerResourceGatherer) startGatheringData(c *client.Client, period
 	}()
 }
 
-func (g *containerResourceGatherer) stopAndPrintData(percentiles []int) {
+func (g *containerResourceGatherer) stopAndPrintData(percentiles []int, constraints map[string]resourceConstraint) {
 	close(g.stopCh)
 	g.timer.Stop()
 	g.wg.Wait()
@@ -447,17 +454,46 @@ func (g *containerResourceGatherer) stopAndPrintData(percentiles []int) {
 		sortedKeys = append(sortedKeys, name)
 	}
 	sort.Strings(sortedKeys)
+	violatedConstraints := make([]string, 0)
 	for _, perc := range percentiles {
 		buf := &bytes.Buffer{}
 		w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
 		fmt.Fprintf(w, "container\tcpu(cores)\tmemory(MB)\n")
 		for _, name := range sortedKeys {
 			usage := stats[perc][name]
 			fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", name, usage.CPUUsageInCores, float64(usage.MemoryWorkingSetInBytes)/(1024*1024))
+			// Verifying 99th percentile of resource usage
+			if perc == 99 {
+				// Name has a form: <pod_name>/<container_name>
+				containerName := strings.Split(name, "/")[1]
+				if constraint, ok := constraints[containerName]; ok {
+					if usage.CPUUsageInCores > constraint.cpuConstraint {
+						violatedConstraints = append(
+							violatedConstraints,
+							fmt.Sprintf("Container %v is using %v/%v CPU",
+								name,
+								usage.CPUUsageInCores,
+								constraint.cpuConstraint,
+							),
+						)
+					}
+					if usage.MemoryWorkingSetInBytes > constraint.memoryConstraint {
+						violatedConstraints = append(
+							violatedConstraints,
+							fmt.Sprintf("Container %v is using %v/%v MB of memory",
+								name,
+								float64(usage.MemoryWorkingSetInBytes)/(1024*1024),
+								float64(constraint.memoryConstraint)/(1024*1024),
+							),
+						)
+					}
+				}
+			}
 		}
 		w.Flush()
 		Logf("%v percentile:\n%v", perc, buf.String())
 	}
+	Expect(violatedConstraints).To(BeEmpty())
 }
 
 // Performs a get on a node proxy endpoint given the nodename and rest client.
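
The constraint check itself is simple enough to illustrate in isolation. Below is a standalone sketch of the logic added to stopAndPrintData, using simplified stand-ins: the pod name and the CPU usage figure are hypothetical, and only the "heapster" limit is taken from density30AddonResourceVerifier.

package main

import (
	"fmt"
	"strings"
)

// Stand-in for the resourceConstraint type added in this commit.
type resourceConstraint struct {
	cpuConstraint    float64
	memoryConstraint int64
}

func main() {
	constraints := map[string]resourceConstraint{
		"heapster": {cpuConstraint: 2, memoryConstraint: 1800 * (1024 * 1024)},
	}

	// Gathered names have the form <pod_name>/<container_name>.
	name := "heapster-v10-abcde/heapster" // hypothetical pod instance
	cpuUsage := 2.5                       // hypothetical 99th-percentile usage, in cores

	violatedConstraints := []string{}
	containerName := strings.Split(name, "/")[1]
	if c, ok := constraints[containerName]; ok && cpuUsage > c.cpuConstraint {
		violatedConstraints = append(violatedConstraints,
			fmt.Sprintf("Container %v is using %v/%v CPU", name, cpuUsage, c.cpuConstraint))
	}

	// In the e2e test the final assertion is Expect(violatedConstraints).To(BeEmpty()),
	// so any entry collected here fails the test.
	fmt.Println(violatedConstraints)
	// Output: [Container heapster-v10-abcde/heapster is using 2.5/2 CPU]
}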
