Merge pull request kubernetes#91510 from ahg-g/ahg-preempt
Add Preemption benchmark
k8s-ci-robot committed May 28, 2020
2 parents 472a4e9 + d650b57 commit 0891f69
Showing 6 changed files with 68 additions and 10 deletions.
1 change: 1 addition & 0 deletions test/integration/scheduler_perf/BUILD
@@ -18,6 +18,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
13 changes: 13 additions & 0 deletions test/integration/scheduler_perf/config/performance-config.yaml
@@ -227,3 +227,16 @@
     - numNodes: 5000
       numInitPods: [2000, 2000, 2000, 2000, 2000]
       numPodsToSchedule: 1000
+- template:
+    desc: Preemption
+    initPods:
+      - podTemplatePath: config/pod-low-priority.yaml
+    podsToSchedule:
+      podTemplatePath: config/pod-high-priority.yaml
+  params:
+    - numNodes: 500
+      numInitPods: [2000]
+      numPodsToSchedule: 500
+    - numNodes: 5000
+      numInitPods: [20000]
+      numPodsToSchedule: 5000
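The new entry follows the template/params pattern of the existing test cases: low-priority pods from pod-low-priority.yaml fill the cluster first, then high-priority pods from pod-high-priority.yaml are scheduled on top, at a 500-node and a 5000-node scale. For orientation, the entry could decode into Go shapes like the sketch below; the field names simply mirror the YAML keys and are hypothetical, not the scheduler_perf package's actual types.

// Hypothetical decode target for the performance-config.yaml entry above;
// field names mirror the YAML keys, the real types live in scheduler_perf.
package sketch

type podCase struct {
	PodTemplatePath string `yaml:"podTemplatePath"`
}

type testParams struct {
	NumNodes          int   `yaml:"numNodes"`
	NumInitPods       []int `yaml:"numInitPods"`
	NumPodsToSchedule int   `yaml:"numPodsToSchedule"`
}

type templatedTestCase struct {
	Template struct {
		Desc           string    `yaml:"desc"`
		InitPods       []podCase `yaml:"initPods"`
		PodsToSchedule podCase   `yaml:"podsToSchedule"`
	} `yaml:"template"`
	Params []testParams `yaml:"params"`
}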
18 changes: 18 additions & 0 deletions test/integration/scheduler_perf/config/pod-high-priority.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-
+spec:
+  priority: 10
+  containers:
+  - image: k8s.gcr.io/pause:3.2
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 3000m
+        memory: 500Mi
+      requests:
+        cpu: 3000m
+        memory: 500Mi
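The template pins spec.priority to a numeric value directly instead of referencing a priorityClassName, which keeps the benchmark free of any PriorityClass setup; whether a given API server accepts a directly-set priority depends on its admission configuration, so treat that as a property of the test harness. A minimal client-go sketch of the same pod object (illustration only, not code from this commit):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Equivalent of config/pod-high-priority.yaml built with client-go
	// types; PodSpec.Priority is a *int32, hence the intermediate variable.
	prio := int32(10)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "pod-"},
		Spec: v1.PodSpec{
			Priority: &prio,
			Containers: []v1.Container{{
				Name:  "pause",
				Image: "k8s.gcr.io/pause:3.2",
				Ports: []v1.ContainerPort{{ContainerPort: 80}},
				Resources: v1.ResourceRequirements{
					Limits: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("3000m"),
						v1.ResourceMemory: resource.MustParse("500Mi"),
					},
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("3000m"),
						v1.ResourceMemory: resource.MustParse("500Mi"),
					},
				},
			}},
		},
	}
	fmt.Println(pod.GenerateName, *pod.Spec.Priority)
}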
18 changes: 18 additions & 0 deletions test/integration/scheduler_perf/config/pod-low-priority.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-
+spec:
+  terminationGracePeriodSeconds: 0
+  containers:
+  - image: k8s.gcr.io/pause:3.2
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 900m
+        memory: 500Mi
+      requests:
+        cpu: 900m
+        memory: 500Mi
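Together the two templates force preemption: filler pods request 900m CPU each, while every incoming high-priority pod needs 3000m, so once the filler saturates a node the scheduler must evict victims to place an incoming pod. The benchmark's node size is not part of this diff, so the worked example below assumes a 4-CPU node purely for illustration:

package main

import "fmt"

func main() {
	// Assumption: nodes advertise 4 allocatable CPUs (not stated in this
	// commit; actual capacity comes from the benchmark's node template).
	const nodeCPUMillis = 4000
	const lowPrioCPUMillis = 900   // from config/pod-low-priority.yaml
	const highPrioCPUMillis = 3000 // from config/pod-high-priority.yaml

	lowPerNode := nodeCPUMillis / lowPrioCPUMillis // 4 filler pods fit
	fmt.Println("low-priority pods per node:", lowPerNode)

	// CPU left after the filler: 4000 - 4*900 = 400m, far short of the
	// 3000m a high-priority pod requests, so preemption is required.
	free := nodeCPUMillis - lowPerNode*lowPrioCPUMillis
	victims := 0
	for free < highPrioCPUMillis {
		victims++
		free += lowPrioCPUMillis
	}
	fmt.Println("victims per high-priority pod:", victims) // 3
}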
12 changes: 7 additions & 5 deletions test/integration/scheduler_perf/scheduler_perf_test.go
@@ -45,6 +45,8 @@ var (
 			"scheduler_scheduling_algorithm_priority_evaluation_seconds",
 			"scheduler_binding_duration_seconds",
 			"scheduler_e2e_scheduling_duration_seconds",
+			"scheduler_scheduling_algorithm_preemption_evaluation_seconds",
+			"scheduler_pod_scheduling_duration_seconds",
 		},
 	}
 )
@@ -154,7 +156,7 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
 		}
 		total += p.Num
 	}
-	if err := waitNumPodsScheduled(b, total, podInformer); err != nil {
+	if err := waitNumPodsScheduled(b, total, podInformer, setupNamespace); err != nil {
 		b.Fatal(err)
 	}

@@ -172,7 +174,7 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
 	if err := createPods(testNamespace, test.PodsToSchedule, clientset); err != nil {
 		b.Fatal(err)
 	}
-	if err := waitNumPodsScheduled(b, total+test.PodsToSchedule.Num, podInformer); err != nil {
+	if err := waitNumPodsScheduled(b, test.PodsToSchedule.Num, podInformer, testNamespace); err != nil {
 		b.Fatal(err)
 	}

@@ -187,9 +189,9 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
 	return dataItems
 }

-func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodInformer) error {
+func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodInformer, namespace string) error {
 	for {
-		scheduled, err := getScheduledPods(podInformer)
+		scheduled, err := getScheduledPods(podInformer, namespace)
 		if err != nil {
 			return err
 		}
@@ -203,7 +205,7 @@ func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodIn
 }

 func getTestDataCollectors(tc testCase, podInformer coreinformers.PodInformer, b *testing.B) []testDataCollector {
-	collectors := []testDataCollector{newThroughputCollector(podInformer, map[string]string{"Name": b.Name()})}
+	collectors := []testDataCollector{newThroughputCollector(podInformer, map[string]string{"Name": b.Name()}, []string{testNamespace})}
 	metricsCollectorConfig := defaultMetricsCollectorConfig
 	if tc.MetricsCollectorConfig != nil {
 		metricsCollectorConfig = *tc.MetricsCollectorConfig
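Two behavioral changes in this file: the collector now also records the scheduler's preemption-evaluation and pod-scheduling-duration histograms, and the scheduled-pod waits are scoped to a namespace. The scoping matters because init pods are created in setupNamespace while measured pods go to testNamespace, and preempted init pods are deleted, so the old expectation of a global count reaching total+test.PodsToSchedule.Num could never be met once preemption kicks in. A toy model of the failure (all numbers assumed for illustration):

package main

import "fmt"

func main() {
	// Why the wait must be namespace-scoped under preemption.
	initScheduled := 2000     // low-priority pods scheduled during setup
	toSchedule := 500         // high-priority pods the benchmark then creates
	victims := 3 * toSchedule // each placement evicts ~3 filler pods (see earlier sketch)

	// Old expectation: the global scheduled count reaches initScheduled+toSchedule.
	// With preemption, evicted filler pods are deleted and stop counting as
	// scheduled, so the global count tops out well below the target and the
	// wait would never return:
	globalCount := initScheduled - victims + toSchedule
	fmt.Println(globalCount < initScheduled+toSchedule) // true: 1000 < 2500

	// New expectation: count only the benchmark namespace, which contains
	// exactly the high-priority pods.
	fmt.Println(toSchedule) // the scoped wait target
}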
16 changes: 11 additions & 5 deletions test/integration/scheduler_perf/util.go
@@ -30,6 +30,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
@@ -76,16 +77,19 @@ func mustSetupScheduler() (util.ShutdownFunc, coreinformers.PodInformer, clients
 	return shutdownFunc, podInformer, clientSet
 }

-func getScheduledPods(podInformer coreinformers.PodInformer) ([]*v1.Pod, error) {
+// Returns the list of scheduled pods in the specified namespaces.
+// Note that specifying no namespaces matches pods in all namespaces.
+func getScheduledPods(podInformer coreinformers.PodInformer, namespaces ...string) ([]*v1.Pod, error) {
 	pods, err := podInformer.Lister().List(labels.Everything())
 	if err != nil {
 		return nil, err
 	}

+	s := sets.NewString(namespaces...)
 	scheduled := make([]*v1.Pod, 0, len(pods))
 	for i := range pods {
 		pod := pods[i]
-		if len(pod.Spec.NodeName) > 0 {
+		if len(pod.Spec.NodeName) > 0 && (len(s) == 0 || s.Has(pod.Namespace)) {
 			scheduled = append(scheduled, pod)
 		}
 	}
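Making namespaces variadic keeps existing getScheduledPods call sites compiling unchanged while allowing callers to filter. A usage sketch, assuming placement in the same package as util.go (the package name here is a guess):

// Usage sketch alongside util.go; the package name is assumed.
package benchmark

import (
	coreinformers "k8s.io/client-go/informers/core/v1"
)

// scheduledCounts contrasts the unfiltered and namespace-scoped calls.
func scheduledCounts(podInformer coreinformers.PodInformer, ns string) (all, scoped int, err error) {
	everywhere, err := getScheduledPods(podInformer) // no namespaces: match every namespace
	if err != nil {
		return 0, 0, err
	}
	inNs, err := getScheduledPods(podInformer, ns) // only pods scheduled in ns
	if err != nil {
		return 0, 0, err
	}
	return len(everywhere), len(inNs), nil
}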
@@ -213,17 +217,19 @@ type throughputCollector struct {
 	podInformer           coreinformers.PodInformer
 	schedulingThroughputs []float64
 	labels                map[string]string
+	namespaces            []string
 }

-func newThroughputCollector(podInformer coreinformers.PodInformer, labels map[string]string) *throughputCollector {
+func newThroughputCollector(podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string) *throughputCollector {
 	return &throughputCollector{
 		podInformer: podInformer,
 		labels:      labels,
+		namespaces:  namespaces,
 	}
 }

 func (tc *throughputCollector) run(stopCh chan struct{}) {
-	podsScheduled, err := getScheduledPods(tc.podInformer)
+	podsScheduled, err := getScheduledPods(tc.podInformer, tc.namespaces...)
 	if err != nil {
 		klog.Fatalf("%v", err)
 	}
@@ -233,7 +239,7 @@ func (tc *throughputCollector) run(stopCh chan struct{}) {
 		case <-stopCh:
 			return
 		case <-time.After(throughputSampleFrequency):
-			podsScheduled, err := getScheduledPods(tc.podInformer)
+			podsScheduled, err := getScheduledPods(tc.podInformer, tc.namespaces...)
 			if err != nil {
 				klog.Fatalf("%v", err)
 			}
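With the namespace list plumbed into the collector, throughput samples now count only pods in the benchmark's namespaces; the surrounding loop (unchanged here) derives throughput from the delta between consecutive samples. A standalone sketch of that arithmetic, with the one-second sample window assumed rather than taken from this diff:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumption: throughputSampleFrequency is one second in the real
	// package; this sketch only demonstrates the per-window arithmetic.
	const sampleFrequency = time.Second
	before, now := 120, 165 // scoped scheduled-pod counts one window apart
	throughput := float64(now-before) / sampleFrequency.Seconds()
	fmt.Printf("scheduling throughput: %.0f pods/s\n", throughput) // 45 pods/s
}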
