Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix load test & run it on 100- and 500-node Kubemarks #20891

Merged
merged 2 commits into from
Feb 9, 2016
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion hack/jenkins/e2e-runner.sh
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ if [[ "${USE_KUBEMARK:-}" == "true" ]]; then
NUM_NODES=${KUBEMARK_NUM_NODES:-$NUM_NODES}
MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}
./test/kubemark/start-kubemark.sh
./test/kubemark/run-e2e-tests.sh --ginkgo.focus="should\sallow\sstarting\s30\spods\sper\snode" --delete-namespace="false" --gather-resource-usage="false"
./test/kubemark/run-e2e-tests.sh --ginkgo.focus="${KUBEMARK_TESTS}" --delete-namespace="false" --gather-resource-usage="false"
./test/kubemark/stop-kubemark.sh
NUM_NODES=${NUM_NODES_BKP}
MASTER_SIZE=${MASTER_SIZE_BKP}
Expand Down
4 changes: 4 additions & 0 deletions hack/jenkins/e2e.sh
Original file line number Diff line number Diff line change
Expand Up @@ -659,6 +659,7 @@ case ${JOB_NAME} in
: ${E2E_DOWN:="true"}
: ${E2E_TEST:="false"}
: ${USE_KUBEMARK:="true"}
: ${KUBEMARK_TESTS:="\[Feature:Performance\]"}
# Override defaults to be independent from GCE defaults and set kubemark parameters
KUBE_GCE_INSTANCE_PREFIX="kubemark100"
NUM_NODES="10"
Expand All @@ -679,6 +680,7 @@ case ${JOB_NAME} in
: ${E2E_DOWN:="true"}
: ${E2E_TEST:="false"}
: ${USE_KUBEMARK:="true"}
: ${KUBEMARK_TESTS:="\[Feature:Performance\]"}
# Override defaults to be independent from GCE defaults and set kubemark parameters
NUM_NODES="6"
MASTER_SIZE="n1-standard-4"
Expand All @@ -698,6 +700,7 @@ case ${JOB_NAME} in
: ${E2E_DOWN:="true"}
: ${E2E_TEST:="false"}
: ${USE_KUBEMARK:="true"}
: ${KUBEMARK_TESTS:="should\sallow\sstarting\s30\spods\sper\snode"}
# Override defaults to be independent from GCE defaults and set kubemark parameters
# We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way.
NUM_NODES="11"
Expand Down Expand Up @@ -1029,6 +1032,7 @@ export KUBE_SKIP_CONFIRMATIONS=y

# Kubemark
export USE_KUBEMARK="${USE_KUBEMARK:-false}"
export KUBEMARK_TESTS="${KUBEMARK_TESTS:-}"
export KUBEMARK_MASTER_SIZE="${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}"
export KUBEMARK_NUM_NODES="${KUBEMARK_NUM_NODES:-$NUM_NODES}"

Expand Down
21 changes: 13 additions & 8 deletions test/e2e/load.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,11 +132,17 @@ var _ = Describe("Load capacity", func() {
// We may want to revisit it in the future.
creatingTime := time.Duration(totalPods/5) * time.Second
createAllRC(configs, creatingTime)

By("============================================================================")
scaleAllRC(configs)

// We would like to spread scaling replication controllers over time
// to make it possible to create/schedule & delete them in the meantime.
// Currently we assume an average throughput of 5 pods/second.
// The expected number of created/deleted pods is less than totalPods/3.
scalingTime := time.Duration(totalPods/15) * time.Second
scaleAllRC(configs, scalingTime)
By("============================================================================")
scaleAllRC(configs)

scaleAllRC(configs, scalingTime)
By("============================================================================")

// Cleanup all created replication controllers.
Expand Down Expand Up @@ -211,23 +217,22 @@ func createRC(wg *sync.WaitGroup, config *RCConfig, creatingTime time.Duration)
expectNoError(RunRC(*config), fmt.Sprintf("creating rc %s", config.Name))
}

func scaleAllRC(configs []*RCConfig) {
func scaleAllRC(configs []*RCConfig, scalingTime time.Duration) {
var wg sync.WaitGroup
wg.Add(len(configs))
for _, config := range configs {
go scaleRC(&wg, config)
go scaleRC(&wg, config, scalingTime)
}
wg.Wait()
}

// Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards.
// Scaling happens always based on original size, not the current size.
func scaleRC(wg *sync.WaitGroup, config *RCConfig) {
func scaleRC(wg *sync.WaitGroup, config *RCConfig, scalingTime time.Duration) {
defer GinkgoRecover()
defer wg.Done()
resizingTime := 3 * time.Minute

sleepUpTo(resizingTime)
sleepUpTo(scalingTime)
newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
expectNoError(ScaleRC(config.Client, config.Namespace, config.Name, newSize, true),
fmt.Sprintf("scaling rc %s for the first time", config.Name))
Expand Down