diff --git a/test/scale/README.md b/test/scale/README.md index 33eb3636f6..8945bcffd2 100644 --- a/test/scale/README.md +++ b/test/scale/README.md @@ -11,5 +11,36 @@ This saves us from: ## Usage 1. Create AKS cluster with `--uptime-sla` and create any nodepools. 2. If making KWOK Pods, run `run-kwok.sh` in the background. -3. Scale with `test-scale.sh`. Specify number of Deployments, Pod replicas, NetworkPolicies, and labels for Pods. +3. Scale with `test-scale.sh`. Specify number of Deployments, Pod replicas, NetworkPolicies, and labels for Pods. Can also delete/re-add objects to cause churn. 4. Test connectivity with `connectivity/test-connectivity.sh`. + +### Example Runs +``` +./test-scale.sh --max-kwok-pods-per-node=50 \ + --num-kwok-deployments=10 \ + --num-kwok-replicas=1 \ + --max-real-pods-per-node=30 \ + --num-real-deployments=5 \ + --num-real-replicas=2 \ + --num-network-policies=1 \ + --num-unapplied-network-policies=10 \ + --num-unique-labels-per-pod=2 \ + --num-unique-labels-per-deployment=2 \ + --num-shared-labels-per-pod=10 \ + --delete-kwok-pods=10 \ + --delete-real-pods=5 \ + --delete-pods-interval=120 \ + --delete-pods-times=2 \ + --delete-labels \ + --delete-labels-interval=30 \ + --delete-labels-times=2 \ + --delete-netpols \ + --delete-netpols-interval=0 \ + --delete-netpols-times=1 +``` + +``` +./test-connectivity.sh --num-scale-pods-to-verify=10 \ + --max-wait-for-initial-connectivity=600 \ + --max-wait-after-adding-netpol=120 +``` diff --git a/test/scale/connectivity/test-connectivity.sh b/test/scale/connectivity/test-connectivity.sh index 75ab9256ff..862f298c6e 100755 --- a/test/scale/connectivity/test-connectivity.sh +++ b/test/scale/connectivity/test-connectivity.sh @@ -10,7 +10,7 @@ NETPOL_SLEEP=5 printHelp() { cat < --max-wait-for-initial-connectivity= --max-wait-after-adding-netpol= [--kubeconfig=] +./test-connectivity.sh --num-scale-pods-to-verify=all| --max-wait-for-initial-connectivity= --max-wait-after-adding-netpol= [--kubeconfig=] 
Verifies that scale test Pods can connect to each other, but cannot connect to a new "pinger" Pod. Then, adds a NetworkPolicy to allow traffic between the scale test Pods and the "pinger" Pod, and verifies connectivity. @@ -21,7 +21,7 @@ USAGE: 3. Run this script REQUIRED PARAMETERS: - --num-scale-pods-to-verify= number of scale Pods to test. Will verify that each scale Pod can connect to each other [(N-1)^2 connections] and that each Scale Pod cannot connect to a "pinger" Pod [2N connection attempts with a 3-second timeout] + --num-scale-pods-to-verify=all| number of scale Pods to test. Will verify that each scale Pod can connect to each other [(N-1)^2 connections] and that each Scale Pod cannot connect to a "pinger" Pod [2N connection attempts with a 3-second timeout] --max-wait-for-initial-connectivity= maximum time in seconds to wait for initial connectivity after Pinger Pods are running --max-wait-after-adding-netpol= maximum time in seconds to wait for allowed connections after adding the allow-pinger NetworkPolicy @@ -72,7 +72,7 @@ while [[ $# -gt 0 ]]; do shift done -if [[ -z $numScalePodsToVerify || -z $maxWaitAfterAddingNetpol ]]; then +if [[ -z $numScalePodsToVerify || -z $maxWaitForInitialConnectivity || -z $maxWaitAfterAddingNetpol ]]; then echo "ERROR: missing required parameter. Check --help for usage" exit 6 fi @@ -122,8 +122,13 @@ startDate=`date -u` echo "STARTING CONNECTIVITY TEST at $startDate" ## GET SCALE PODS +if [[ $numScalePodsToVerify == "all" ]]; then + echo "setting numScalePodsToVerify=9999 since 'all' was passed in" + numScalePodsToVerify=9999 +fi + echo "getting scale Pods..." 
-scalePodNameIPs=(`kubectl $KUBECONFIG_ARG get pods -n scale-test --field-selector=status.phase==Running -o jsonpath='{range .items[*]}{@.metadata.name}{","}{@.status.podIP}{" "}{end}'`) +scalePodNameIPs=(`kubectl $KUBECONFIG_ARG get pods -n scale-test --field-selector=status.phase==Running -l is-real="true" -o jsonpath='{range .items[*]}{@.metadata.name}{","}{@.status.podIP}{" "}{end}'`) scalePods=() scalePodIPs=() for nameIP in "${scalePodNameIPs[@]}"; do @@ -131,8 +136,6 @@ for nameIP in "${scalePodNameIPs[@]}"; do name=${nameIP[0]} ip=${nameIP[1]} - echo $name | grep real-dep || continue - echo "scale Pod: $name, IP: $ip" if [[ -z $name || -z $ip ]]; then @@ -151,6 +154,7 @@ done numScalePodsFound=${#scalePods[@]} if [[ $numScalePodsFound == 0 ]]; then echo "ERROR: expected namespace scale-test to exist with real (non-kwok) Pods. Run test/scale/test-scale.sh with real Pods first." + kubectl $KUBECONFIG_ARG get pod -n scale-test -owide exit 7 elif [[ $numScalePodsFound -lt $numScalePodsToVerify ]]; then echo "WARNING: there are only $numScalePodsFound real scale Pods running which is less than numScalePodsToVerify=$numScalePodsToVerify. Will verify just these $numScalePodsFound Pods" @@ -255,7 +259,7 @@ done low=0 if [[ $prevTryDate -gt $connectivityStartDate ]]; then - low=$(( `date +%s` - $prevTryDate - $CONNECTIVITY_SLEEP )) + low=$(( $prevTryDate - $connectivityStartDate - $CONNECTIVITY_SLEEP )) fi high=$(( `date +%s` - $connectivityStartDate )) echo "SUCCESS: all initial connectivity tests passed. Took between $low and $high seconds to succeed" @@ -309,7 +313,7 @@ done low=0 if [[ $prevTryDate -gt $netpolStartDate ]]; then - low=$(( `date +%s` - $prevTryDate - $NETPOL_SLEEP )) + low=$(( $prevTryDate - $netpolStartDate - $NETPOL_SLEEP )) fi high=$(( `date +%s` - $netpolStartDate )) echo "SUCCESS: all connectivity tests passed after adding allow-pinger NetworkPolicy. 
Took between $low and $high seconds to take effect" diff --git a/test/scale/install-kwok.sh b/test/scale/install-kwok.sh new file mode 100644 index 0000000000..46618258d6 --- /dev/null +++ b/test/scale/install-kwok.sh @@ -0,0 +1,5 @@ +KWOK_REPO=kubernetes-sigs/kwok +KWOK_LATEST_RELEASE=$(curl "https://api.github.com/repos/${KWOK_REPO}/releases/latest" | jq -r '.tag_name') +wget -O kwok -c "https://github.com/kubernetes-sigs/kwok/releases/download/${KWOK_LATEST_RELEASE}/kwok-$(go env GOOS)-$(go env GOARCH)" +chmod +x kwok +sudo mv kwok /usr/local/bin/kwok diff --git a/test/scale/run-kwok.sh b/test/scale/run-kwok.sh index f1118ae320..77876fb916 100755 --- a/test/scale/run-kwok.sh +++ b/test/scale/run-kwok.sh @@ -1,33 +1,21 @@ -###################################################################################### -# This script is used to schedule kwok nodes/pods and maintain kwok node heartbeats. # -###################################################################################### -INSTALL_KWOK=false -# KWOK_LATEST_RELEASE=$(curl "https://api.github.com/repos/${KWOK_REPO}/releases/latest" | jq -r '.tag_name') -KWOK_VERSION=${KWOK_LATEST_RELEASE:-"v0.1.1"} -# kubeconfig arg doesn't seem to work for kwok. It seems to just use current context of the default kubeconfig. - -# specify kubeconfig file as first arg if you want -if [[ $1 != "" ]]; then - file=$1 - test -f $file || { - echo "ERROR: KUBECONFIG=$file does not exist" - exit 1 - } - - KUBECONFIG_ARG="--kubeconfig $file" +############################################################### +# Schedule kwok nodes/pods and maintain kwok node heartbeats. 
# +############################################################### +# can pass kubeconfig as first arg +if [[ -z $1 ]]; then + kubeconfigFile=~/.kube/config +else + kubeconfigFile=$1 fi +echo "using kubeconfig $kubeconfigFile" -if [[ INSTALL_KWOK == true ]]; then - wget -O kwokctl -c "https://github.com/kubernetes-sigs/kwok/releases/download/${KWOK_VERSION}/kwokctl-$(go env GOOS)-$(go env GOARCH)" - chmod +x kwokctl - sudo mv kwokctl /usr/local/bin/kwokctl - - wget -O kwok -c "https://github.com/kubernetes-sigs/kwok/releases/download/${KWOK_VERSION}/kwok-$(go env GOOS)-$(go env GOARCH)" - chmod +x kwok - sudo mv kwok /usr/local/bin/kwok -fi +which kwok || { + echo "ERROR: kwok not found. Install via ./install-kwok.sh" + exit 1 +} -kwok $KUBECONFIG_ARG \ +set -x +kwok --kubeconfig $kubeconfigFile \ --cidr=155.0.0.0/16 \ --node-ip=155.0.0.1 \ --manage-all-nodes=false \ diff --git a/test/scale/templates/kwok-deployment.yaml b/test/scale/templates/kwok-deployment.yaml index 70ef60d5c2..4491e5496b 100644 --- a/test/scale/templates/kwok-deployment.yaml +++ b/test/scale/templates/kwok-deployment.yaml @@ -5,15 +5,18 @@ metadata: namespace: scale-test labels: app: scale-test + is-kwok: "true" spec: replicas: TEMP_REPLICAS selector: matchLabels: - app: scale-testOTHER_LABELS_6_SPACES + app: scale-test + is-kwok: "true"OTHER_LABELS_6_SPACES template: metadata: labels: - app: scale-testOTHER_LABELS_8_SPACES + app: scale-test + is-kwok: "true"OTHER_LABELS_8_SPACES spec: affinity: nodeAffinity: diff --git a/test/scale/templates/real-deployment.yaml b/test/scale/templates/real-deployment.yaml index 38385eda49..9605dbf980 100644 --- a/test/scale/templates/real-deployment.yaml +++ b/test/scale/templates/real-deployment.yaml @@ -5,15 +5,18 @@ metadata: namespace: scale-test labels: app: scale-test + is-real: "true" spec: replicas: TEMP_REPLICAS selector: matchLabels: - app: scale-testOTHER_LABELS_6_SPACES + app: scale-test + is-real: "true"OTHER_LABELS_6_SPACES template: metadata: 
labels: - app: scale-testOTHER_LABELS_8_SPACES + app: scale-test + is-real: "true"OTHER_LABELS_8_SPACES spec: nodeSelector: scale-test: "true" diff --git a/test/scale/templates/unapplied-networkpolicy.yaml b/test/scale/templates/unapplied-networkpolicy.yaml new file mode 100644 index 0000000000..812fb38944 --- /dev/null +++ b/test/scale/templates/unapplied-networkpolicy.yaml @@ -0,0 +1,22 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: TEMP_NAME + namespace: scale-test +spec: + podSelector: + matchLabels: + non-existent-key: val + ingress: + - from: + - podSelector: + matchLabels: + non-existent-key: val + egress: + - to: + - podSelector: + matchLabels: + non-existent-key: val + policyTypes: + - Ingress + - Egress diff --git a/test/scale/test-scale.sh b/test/scale/test-scale.sh index ce87775e56..363acdd349 100755 --- a/test/scale/test-scale.sh +++ b/test/scale/test-scale.sh @@ -3,7 +3,8 @@ set -e printHelp() { cat < --num-kwok-deployments= --num-kwok-replicas= --max-real-pods-per-node= --num-real-deployments= --num-real-replicas= --num-network-policies= --num-unique-labels-per-pod= --num-unique-labels-per-deployment= --num-shared-labels-per-pod= [--kubeconfig=] [--restart-npm] [--debug-exit-after-print-counts] [--debug-exit-after-generation] +./test-scale.sh --max-kwok-pods-per-node= --num-kwok-deployments= --num-kwok-replicas= --max-real-pods-per-node= --num-real-deployments= --num-real-replicas= --num-network-policies= --num-unapplied-network-policies= --num-unique-labels-per-pod= --num-unique-labels-per-deployment= --num-shared-labels-per-pod= [--kubeconfig=] [--restart-npm] [--debug-exit-after-print-counts] [--debug-exit-after-generation] +(more optional parameters at end of this message) Scales the number of Pods, Pod labels, and NetworkPolicies in a cluster. Uses KWOK to create fake nodes and fake pods as needed. 
@@ -28,6 +29,7 @@ REQUIRED PARAMETERS: --num-real-deployments deployments scheduled on nodes labeled with scale-test=true --num-real-replicas per deployment --num-network-policies NetPols applied to every Pod + --num-unapplied-network-policies NetPols that do not target any Pods --num-unique-labels-per-pod creates labels specific to each Pod. Creates numTotalPods*numUniqueLabelsPerPod distinct labels. In Cilium, a value >= 1 results in every Pod having a unique identity (not recommended for scale) --num-unique-labels-per-deployment create labels shared between replicas of a deployment. Creates numTotalDeployments*numUniqueLabelsPerDeployment distinct labels --num-shared-labels-per-pod create labels shared between all Pods. Creates numSharedLabelsPerPod distinct labels. Must be >= 3 if numNetworkPolicies > 0 because of the way we generate network policies @@ -37,6 +39,19 @@ OPTIONAL PARAMETERS: --restart-npm make sure NPM exists and restart it before running scale test --debug-exit-after-print-counts skip scale test. Just print out counts of things to be created and counts of IPSets/ACLs that NPM would create --debug-exit-after-generation skip scale test. Exit after generating templates + +OPTIONAL PARAMETERS TO TEST DELETION: + --sleep-after-creation= seconds to sleep after creating everything. Default is 0 + --delete-kwok-pods= delete and readd the specified number of fake Pods + --delete-real-pods= delete and readd the specified number of real Pods + --delete-pods-interval= seconds to wait after deleting Pods. Default is 60 + --delete-pods-times= number of times to delete and readd. Default is 1 + --delete-labels delete and readd shared labels from all Pods + --delete-labels-interval= seconds to wait after deleting or readding. Default is 60 + --delete-labels-times= number of times to delete and readd. Default is 1 + --delete-netpols delete and readd all NetworkPolicies + --delete-netpols-interval= seconds to wait after deleting or readding. 
Default is 60 + --delete-netpols-times= number of times to delete and readd. Default is 1 EOF } @@ -68,6 +83,9 @@ while [[ $# -gt 0 ]]; do --num-network-policies=*) numNetworkPolicies="${1#*=}" ;; + --num-unapplied-network-policies=*) + numUnappliedNetworkPolicies="${1#*=}" + ;; --num-unique-labels-per-pod=*) numUniqueLabelsPerPod="${1#*=}" ;; @@ -95,6 +113,39 @@ while [[ $# -gt 0 ]]; do --debug-exit-after-generation) DEBUG_EXIT_AFTER_GENERATION=true ;; + --sleep-after-creation=*) + sleepAfterCreation="${1#*=}" + ;; + --delete-kwok-pods=*) + deleteKwokPods="${1#*=}" + ;; + --delete-real-pods=*) + deleteRealPods="${1#*=}" + ;; + --delete-pods-interval=*) + deletePodsInterval="${1#*=}" + ;; + --delete-pods-times=*) + deletePodsTimes="${1#*=}" + ;; + --delete-labels) + deleteLabels=true + ;; + --delete-labels-interval=*) + deleteLabelsInterval="${1#*=}" + ;; + --delete-labels-times=*) + deleteLabelsTimes="${1#*=}" + ;; + --delete-netpols) + deleteNetpols=true + ;; + --delete-netpols-interval=*) + deleteNetpolsInterval="${1#*=}" + ;; + --delete-netpols-times=*) + deleteNetpolsTimes="${1#*=}" + ;; *) echo "ERROR: unknown parameter $1. Make sure you're using '--key=value' for parameters with values" exit 1 @@ -103,7 +154,7 @@ while [[ $# -gt 0 ]]; do shift done -if [[ -z $maxKwokPodsPerNode || -z $numKwokDeployments || -z $numKwokReplicas || -z $maxRealPodsPerNode || -z $numRealDeployments || -z $numRealReplicas || -z $numNetworkPolicies || -z $numUniqueLabelsPerPod || -z $numUniqueLabelsPerDeployment || -z $numSharedLabelsPerPod ]]; then +if [[ -z $maxKwokPodsPerNode || -z $numKwokDeployments || -z $numKwokReplicas || -z $maxRealPodsPerNode || -z $numRealDeployments || -z $numRealReplicas || -z $numNetworkPolicies || -z $numUnappliedNetworkPolicies || -z $numUniqueLabelsPerPod || -z $numUniqueLabelsPerDeployment || -z $numSharedLabelsPerPod ]]; then echo "ERROR: missing required parameter. 
Check --help for usage" exit 1 fi @@ -113,6 +164,13 @@ if [[ $numNetworkPolicies -gt 0 && $numSharedLabelsPerPod -lt 3 ]]; then exit 1 fi +if [[ -z $deletePodsInterval ]]; then deletePodsInterval=60; fi +if [[ -z $deletePodsTimes ]]; then deletePodsTimes=1; fi +if [[ -z $deleteLabelsInterval ]]; then deleteLabelsInterval=60; fi +if [[ -z $deleteLabelsTimes ]]; then deleteLabelsTimes=1; fi +if [[ -z $deleteNetpolsInterval ]]; then deleteNetpolsInterval=60; fi +if [[ -z $deleteNetpolsTimes ]]; then deleteNetpolsTimes=1; fi + ## CALCULATIONS numKwokPods=$(( $numKwokDeployments * $numKwokReplicas )) numKwokNodes=$(( ($numKwokPods + $maxKwokPodsPerNode - 1) / $maxKwokPodsPerNode)) @@ -122,13 +180,25 @@ numTotalPods=$(( $numKwokPods + $numRealPods )) ## NPM CALCULATIONS # unique to templates/networkpolicy.yaml -numACLsAddedByNPM=$(( 4 * $numNetworkPolicies )) +numACLsAddedByNPM=$(( 6 * $numNetworkPolicies )) # IPSet/member counts can be slight underestimates if there are more than one template-hash labels # 4 basic IPSets are [ns-scale-test,kubernetes.io/metadata.name:scale-test,template-hash:xxxx,app:scale-test] -numIPSetsAddedByNPM=$(( 4 + 2*$numTotalPods*$numUniqueLabelsPerPod + 2*$numSharedLabelsPerPod + 2*($numKwokDeployments+$numRealDeployments)*$numUniqueLabelsPerDeployment )) +# for deployments, have [is-real, is-real:true, is-kwok, is-kwok:true] +# for unapplied netpols, have [non-existent-key, non-existent-key:val] +extraIPSets=0 +if [[ $numUnappliedNetworkPolicies -gt 0 ]]; then + extraIPSets=$(( $extraIPSets + 2 )) +fi +if [[ $numKwokPods -gt 0 ]]; then + extraIPSets=$(( $extraIPSets + 2 )) +fi +if [[ $numRealPods -gt 0 ]]; then + extraIPSets=$(( $extraIPSets + 2 )) +fi +numIPSetsAddedByNPM=$(( 4 + 2*$numTotalPods*$numUniqueLabelsPerPod + 2*$numSharedLabelsPerPod + 2*($numKwokDeployments+$numRealDeployments)*$numUniqueLabelsPerDeployment + $extraIPSets )) # 3 basic members are [all-ns,kubernetes.io/metadata.name,kubernetes.io/metadata.name:scale-test] # 
5*pods members go to [ns-scale-test,kubernetes.io/metadata.name:scale-test,template-hash:xxxx,app:scale-test] -numIPSetMembersAddedByNPM=$(( 3 + $numTotalPods*(5 + 2*$numUniqueLabelsPerPod + 2*$numSharedLabelsPerPod) + 2*($numKwokPods+$numRealPods)*$numUniqueLabelsPerDeployment )) +numIPSetMembersAddedByNPM=$(( 3 + $numTotalPods*(5 + 2*$numUniqueLabelsPerPod + 2*$numSharedLabelsPerPod) + 2*($numKwokPods+$numRealPods)*$numUniqueLabelsPerDeployment + 2*$numKwokPods + 2*$numRealPods )) ## PRINT OUT COUNTS cat < generated/networkpolicies/policy-$i.yaml + fileName=generated/networkpolicies/applied/policy-$i.yaml + sed "s/TEMP_NAME/policy-$i/g" templates/networkpolicy.yaml > $fileName if [[ $valNum -ge $(( numSharedLabelsPerPod - 2 )) ]]; then valNum=$(( $numSharedLabelsPerPod - 2 )) fi k=`printf "%05d" $valNum` - sed -i "s/TEMP_LABEL_NAME/shared-lab-$k/g" generated/networkpolicies/policy-$i.yaml + sed -i "s/TEMP_LABEL_NAME/shared-lab-$k/g" $fileName ingressNum=$(( $valNum + 1 )) k=`printf "%05d" $ingressNum` - sed -i "s/TEMP_INGRESS_NAME/shared-lab-$k/g" generated/networkpolicies/policy-$i.yaml + sed -i "s/TEMP_INGRESS_NAME/shared-lab-$k/g" $fileName egressNum=$(( $valNum + 2 )) k=`printf "%05d" $ingressNum` - sed -i "s/TEMP_EGRESS_NAME/shared-lab-$k/g" generated/networkpolicies/policy-$i.yaml + sed -i "s/TEMP_EGRESS_NAME/shared-lab-$k/g" $fileName +done + +for j in $(seq 1 $numUnappliedNetworkPolicies ); do + i=`printf "%05d" $j` + sed "s/TEMP_NAME/unapplied-policy-$i/g" templates/unapplied-networkpolicy.yaml > generated/networkpolicies/unapplied/unapplied-policy-$i.yaml done for i in $(seq -f "%05g" 1 $numKwokNodes); do @@ -320,9 +410,93 @@ if [[ $numUniqueLabelsPerPod -gt 0 ]]; then fi set -x -kubectl $KUBECONFIG_ARG apply -f generated/networkpolicies/ +kubectl $KUBECONFIG_ARG apply -f generated/networkpolicies/unapplied +kubectl $KUBECONFIG_ARG apply -f generated/networkpolicies/applied +# wait for all pods to run +kubectl $KUBECONFIG_ARG wait --for=condition=Ready 
pods -n scale-test --all --timeout=15m set +x +echo +echo "done scaling at $(date -u). Had started at $startDate." +echo + +echo "performing deletions if configured..." + +if [[ $sleepAfterCreation != "" ]]; then + echo "sleeping $sleepAfterCreation seconds after creation..." + sleep $sleepAfterCreation +fi + +if [[ $deleteLabels == true && $numSharedLabelsPerPod -gt 2 ]]; then + echo "deleting labels..." + for i in $(seq 1 $deleteLabelsTimes); do + echo "deleting labels. round $i/$deleteLabelsTimes..." + set -x + kubectl $KUBECONFIG_ARG label pods -n scale-test --all shared-lab-00001- shared-lab-00002- shared-lab-00003- + set +x + echo "sleeping $deleteLabelsInterval seconds after deleting labels (round $i/$deleteLabelsTimes)..." + sleep $deleteLabelsInterval + + echo "re-adding labels. round $i/$deleteLabelsTimes..." + set -x + kubectl $KUBECONFIG_ARG label pods -n scale-test --all shared-lab-00001=val shared-lab-00002=val shared-lab-00003=val + set +x + echo "sleeping $deleteLabelsInterval seconds after readding labels (end of round $i/$deleteLabelsTimes)..." + sleep $deleteLabelsInterval + done +fi + +if [[ $deleteNetpols == true ]]; then + echo "deleting network policies..." + for i in $(seq 1 $deleteNetpolsTimes); do + echo "deleting network policies. round $i/$deleteNetpolsTimes..." + set -x + kubectl $KUBECONFIG_ARG delete netpol -n scale-test --all + set +x + echo "sleeping $deleteNetpolsInterval seconds after deleting network policies (round $i/$deleteNetpolsTimes)..." + sleep $deleteNetpolsInterval + + echo "re-adding network policies. round $i/$deleteNetpolsTimes..." + set -x + kubectl $KUBECONFIG_ARG apply -f generated/networkpolicies/unapplied + kubectl $KUBECONFIG_ARG apply -f generated/networkpolicies/applied + set +x + echo "sleeping $deleteNetpolsInterval seconds after readding network policies (end of round $i/$deleteNetpolsTimes)..." 
+ sleep $deleteNetpolsInterval + done +fi + +if [[ ($deleteKwokPods != "" && $deleteKwokPods -gt 0) || ($deleteRealPods != "" && $deleteRealPods -gt 0) ]]; then + for i in $(seq 1 $deletePodsTimes); do + if [[ $deleteKwokPods != "" && $deleteKwokPods -gt 0 && $numKwokPods -gt 0 ]]; then + echo "deleting kwok pods. round $i/$deletePodsTimes..." + pods=`kubectl $KUBECONFIG_ARG get pods -n scale-test -l is-kwok="true" | grep -v NAME | shuf -n $deleteKwokPods | awk '{print $1}' | tr '\n' ' '` + set -x + kubectl $KUBECONFIG_ARG delete pods -n scale-test $pods + set +x + fi + + if [[ $deleteRealPods != "" && $deleteRealPods -gt 0 && $numRealPods -gt 0 ]]; then + echo "deleting real pods. round $i/$deletePodsTimes..." + pods=`kubectl $KUBECONFIG_ARG get pods -n scale-test -l is-real="true" | grep -v NAME | shuf -n $deleteRealPods | awk '{print $1}' | tr '\n' ' '` + set -x + kubectl $KUBECONFIG_ARG delete pods -n scale-test $pods + set +x + fi + + sleep 5s + set -x + kubectl $KUBECONFIG_ARG wait --for=condition=Ready pods -n scale-test --all --timeout=15m + set +x + + if [[ $i == $deletePodsTimes ]]; then + break + fi + echo "sleeping $deletePodsInterval seconds after deleting pods (end of round $i/$deletePodsTimes)..." + sleep $deletePodsInterval + done +fi + echo echo "FINISHED at $(date -u). Had started at $startDate." echo