
Commit

[Cherry-Pick for 1.5.1] (#39)
* refactor(experiments): Refactor litmus go experiments (#29)

Signed-off-by: Udit Gaurav <uditgaurav@gmail.com>

* feat(experiments): Add pod memory hog experiment (#31)

Signed-off-by: Udit Gaurav <uditgaurav@gmail.com>

* refactor(go-experiments): separate the types.go file for each experiment (#34)

Signed-off-by: shubhamchaudhary <shubham.chaudhary@mayadata.io>

* update(contribution-guide): updating contribution guide according to new schema changes (#35)

Signed-off-by: shubhamchaudhary <shubham.chaudhary@mayadata.io>

* chore(experiment): Add pod network duplication experiment in generic experiments of LitmusChaos (#27)

* chore(experiment): Add pod network duplication experiment in generic experiments of LitmusChaos

Signed-off-by: Udit Gaurav <uditgaurav@gmail.com>

* bug(fix): Add fix for summary event and getting target container name (#37)

Signed-off-by: Udit Gaurav <uditgaurav@gmail.com>

* bug(fix): Remove extra index from the list in pod duplication experiment (#38)

Signed-off-by: Udit Gaurav <uditgaurav@gmail.com>

Co-authored-by: Shubham Chaudhary <shubham.chaudhary@mayadata.io>
uditgaurav and ispeakc0de committed Jul 6, 2020
1 parent 8d6cc4b commit 25bea8a
Showing 35 changed files with 1,750 additions and 366 deletions.
9 changes: 7 additions & 2 deletions build/generate_go_binary
@@ -1,4 +1,9 @@
# Building go binaries for pod_delete experiment
go build -o build/_output/pod-delete ./experiments/generic/pod-delete
# Building go binaries for cpu_hog experiment
go build -o build/_output/cpu-hog ./experiments/generic/cpu-hog
# Building go binaries for pod_cpu_hog experiment
go build -o build/_output/pod-cpu-hog ./experiments/generic/pod-cpu-hog
# Building go binaries for pod_memory_hog experiment
go build -o build/_output/pod-memory-hog ./experiments/generic/pod-memory-hog
# Building go binaries for pod_network_duplication experiment
go build -o build/_output/pod-network-duplication ./experiments/generic/pod-network-duplication

@@ -1,4 +1,4 @@
package cpu_hog
package pod_cpu_hog

import (
"fmt"
@@ -13,19 +13,19 @@ import (
"github.com/litmuschaos/litmus-go/pkg/events"
"github.com/litmuschaos/litmus-go/pkg/log"
"github.com/litmuschaos/litmus-go/pkg/math"
experimentTypes "github.com/litmuschaos/litmus-go/pkg/pod-cpu-hog/types"
"github.com/litmuschaos/litmus-go/pkg/result"
"github.com/litmuschaos/litmus-go/pkg/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
core_v1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/klog"
)

// Using the REST API to exec into the target container of the target pod
// StressCPU uses the REST API to exec into the target container of the target pod
// The function keeps increasing CPU utilisation until it reaches the maximum available or allowed limit
// TOTAL_CHAOS_DURATION specifies how long this experiment should last
func StressCPU(containerName, podName, namespace string, clients environment.ClientSets) error {
@@ -68,8 +68,8 @@ func StressCPU(containerName, podName, namespace string, clients environment.Cli
})

if err != nil {
error_code := strings.Contains(err.Error(), "143")
if error_code != true {
errorCode := strings.Contains(err.Error(), "143")
if errorCode != true {
log.Infof("[Chaos]:CPU stress error: %v", err.Error())
return err
}
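For context on the special-cased "143": a process killed with SIGTERM exits with status 128 + 15 = 143, which is exactly what the kill step sends, so the chaoslib treats it as a clean stop rather than a failure. A standalone sketch of that check (the helper name is illustrative, not litmus-go's):

```go
package main

import (
	"fmt"
	"strings"
)

// isExpectedKill reports whether an exec error only reflects the stress
// process being terminated by SIGTERM: exit status 143 = 128 + 15.
func isExpectedKill(err error) bool {
	return err != nil && strings.Contains(err.Error(), "143")
}

func main() {
	err := fmt.Errorf("command terminated with exit code 143")
	fmt.Println(isExpectedKill(err)) // true: treat as a clean stop, not a failure
}
```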
@@ -78,8 +78,8 @@ func StressCPU(containerName, podName, namespace string, clients environment.Cli
return nil
}

//This function orchestrates the experiment by calling the StressCPU function for every core, of every container, of every pod that is targetted
func ExperimentCPU(experimentsDetails *types.ExperimentDetails, clients environment.ClientSets, resultDetails *types.ResultDetails) error {
//ExperimentCPU orchestrates the experiment by calling the StressCPU function for every core, of every container, of every pod that is targeted
func ExperimentCPU(experimentsDetails *experimentTypes.ExperimentDetails, clients environment.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {

var endTime <-chan time.Time
timeDelay := time.Duration(experimentsDetails.ChaosDuration) * time.Second
@@ -102,6 +102,13 @@ func ExperimentCPU(experimentsDetails *types.ExperimentDetails, clients environm
log.Infof("[Chaos]:Stressing: %v cores", strconv.Itoa(experimentsDetails.CPUcores))

for i := 0; i < experimentsDetails.CPUcores; i++ {

if experimentsDetails.EngineName != "" {
msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + pod.Name + " pod"
environment.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, chaosDetails)
events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
}

go StressCPU(container.Name, pod.Name, experimentsDetails.AppNS, clients)

log.Infof("[Chaos]:Waiting for: %vs", strconv.Itoa(experimentsDetails.ChaosDuration))
@@ -123,7 +130,7 @@ func ExperimentCPU(experimentsDetails *types.ExperimentDetails, clients environm
}
resultDetails.FailStep = "CPU hog Chaos injection stopped!"
resultDetails.Verdict = "Stopped"
result.ChaosResult(experimentsDetails, clients, resultDetails, "EOT")
result.ChaosResult(chaosDetails, clients, resultDetails, "EOT")
os.Exit(1)
case <-endTime:
log.Infof("[Chaos]: Time is up for experiment: %v", experimentsDetails.ExperimentName)
@@ -133,8 +140,8 @@ func ExperimentCPU(experimentsDetails *types.ExperimentDetails, clients environm
}
err = KillStressCPU(container.Name, pod.Name, experimentsDetails.AppNS, clients)
if err != nil {
error_code := strings.Contains(err.Error(), "143")
if error_code != true {
errorCode := strings.Contains(err.Error(), "143")
if errorCode != true {
log.Infof("[Chaos]:CPU stress error: %v", err.Error())
return err
}
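The surrounding select alternates between an abort-signal channel (which records a Stopped verdict, writes the EOT result, and exits) and the endTime channel (which ends the stress cleanly). The pattern in isolation, with the duration made up:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// Shape of the experiment's watch loop: stop on an operator abort
	// (SIGINT/SIGTERM) or when the chaos duration elapses, whichever is first.
	signChan := make(chan os.Signal, 1)
	signal.Notify(signChan, os.Interrupt, syscall.SIGTERM)
	endTime := time.After(3 * time.Second)

	select {
	case <-signChan:
		fmt.Println("[Chaos]: Chaos injection stopped, recording EOT result")
		os.Exit(1)
	case <-endTime:
		fmt.Println("[Chaos]: Time is up, killing the stress process")
	}
}
```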
@@ -146,16 +153,16 @@ func ExperimentCPU(experimentsDetails *types.ExperimentDetails, clients environm
return nil
}

//PreparePodDelete contains the steps for prepration before chaos
func PrepareCPUstress(experimentsDetails *types.ExperimentDetails, clients environment.ClientSets, resultDetails *types.ResultDetails, recorder *events.Recorder) error {
//PrepareCPUstress contains the steps for preparation before chaos
func PrepareCPUstress(experimentsDetails *experimentTypes.ExperimentDetails, clients environment.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {

//Waiting for the ramp time before chaos injection
if experimentsDetails.RampTime != 0 {
log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", strconv.Itoa(experimentsDetails.RampTime))
waitForRampTime(experimentsDetails)
}
//Starting the CPU stress experiment
err := ExperimentCPU(experimentsDetails, clients, resultDetails)
err := ExperimentCPU(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails)
if err != nil {
return err
}
@@ -168,13 +175,12 @@ func PrepareCPUstress(experimentsDetails *types.ExperimentDetails, clients envir
}

//waitForRampTime waits for the given ramp time duration (in seconds)
func waitForRampTime(experimentsDetails *types.ExperimentDetails) {
func waitForRampTime(experimentsDetails *experimentTypes.ExperimentDetails) {
time.Sleep(time.Duration(experimentsDetails.RampTime) * time.Second)
}

//PreparePodList derive the list of target pod for deletion
//It will also adjust the number of the target pods depending on the specified percentage in PODS_AFFECTED_PERC variable
func PreparePodList(experimentsDetails *types.ExperimentDetails, clients environment.ClientSets, resultDetails *types.ResultDetails) (*core_v1.PodList, error) {
//PreparePodList derives the list of target pods and adjusts its length according to the percentage specified in the PODS_AFFECTED_PERC variable
func PreparePodList(experimentsDetails *experimentTypes.ExperimentDetails, clients environment.ClientSets, resultDetails *types.ResultDetails) (*core_v1.PodList, error) {

log.Infof("[Chaos]:Pods percentage to affect is %v", strconv.Itoa(experimentsDetails.PodsAffectedPerc))

@@ -188,17 +194,17 @@ func PreparePodList(experimentsDetails *types.ExperimentDetails, clients environ
//If the default value has changed, means that we are aiming for a subset of the pods.
if experimentsDetails.PodsAffectedPerc != 100 {

new_podlist_length := math.Adjustment(experimentsDetails.PodsAffectedPerc, len(pods.Items))
newPodListLength := math.Maximum(1, math.Adjustment(experimentsDetails.PodsAffectedPerc, len(pods.Items)))

pods.Items = pods.Items[:new_podlist_length]
pods.Items = pods.Items[:newPodListLength]

log.Infof("[Chaos]:Number of pods targetted: %v", strconv.Itoa(new_podlist_length))
log.Infof("[Chaos]:Number of pods targetted: %v", strconv.Itoa(newPodListLength))

}
return pods, nil
}
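The newly added math.Maximum guard matters when the percentage of a small pod list truncates to zero; without it the experiment would target no pods at all. A sketch of the arithmetic (the two helpers are assumed to behave as shown, mirroring litmus-go's math package):

```go
package main

import "fmt"

// adjustment mirrors what math.Adjustment is used for here: the integer
// percentage of a total (assumption: plain truncating integer math).
func adjustment(perc, total int) int {
	return (perc * total) / 100
}

// maximum mirrors math.Maximum for two ints.
func maximum(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func main() {
	pods, perc := 7, 10
	fmt.Println(adjustment(perc, pods))             // 0: 10% of 7 truncates to zero targets
	fmt.Println(maximum(1, adjustment(perc, pods))) // 1: the new guard keeps one target
}
```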

// Function to kill the experiment. Triggered by either timeout of chaos duration or termination of the experiment
// KillStressCPU kills the stress process inside the target container. It is triggered either by the chaos-duration timeout or by termination of the experiment
func KillStressCPU(containerName, podName, namespace string, clients environment.ClientSets) error {

command := []string{"/bin/sh", "-c", "kill $(find /proc -name exe -lname '*/md5sum' 2>&1 | grep -v 'Permission denied' | awk -F/ '{print $(NF-1)}' | head -n 1)"}
@@ -240,8 +246,8 @@ func KillStressCPU(containerName, podName, namespace string, clients environment

//The kill command returns a 143 when it kills a process. This is expected
if err != nil {
error_code := strings.Contains(err.Error(), "143")
if error_code != true {
errorCode := strings.Contains(err.Error(), "143")
if errorCode != true {
log.Infof("[Chaos]:CPU stress error: %v", err.Error())
return err
}
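Both the stress and the kill step exec into the target container through the Kubernetes REST API using client-go's remotecommand package (visible in the imports above); the kill command locates the process whose /proc/&lt;pid&gt;/exe symlink points at md5sum (presumably the stress process) and SIGTERMs it. A minimal, self-contained sketch of that exec technique, with placeholder pod, container, and kubeconfig handling:

```go
package main

import (
	"bytes"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/remotecommand"
)

// execInPod runs a command inside a target container via the exec
// subresource, the same mechanism StressCPU and KillStressCPU drive.
func execInPod(config *rest.Config, client kubernetes.Interface, ns, pod, container string, command []string) (string, error) {
	req := client.CoreV1().RESTClient().Post().
		Resource("pods").Name(pod).Namespace(ns).SubResource("exec")
	req.VersionedParams(&corev1.PodExecOptions{
		Container: container,
		Command:   command,
		Stdout:    true,
		Stderr:    true,
	}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return "", err
	}
	var stdout, stderr bytes.Buffer
	err = exec.Stream(remotecommand.StreamOptions{Stdout: &stdout, Stderr: &stderr})
	return stdout.String() + stderr.String(), err
}

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)
	out, err := execInPod(config, client, "default", "target-pod", "target-container",
		[]string{"/bin/sh", "-c", "echo hello"})
	fmt.Println(out, err)
}
```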
38 changes: 19 additions & 19 deletions chaoslib/litmus/pod_delete/pod-delete.go
@@ -5,24 +5,24 @@ import (
"strconv"
"time"

"github.com/litmuschaos/litmus-go/pkg/environment"
"github.com/litmuschaos/litmus-go/pkg/log"
"github.com/litmuschaos/litmus-go/pkg/math"
experimentTypes "github.com/litmuschaos/litmus-go/pkg/pod-delete/types"
"github.com/litmuschaos/litmus-go/pkg/status"
"github.com/litmuschaos/litmus-go/pkg/types"
"github.com/openebs/maya/pkg/util/retry"
"github.com/pkg/errors"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/litmuschaos/litmus-go/pkg/environment"
"github.com/litmuschaos/litmus-go/pkg/math"
"github.com/litmuschaos/litmus-go/pkg/types"
)

var err error

//PreparePodDelete contains the prepration steps before chaos injection
func PreparePodDelete(experimentsDetails *types.ExperimentDetails, clients environment.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, clients environment.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {

//getting the iteration count for the pod deletion
//Getting the iteration count for the pod deletion
GetIterations(experimentsDetails)
//Waiting for the ramp time before chaos injection
if experimentsDetails.RampTime != 0 {
@@ -39,7 +39,7 @@ func PreparePodDelete(experimentsDetails *types.ExperimentDetails, clients envir
// Generate the run_id
runID := GetRunID()

// Creating the helper pod
// Creating a helper pod
err = CreateHelperPod(experimentsDetails, clients, runID)
if err != nil {
errors.Errorf("Unable to create the helper pod, err: %v", err)
@@ -65,7 +65,7 @@ func PreparePodDelete(experimentsDetails *types.ExperimentDetails, clients envir
//ChaosCurrentTimeStamp contains the current timestamp
ChaosCurrentTimeStamp := time.Now().Unix()
//ChaosDiffTimeStamp contains the difference of current timestamp and start timestamp
//It will helpful to track the total chaos duration
//It is helpful to track the total chaos duration
chaosDiffTimeStamp := ChaosCurrentTimeStamp - ChaosStartTimeStamp

if int(chaosDiffTimeStamp) < experimentsDetails.ChaosDuration {
@@ -88,7 +88,7 @@ func PreparePodDelete(experimentsDetails *types.ExperimentDetails, clients envir
}
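PreparePodDelete bounds its delete-and-wait loop by diffing Unix timestamps each iteration rather than counting iterations alone. The bookkeeping in isolation (durations made up):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Compare Unix timestamps each iteration to decide whether the
	// total chaos duration has elapsed, as the loop above does.
	chaosDuration := 3 // seconds, stand-in for experimentsDetails.ChaosDuration
	chaosStartTimeStamp := time.Now().Unix()
	for {
		time.Sleep(1 * time.Second) // one pod-delete iteration would run here
		chaosDiffTimeStamp := time.Now().Unix() - chaosStartTimeStamp
		if int(chaosDiffTimeStamp) >= chaosDuration {
			fmt.Println("[Chaos]: total chaos duration reached after", chaosDiffTimeStamp, "s")
			break
		}
	}
}
```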

//GetIterations derives the iterations value from the given parameters
func GetIterations(experimentsDetails *types.ExperimentDetails) {
func GetIterations(experimentsDetails *experimentTypes.ExperimentDetails) {
var Iterations int
if experimentsDetails.ChaosInterval != 0 {
Iterations = experimentsDetails.ChaosDuration / experimentsDetails.ChaosInterval
@@ -100,7 +100,7 @@ func GetIterations(experimentsDetails *types.ExperimentDetails) {
}
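GetIterations divides the total chaos duration by the chaos interval; the zero-interval fallback is collapsed above, so the floor of one iteration below is an assumption in the style of the math.Maximum guard used elsewhere. A standalone sketch:

```go
package main

import "fmt"

// getIterations mirrors the derivation: how many injections fit into the
// total chaos duration at one injection per interval, with a floor of one.
func getIterations(chaosDuration, chaosInterval int) int {
	iterations := 0
	if chaosInterval != 0 {
		iterations = chaosDuration / chaosInterval
	}
	if iterations < 1 {
		return 1 // assumed floor; the elided branch is not shown in the diff
	}
	return iterations
}

func main() {
	fmt.Println(getIterations(30, 10)) // 3 iterations over a 30s window
	fmt.Println(getIterations(30, 0))  // 1
}
```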

//waitForRampTime waits for the given ramp time duration (in seconds)
func waitForRampTime(experimentsDetails *types.ExperimentDetails) {
func waitForRampTime(experimentsDetails *experimentTypes.ExperimentDetails) {
time.Sleep(time.Duration(experimentsDetails.RampTime) * time.Second)
}

@@ -115,7 +115,7 @@ func GetRunID() string {
}
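The body of GetRunID is collapsed above; it returns a short random suffix used to give each helper pod a unique name (see "pod-delete-"+runID further down). A plausible sketch, with the alphabet and length assumed:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// getRunID sketches a plausible implementation: a short random suffix
// for naming helper pods. Alphabet and length are assumptions.
func getRunID() string {
	rand.Seed(time.Now().UnixNano())
	const letters = "abcdefghijklmnopqrstuvwxyz"
	id := make([]byte, 6)
	for i := range id {
		id[i] = letters[rand.Intn(len(letters))]
	}
	return string(id)
}

func main() {
	fmt.Println("pod-delete-" + getRunID()) // e.g. pod-delete-xkqzrm
}
```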

// GetServiceAccount finds the serviceAccountName for the helper pod
func GetServiceAccount(experimentsDetails *types.ExperimentDetails, clients environment.ClientSets) error {
func GetServiceAccount(experimentsDetails *experimentTypes.ExperimentDetails, clients environment.ClientSets) error {
pod, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Get(experimentsDetails.ChaosPodName, v1.GetOptions{})
if err != nil {
return err
@@ -125,7 +125,7 @@ func GetServiceAccount(experimentsDetails *types.ExperimentDetails, clients envi
}

// CreateHelperPod derives the attributes for the helper pod and creates it
func CreateHelperPod(experimentsDetails *types.ExperimentDetails, clients environment.ClientSets, runID string) error {
func CreateHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients environment.ClientSets, runID string) error {

helperPod := &apiv1.Pod{
ObjectMeta: v1.ObjectMeta{
@@ -164,7 +164,7 @@ func CreateHelperPod(experimentsDetails *types.ExperimentDetails, clients enviro
}

//DeleteHelperPod deletes the helper pod
func DeleteHelperPod(experimentsDetails *types.ExperimentDetails, clients environment.ClientSets, runID string) error {
func DeleteHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients environment.ClientSets, runID string) error {

err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Delete("pod-delete-"+runID, &v1.DeleteOptions{})

@@ -187,12 +187,12 @@ func DeleteHelperPod(experimentsDetails *types.ExperimentDetails, clients enviro
}
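The collapsed remainder of DeleteHelperPod polls until the pod is actually gone, using the openebs/maya retry helper imported at the top of the file. Its fluent pattern, demonstrated on a dummy check (attempt counts and waits assumed):

```go
package main

import (
	"fmt"
	"time"

	"github.com/openebs/maya/pkg/util/retry"
	"github.com/pkg/errors"
)

func main() {
	attempts := 0
	// Retry a check up to 5 times with a 1s wait between tries, the same
	// fluent pattern DeleteHelperPod uses to confirm the pod is gone.
	err := retry.
		Times(5).
		Wait(1 * time.Second).
		Try(func(attempt uint) error {
			attempts++
			if attempts < 3 {
				return errors.Errorf("helper pod is not deleted yet")
			}
			return nil
		})
	fmt.Println(err, "after", attempts, "attempts") // <nil> after 3 attempts
}
```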

// GetPodEnv derives all the environment variables required for the helper pod
func GetPodEnv(experimentsDetails *types.ExperimentDetails) []apiv1.EnvVar{
func GetPodEnv(experimentsDetails *experimentTypes.ExperimentDetails) []apiv1.EnvVar {

var envVar []apiv1.EnvVar
ENVList := map[string]string{"FORCE": strconv.FormatBool(experimentsDetails.Force),"APP_NS": experimentsDetails.AppNS,"KILL_COUNT": strconv.Itoa(experimentsDetails.KillCount),
"TOTAL_CHAOS_DURATION": strconv.Itoa(experimentsDetails.ChaosDuration),"CHAOS_NAMESPACE": experimentsDetails.ChaosNamespace,"APP_LABEL": experimentsDetails.AppLabel,
"CHAOS_ENGINE": experimentsDetails.EngineName,"CHAOS_UID": string(experimentsDetails.ChaosUID),"CHAOS_INTERVAL": strconv.Itoa(experimentsDetails.ChaosInterval),"ITERATIONS": strconv.Itoa(experimentsDetails.Iterations)}
ENVList := map[string]string{"FORCE": strconv.FormatBool(experimentsDetails.Force), "APP_NS": experimentsDetails.AppNS, "KILL_COUNT": strconv.Itoa(experimentsDetails.KillCount),
"TOTAL_CHAOS_DURATION": strconv.Itoa(experimentsDetails.ChaosDuration), "CHAOS_NAMESPACE": experimentsDetails.ChaosNamespace, "APP_LABEL": experimentsDetails.AppLabel,
"CHAOS_ENGINE": experimentsDetails.EngineName, "CHAOS_UID": string(experimentsDetails.ChaosUID), "CHAOS_INTERVAL": strconv.Itoa(experimentsDetails.ChaosInterval), "ITERATIONS": strconv.Itoa(experimentsDetails.Iterations)}
for key, value := range ENVList {
var perEnv apiv1.EnvVar
perEnv.Name = key
@@ -204,7 +204,7 @@ func GetPodEnv(experimentsDetails *types.ExperimentDetails) []apiv1.EnvVar{

var downwardEnv apiv1.EnvVar
downwardEnv.Name = "POD_NAME"
downwardEnv.ValueFrom = &experimentPodName
downwardEnv.ValueFrom = &experimentPodName
envVar = append(envVar, downwardEnv)

return envVar
@@ -219,4 +219,4 @@ func GetValueFromDownwardAPI(apiVersion string, fieldPath string) apiv1.EnvVarSo
},
}
return downwardENV
}
}
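GetValueFromDownwardAPI's body is collapsed above; from its signature and visible tail it builds an EnvVarSource whose FieldRef points at the requested field (metadata.name for the POD_NAME variable built earlier). A sketch of a plausible reconstruction:

```go
package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
)

// GetValueFromDownwardAPI is reconstructed from its visible signature and
// tail: it wires a downward-API field reference into an EnvVarSource.
func GetValueFromDownwardAPI(apiVersion string, fieldPath string) apiv1.EnvVarSource {
	downwardENV := apiv1.EnvVarSource{
		FieldRef: &apiv1.ObjectFieldSelector{
			APIVersion: apiVersion,
			FieldPath:  fieldPath,
		},
	}
	return downwardENV
}

func main() {
	src := GetValueFromDownwardAPI("v1", "metadata.name")
	fmt.Println(src.FieldRef.FieldPath) // metadata.name
}
```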
