New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Optimize gracefulRampDown calculations #1567
Conversation
Codecov Report
@@ Coverage Diff @@
## master #1567 +/- ##
==========================================
- Coverage 77.11% 77.10% -0.02%
==========================================
Files 162 162
Lines 13192 13202 +10
==========================================
+ Hits 10173 10179 +6
- Misses 2499 2503 +4
Partials 520 520
Continue to review full report at Codecov.
|
Well, while I'm only pretty confident there aren't any huge bugs in the new code, here are the butchered changes: diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go
index 22d58a5f..8169b612 100644
--- a/lib/executor/ramping_vus.go
+++ b/lib/executor/ramping_vus.go
@@ -491,6 +491,55 @@ func (vlvc RampingVUsConfig) reserveVUsForGracefulRampDowns( //nolint:funlen
return newSteps
}
// reserveVUsForGracefulRampDownsOld is the pre-optimization implementation,
// kept so the randomized test can compare its output with the new one.
//
// For every raw step that decreases the planned VUs, the decrease is
// postponed by the configured gracefulRampDown period, so the execution
// slots stay reserved during the ramp-down window. Steps that keep or
// increase the VU count pass through unchanged.
func (vlvc RampingVUsConfig) reserveVUsForGracefulRampDownsOld( //nolint:funlen
	rawSteps []lib.ExecutionStep, executorEndOffset time.Duration,
) []lib.ExecutionStep {
	rawStepsLen := len(rawSteps)
	gracefulRampDownPeriod := vlvc.GetGracefulRampDown()
	newSteps := []lib.ExecutionStep{}

	lastPlannedVUs := uint64(0)
	for rawStepNum := 0; rawStepNum < rawStepsLen; rawStepNum++ {
		rawStep := rawSteps[rawStepNum]
		// The first step and any increase are emitted as-is; only
		// decreases need to be delayed.
		if rawStepNum == 0 || rawStep.PlannedVUs > lastPlannedVUs {
			newSteps = append(newSteps, rawStep)
			lastPlannedVUs = rawStep.PlannedVUs
			continue
		}
		// No change in planned VUs — nothing to emit for this step.
		if rawStep.PlannedVUs == lastPlannedVUs {
			continue
		}
		// This step is a decrease. Look ahead: if the VU count climbs back
		// to (or above) the current level within the graceful ramp-down
		// window, the decrease can be skipped entirely.
		skippedToNewRawStep := false
		timeOffsetWithTimeout := rawStep.TimeOffset + gracefulRampDownPeriod

		for advStepNum := rawStepNum + 1; advStepNum < rawStepsLen; advStepNum++ {
			advStep := rawSteps[advStepNum]
			if advStep.TimeOffset > timeOffsetWithTimeout {
				break
			}
			if advStep.PlannedVUs >= lastPlannedVUs {
				// Resume the outer loop just before the step that restored
				// the VU count (the outer loop's i++ lands exactly on it).
				rawStepNum = advStepNum - 1
				skippedToNewRawStep = true
				break
			}
		}
		if skippedToNewRawStep {
			continue
		}
		// A decrease delayed past the executor's end offset is pointless —
		// the executor will have been stopped by then anyway.
		if timeOffsetWithTimeout >= executorEndOffset {
			break
		}

		// Emit the decrease, postponed by the graceful ramp-down period.
		newSteps = append(newSteps, lib.ExecutionStep{
			TimeOffset: timeOffsetWithTimeout,
			PlannedVUs: rawStep.PlannedVUs,
		})
		lastPlannedVUs = rawStep.PlannedVUs
	}

	return newSteps
}
+
// GetExecutionRequirements very dynamically reserves exactly the number of
// required VUs for this executor at every moment of the test.
//
@@ -526,6 +575,23 @@ func (vlvc RampingVUsConfig) GetExecutionRequirements(et *lib.ExecutionTuple) []
return steps
}
+func (vlvc RampingVUsConfig) GetExecutionRequirementsOld(et *lib.ExecutionTuple) []lib.ExecutionStep {
+ steps := vlvc.getRawExecutionSteps(et, false)
+
+ executorEndOffset := sumStagesDuration(vlvc.Stages) + time.Duration(vlvc.GracefulStop.Duration)
+ // Handle graceful ramp-downs, if we have them
+ if vlvc.GracefulRampDown.Duration > 0 {
+ steps = vlvc.reserveVUsForGracefulRampDownsOld(steps, executorEndOffset)
+ }
+
+ // If the last PlannedVUs value wasn't 0, add a last step with 0
+ if steps[len(steps)-1].PlannedVUs != 0 {
+ steps = append(steps, lib.ExecutionStep{TimeOffset: executorEndOffset, PlannedVUs: 0})
+ }
+
+ return steps
+}
+
// NewExecutor creates a new RampingVUs executor
func (vlvc RampingVUsConfig) NewExecutor(es *lib.ExecutionState, logger *logrus.Entry) (lib.Executor, error) {
return RampingVUs{
diff --git a/lib/executor/ramping_vus_test.go b/lib/executor/ramping_vus_test.go
index bdd2d710..b644b31e 100644
--- a/lib/executor/ramping_vus_test.go
+++ b/lib/executor/ramping_vus_test.go
@@ -1282,3 +1282,43 @@ func TestSumRandomSegmentSequenceMatchesNoSegment(t *testing.T) {
})
}
}
+
+func TestGracefulRampDownOptimization(t *testing.T) {
+ t.Parallel()
+
+ seed := time.Now().UnixNano()
+ r := rand.New(rand.NewSource(seed))
+ t.Logf("Random source seeded with %d\n", seed)
+
+ randomStageCount := r.Int63n(100)
+ randomStages := make([]Stage, randomStageCount)
+ for i := int64(0); i < randomStageCount; i++ {
+ randomStages[i] = Stage{
+ Duration: types.NullDurationFrom(time.Duration(r.Int63n(int64(1 * time.Hour)))),
+ Target: null.IntFrom(r.Int63n(10000)),
+ }
+ }
+
+ randomConfig := RampingVUsConfig{
+ BaseConfig: BaseConfig{
+ Name: "test",
+ Type: rampingVUsType,
+ GracefulStop: types.NullDurationFrom(time.Duration(r.Int63n(int64(10 * time.Minute)))),
+ },
+ StartVUs: null.IntFrom(r.Int63n(10000)),
+ GracefulRampDown: types.NullDurationFrom(time.Duration(r.Int63n(int64(10 * time.Minute)))),
+ Stages: randomStages,
+ }
+
+ noEss, err := lib.NewExecutionTuple(nil, nil)
+ require.NoError(t, err)
+ require.Equal(t, randomConfig.GetExecutionRequirementsOld(noEss), randomConfig.GetExecutionRequirements(noEss))
+
+ randEssSegCount := r.Int63n(20) + 1
+ randESS := generateRandomSequence(t, randEssSegCount, 100, r)
+ randMySegNumber := r.Int63n(randEssSegCount)
+ tuple, err := lib.NewExecutionTuple(randESS[randMySegNumber], &randESS)
+ require.NoError(t, err)
+
+ assert.Equal(t, randomConfig.GetExecutionRequirementsOld(tuple), randomConfig.GetExecutionRequirements(tuple))
+} and running them with |
This should take care of the problem mentioned in #1499 (comment), though that whole issue isn't solved — there are still things we can optimize in the
ramping-vus
executor. I'm still testing this, and might even add a few more unit tests, but for now this ugly chunk of debug code is helpful in allowing me to visually spot any issues with funky configurations that would take me a ton of time to convert to proper unit tests: