Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix the execution derivation bug and work around the k6 0.24.0 wrong tar metadata #1057

Merged
merged 3 commits into from Jun 28, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion cmd/archive.go
Expand Up @@ -76,7 +76,7 @@ An archive is a fully self-contained test run, and can be executed identically e
return err
}

if cerr := validateConfig(conf); cerr != nil {
if _, cerr := deriveAndValidateConfig(conf); cerr != nil {
return ExitCode{cerr, invalidConfigErrorCode}
}

Expand Down
10 changes: 6 additions & 4 deletions cmd/cloud.go
Expand Up @@ -31,6 +31,7 @@ import (

"github.com/kelseyhightower/envconfig"
"github.com/loadimpact/k6/lib"
"github.com/loadimpact/k6/lib/consts"
"github.com/loadimpact/k6/stats/cloud"
"github.com/loadimpact/k6/ui"
"github.com/pkg/errors"
Expand All @@ -56,7 +57,7 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud
Args: exactArgsWithMsg(1, "arg should either be \"-\", if reading script from stdin, or a path to a script file"),
RunE: func(cmd *cobra.Command, args []string) error {
//TODO: disable in quiet mode?
_, _ = BannerColor.Fprintf(stdout, "\n%s\n\n", Banner)
_, _ = BannerColor.Fprintf(stdout, "\n%s\n\n", consts.Banner)
initBar := ui.ProgressBar{
Width: 60,
Left: func() string { return " uploading script" },
Expand Down Expand Up @@ -95,7 +96,8 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud
return err
}

if cerr := validateConfig(conf); cerr != nil {
derivedConf, cerr := deriveAndValidateConfig(conf)
if cerr != nil {
return ExitCode{cerr, invalidConfigErrorCode}
}

Expand All @@ -105,7 +107,7 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud
}

// Cloud config
cloudConfig := cloud.NewConfig().Apply(conf.Collectors.Cloud)
cloudConfig := cloud.NewConfig().Apply(derivedConf.Collectors.Cloud)
if err := envconfig.Process("k6", &cloudConfig); err != nil {
return err
}
Expand Down Expand Up @@ -161,7 +163,7 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud
}

// Start cloud test run
client := cloud.NewClient(cloudConfig.Token.String, cloudConfig.Host.String, Version)
client := cloud.NewClient(cloudConfig.Token.String, cloudConfig.Host.String, consts.Version)
if err := client.ValidateOptions(arc.Options); err != nil {
return err
}
Expand Down
3 changes: 2 additions & 1 deletion cmd/collectors.go
Expand Up @@ -28,6 +28,7 @@ import (

"github.com/kelseyhightower/envconfig"
"github.com/loadimpact/k6/lib"
"github.com/loadimpact/k6/lib/consts"
"github.com/loadimpact/k6/stats/cloud"
"github.com/loadimpact/k6/stats/datadog"
"github.com/loadimpact/k6/stats/influxdb"
Expand Down Expand Up @@ -84,7 +85,7 @@ func newCollector(collectorName, arg string, src *lib.SourceData, conf Config) (
if arg != "" {
config.Name = null.StringFrom(arg)
}
return cloud.New(config, src, conf.Options, Version)
return cloud.New(config, src, conf.Options, consts.Version)
case collectorKafka:
config := kafka.NewConfig().Apply(conf.Collectors.Kafka)
if err := envconfig.Process("k6", &config); err != nil {
Expand Down
112 changes: 72 additions & 40 deletions cmd/config.go
Expand Up @@ -25,13 +25,15 @@ import (
"fmt"
"os"
"path/filepath"
"reflect"
"strings"

"errors"

"github.com/kelseyhightower/envconfig"
"github.com/loadimpact/k6/lib"
"github.com/loadimpact/k6/lib/scheduler"
"github.com/loadimpact/k6/lib/types"
"github.com/loadimpact/k6/stats/cloud"
"github.com/loadimpact/k6/stats/datadog"
"github.com/loadimpact/k6/stats/influxdb"
Expand Down Expand Up @@ -187,67 +189,89 @@ func (e executionConflictConfigError) Error() string {

var _ error = executionConflictConfigError("")

// getConstantLoopingVUsExecution builds a scheduler configuration map, under
// the default scheduler name, that loops the given number of VUs for the
// given duration. It is the derived equivalent of the `duration` shortcut.
func getConstantLoopingVUsExecution(duration types.NullDuration, vus null.Int) scheduler.ConfigMap {
	cfg := scheduler.NewConstantLoopingVUsConfig(lib.DefaultSchedulerName)
	cfg.Duration = duration
	cfg.VUs = vus
	return scheduler.ConfigMap{lib.DefaultSchedulerName: cfg}
}

// getVariableLoopingVUsExecution builds a scheduler configuration map, under
// the default scheduler name, from the `stages` shortcut option. Stages
// without a valid duration are skipped.
func getVariableLoopingVUsExecution(stages []lib.Stage, startVUs null.Int) scheduler.ConfigMap {
	cfg := scheduler.NewVariableLoopingVUsConfig(lib.DefaultSchedulerName)
	cfg.StartVUs = startVUs
	for _, stage := range stages {
		if !stage.Duration.Valid {
			continue
		}
		cfg.Stages = append(cfg.Stages, scheduler.Stage{Duration: stage.Duration, Target: stage.Target})
	}
	return scheduler.ConfigMap{lib.DefaultSchedulerName: cfg}
}

// getSharedIterationsExecution builds a scheduler configuration map, under
// the default scheduler name, from the `iterations` shortcut option, with an
// optional maximum duration cap when `duration` is also set.
func getSharedIterationsExecution(iterations null.Int, duration types.NullDuration, vus null.Int) scheduler.ConfigMap {
	cfg := scheduler.NewSharedIterationsConfig(lib.DefaultSchedulerName)
	cfg.Iterations = iterations
	cfg.VUs = vus
	if duration.Valid {
		cfg.MaxDuration = duration
	}
	return scheduler.ConfigMap{lib.DefaultSchedulerName: cfg}
}

// checkExecutionMismatch compares an explicitly specified `execution` option
// against the execution config derived from a shortcut option (named by
// shortcutField, e.g. "duration"). Nothing specified is fine; an exact match
// is tolerated with a warning (to work around the k6 v0.24.0 archive
// metadata.js options that wrongly contain `execution`); any difference is
// an executionConflictConfigError.
func checkExecutionMismatch(observedExecution, derivedExecution scheduler.ConfigMap, shortcutField string) error {
	switch {
	case observedExecution == nil:
		return nil
	case reflect.DeepEqual(observedExecution, derivedExecution):
		// Work around the v0.24.0 archive metadata.js options that wrongly contain execution
		log.Warnf("ignoring the specified `execution` options because they match the ones derived from `%s`", shortcutField)
		return nil
	default:
		errMsg := fmt.Sprintf("specifying both `%s` and `execution` is not supported", shortcutField)
		return executionConflictConfigError(errMsg)
	}
}

// This checks for conflicting options and turns any shortcut options (i.e. duration, iterations,
// stages) into the proper scheduler configuration
func buildExecutionConfig(conf Config) (Config, error) {
func deriveExecutionConfig(conf Config) (Config, error) {
result := conf
switch {
case conf.Duration.Valid:
if conf.Iterations.Valid {
case conf.Iterations.Valid:
if len(conf.Stages) > 0 { // stages isn't nil (not set) and isn't explicitly set to empty
//TODO: make this an executionConflictConfigError in the next version
log.Warnf("Specifying both duration and iterations is deprecated and won't be supported in the future k6 versions")
log.Warnf("Specifying both iterations and stages is deprecated and won't be supported in the future k6 versions")
}

result.Execution = getSharedIterationsExecution(conf.Iterations, conf.Duration, conf.VUs)
if err := checkExecutionMismatch(conf.Execution, result.Execution, "iterations"); err != nil {
return conf, err
}
// TODO: maybe add a new flag that will be used as a shortcut to per-VU iterations?

case conf.Duration.Valid:
if len(conf.Stages) > 0 { // stages isn't nil (not set) and isn't explicitly set to empty
//TODO: make this an executionConflictConfigError in the next version
log.Warnf("Specifying both duration and stages is deprecated and won't be supported in the future k6 versions")
}

if conf.Execution != nil {
return result, executionConflictConfigError("specifying both duration and execution is not supported")
derivedExecConfig := getConstantLoopingVUsExecution(conf.Duration, conf.VUs)
if err := checkExecutionMismatch(conf.Execution, derivedExecConfig, "duration"); err != nil {
return conf, err
}

if conf.Duration.Duration <= 0 {
//TODO: make this an executionConflictConfigError in the next version
log.Warnf("Specifying infinite duration in this way is deprecated and won't be supported in the future k6 versions")
} else {
ds := scheduler.NewConstantLoopingVUsConfig(lib.DefaultSchedulerName)
ds.VUs = conf.VUs
ds.Duration = conf.Duration
ds.Interruptible = null.NewBool(true, false) // Preserve backwards compatibility
result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds}
result.Execution = derivedExecConfig
}

case len(conf.Stages) > 0: // stages isn't nil (not set) and isn't explicitly set to empty
if conf.Iterations.Valid {
//TODO: make this an executionConflictConfigError in the next version
log.Warnf("Specifying both iterations and stages is deprecated and won't be supported in the future k6 versions")
}

if conf.Execution != nil {
return conf, executionConflictConfigError("specifying both stages and execution is not supported")
}

ds := scheduler.NewVariableLoopingVUsConfig(lib.DefaultSchedulerName)
ds.StartVUs = conf.VUs
for _, s := range conf.Stages {
if s.Duration.Valid {
ds.Stages = append(ds.Stages, scheduler.Stage{Duration: s.Duration, Target: s.Target})
}
result.Execution = getVariableLoopingVUsExecution(conf.Stages, conf.VUs)
if err := checkExecutionMismatch(conf.Execution, result.Execution, "stages"); err != nil {
return conf, err
}
ds.Interruptible = null.NewBool(true, false) // Preserve backwards compatibility
result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds}

case conf.Iterations.Valid:
if conf.Execution != nil {
return conf, executionConflictConfigError("specifying both iterations and execution is not supported")
}
// TODO: maybe add a new flag that will be used as a shortcut to per-VU iterations?

ds := scheduler.NewSharedIterationsConfig(lib.DefaultSchedulerName)
ds.VUs = conf.VUs
ds.Iterations = conf.Iterations
result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds}

default:
if conf.Execution != nil { // If someone set this, regardless if its empty
Expand Down Expand Up @@ -302,7 +326,15 @@ func getConsolidatedConfig(fs afero.Fs, cliConf Config, runner lib.Runner) (conf
}
conf = conf.Apply(envConf).Apply(cliConf)

return buildExecutionConfig(conf)
return conf, nil
}

// deriveAndValidateConfig turns any shortcut options (duration, iterations,
// stages) in conf into a proper scheduler execution configuration and then
// validates the resulting config. The derived config is returned even when an
// error occurs, so callers can still inspect it.
func deriveAndValidateConfig(conf Config) (Config, error) {
	result, err := deriveExecutionConfig(conf)
	if err != nil {
		return result, err
	}
	// Validate the derived config rather than the original one, so that the
	// execution configuration produced above is also checked — callers (e.g.
	// `k6 archive`) may discard the result and rely solely on this validation.
	return result, validateConfig(result)
}

//TODO: remove ↓
Expand Down
72 changes: 59 additions & 13 deletions cmd/config_consolidation_test.go
Expand Up @@ -79,7 +79,6 @@ func verifyOneIterPerOneVU(t *testing.T, c Config) {
require.IsType(t, scheduler.PerVUIteationsConfig{}, sched)
perVuIters, ok := sched.(scheduler.PerVUIteationsConfig)
require.True(t, ok)
assert.Equal(t, null.NewBool(false, false), perVuIters.Interruptible)
assert.Equal(t, null.NewInt(1, false), perVuIters.Iterations)
assert.Equal(t, null.NewInt(1, false), perVuIters.VUs)
}
Expand All @@ -105,7 +104,6 @@ func verifyConstLoopingVUs(vus null.Int, duration time.Duration) func(t *testing
require.IsType(t, scheduler.ConstantLoopingVUsConfig{}, sched)
clvc, ok := sched.(scheduler.ConstantLoopingVUsConfig)
require.True(t, ok)
assert.Equal(t, null.NewBool(true, false), clvc.Interruptible)
assert.Equal(t, vus, clvc.VUs)
assert.Equal(t, types.NullDurationFrom(duration), clvc.Duration)
assert.Equal(t, vus, c.VUs)
Expand All @@ -120,7 +118,6 @@ func verifyVarLoopingVUs(startVus null.Int, stages []scheduler.Stage) func(t *te
require.IsType(t, scheduler.VariableLoopingVUsConfig{}, sched)
clvc, ok := sched.(scheduler.VariableLoopingVUsConfig)
require.True(t, ok)
assert.Equal(t, null.NewBool(true, false), clvc.Interruptible)
assert.Equal(t, startVus, clvc.StartVUs)
assert.Equal(t, startVus, c.VUs)
assert.Equal(t, stages, clvc.Stages)
Expand Down Expand Up @@ -229,6 +226,7 @@ type exp struct {
cliParseError bool
cliReadError bool
consolidationError bool
derivationError bool
validationErrors bool
logWarning bool //TODO: remove in the next version?
}
Expand Down Expand Up @@ -267,21 +265,27 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase {
opts{cli: []string{"-s", "1m6s:5", "--vus", "10"}}, exp{},
verifyVarLoopingVUs(null.NewInt(10, true), buildStages(66, 5)),
},
{opts{cli: []string{"-u", "1", "-i", "6", "-d", "10s"}}, exp{}, func(t *testing.T, c Config) {
verifySharedIters(I(1), I(6))(t, c)
sharedIterConfig := c.Execution[lib.DefaultSchedulerName].(scheduler.SharedIteationsConfig)
assert.Equal(t, time.Duration(sharedIterConfig.MaxDuration.Duration), 10*time.Second)
}},
// This should get a validation error since VUs are more than the shared iterations
{opts{cli: []string{"--vus", "10", "-i", "6"}}, exp{validationErrors: true}, verifySharedIters(I(10), I(6))},
{opts{cli: []string{"-s", "10s:5", "-s", "10s:"}}, exp{validationErrors: true}, nil},
{opts{fs: defaultConfig(`{"stages": [{"duration": "20s"}], "vus": 10}`)}, exp{validationErrors: true}, nil},
// These should emit a warning
//TODO: in next version, those should be an error
{opts{cli: []string{"-u", "1", "-i", "6", "-d", "10s"}}, exp{logWarning: true}, nil},
{opts{cli: []string{"-u", "2", "-d", "10s", "-s", "10s:20"}}, exp{logWarning: true}, nil},
{opts{cli: []string{"-u", "3", "-i", "5", "-s", "10s:20"}}, exp{logWarning: true}, nil},
{opts{cli: []string{"-u", "3", "-d", "0"}}, exp{logWarning: true}, nil},
{
opts{runner: &lib.Options{
VUs: null.IntFrom(5),
Duration: types.NullDurationFrom(44 * time.Second),
Iterations: null.IntFrom(10),
VUs: null.IntFrom(5),
Duration: types.NullDurationFrom(44 * time.Second),
Stages: []lib.Stage{
{Duration: types.NullDurationFrom(3 * time.Second), Target: I(20)},
},
}}, exp{logWarning: true}, nil,
},
{opts{fs: defaultConfig(`{"execution": {}}`)}, exp{logWarning: true}, verifyOneIterPerOneVU},
Expand Down Expand Up @@ -319,7 +323,7 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase {
{
opts{
runner: &lib.Options{VUs: null.IntFrom(5), Duration: types.NullDurationFrom(50 * time.Second)},
cli: []string{"--iterations", "5"},
cli: []string{"--stage", "5s:5"},
},
//TODO: this shouldn't be a warning in the next version, but the result will be different
exp{logWarning: true}, verifyConstLoopingVUs(I(5), 50*time.Second),
Expand All @@ -339,7 +343,7 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase {
env: []string{"K6_VUS=15", "K6_ITERATIONS=15"},
},
exp{logWarning: true}, //TODO: this won't be a warning in the next version, but the result will be different
verifyVarLoopingVUs(null.NewInt(15, true), buildStages(20, 10)),
verifySharedIters(I(15), I(15)),
},
{
opts{
Expand All @@ -365,7 +369,42 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase {
},
exp{}, verifySharedIters(I(12), I(25)),
},

{
opts{
fs: defaultConfig(`
{
"execution": {
"default": {
"type": "constant-looping-vus",
"vus": 10,
"duration": "60s"
}
},
"vus": 20,
"duration": "60s"
}`,
),
},
exp{derivationError: true}, nil,
},
{
opts{
fs: defaultConfig(`
{
"execution": {
"default": {
"type": "constant-looping-vus",
"vus": 10,
"duration": "60s"
}
},
"vus": 10,
"duration": "60s"
}`,
),
},
exp{logWarning: true}, verifyConstLoopingVUs(I(10), 60*time.Second),
},
// Just in case, verify that no options will result in the same 1 vu 1 iter config
{opts{}, exp{}, verifyOneIterPerOneVU},
//TODO: test for differences between flagsets
Expand Down Expand Up @@ -421,29 +460,36 @@ func runTestCase(
testCase.options.fs = afero.NewMemMapFs() // create an empty FS if it wasn't supplied
}

result, err := getConsolidatedConfig(testCase.options.fs, cliConf, runner)
consolidatedConfig, err := getConsolidatedConfig(testCase.options.fs, cliConf, runner)
if testCase.expected.consolidationError {
require.Error(t, err)
return
}
require.NoError(t, err)

derivedConfig, err := deriveExecutionConfig(consolidatedConfig)
if testCase.expected.derivationError {
require.Error(t, err)
return
}
require.NoError(t, err)

warnings := logHook.Drain()
if testCase.expected.logWarning {
assert.NotEmpty(t, warnings)
} else {
assert.Empty(t, warnings)
}

validationErrors := result.Validate()
validationErrors := derivedConfig.Validate()
if testCase.expected.validationErrors {
assert.NotEmpty(t, validationErrors)
} else {
assert.Empty(t, validationErrors)
}

if testCase.customValidator != nil {
testCase.customValidator(t, result)
testCase.customValidator(t, derivedConfig)
}
}

Expand Down