From 7a8862e0f617b4192179a4faa9bb71acdf2f9c85 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Tue, 13 Feb 2024 20:23:12 +0100 Subject: [PATCH 01/32] Control cancelation with contexts --- cmd/benchmark.go | 17 ++--- cmd/service.go | 2 +- cmd/testrunner.go | 10 ++- internal/benchrunner/runner.go | 15 +++-- .../benchrunner/runners/pipeline/runner.go | 7 +- internal/benchrunner/runners/rally/runner.go | 28 ++++---- internal/benchrunner/runners/stream/runner.go | 45 ++++++------- internal/benchrunner/runners/system/runner.go | 62 ++++++++--------- internal/compose/compose.go | 24 +++---- internal/kibana/agents.go | 21 +++--- internal/service/boot.go | 5 +- internal/servicedeployer/compose.go | 5 +- internal/servicedeployer/custom_agent.go | 5 +- internal/servicedeployer/kubernetes.go | 11 +-- internal/servicedeployer/service_deployer.go | 4 +- internal/servicedeployer/terraform.go | 9 +-- internal/signal/sigint.go | 45 ------------- internal/testrunner/runners/asset/runner.go | 5 +- .../testrunner/runners/pipeline/runner.go | 15 +++-- internal/testrunner/runners/static/runner.go | 9 +-- internal/testrunner/runners/system/runner.go | 67 +++++++++---------- internal/testrunner/testrunner.go | 11 +-- main.go | 7 +- 23 files changed, 182 insertions(+), 247 deletions(-) delete mode 100644 internal/signal/sigint.go diff --git a/cmd/benchmark.go b/cmd/benchmark.go index 4364ebaab9..0238aee15e 100644 --- a/cmd/benchmark.go +++ b/cmd/benchmark.go @@ -30,7 +30,6 @@ import ( "github.com/elastic/elastic-package/internal/cobraext" "github.com/elastic/elastic-package/internal/common" "github.com/elastic/elastic-package/internal/packages" - "github.com/elastic/elastic-package/internal/signal" "github.com/elastic/elastic-package/internal/testrunner" ) @@ -156,8 +155,6 @@ func pipelineCommandAction(cmd *cobra.Command, args []string) error { } } - signal.Enable() - benchFolders, err := pipeline.FindBenchmarkFolders(packageRootPath, dataStreams) if err != nil { return 
fmt.Errorf("unable to determine benchmark folder paths: %w", err) @@ -204,7 +201,7 @@ func pipelineCommandAction(cmd *cobra.Command, args []string) error { ) runner := pipeline.NewPipelineBenchmark(opts) - r, err := benchrunner.Run(runner) + r, err := benchrunner.Run(cmd.Context(), runner) if err != nil { return fmt.Errorf("error running package pipeline benchmarks: %w", err) @@ -308,8 +305,6 @@ func rallyCommandAction(cmd *cobra.Command, args []string) error { return err } - signal.Enable() - esClient, err := stack.NewElasticsearchClientFromProfile(profile) if err != nil { return fmt.Errorf("can't create Elasticsearch client: %w", err) @@ -348,7 +343,7 @@ func rallyCommandAction(cmd *cobra.Command, args []string) error { runner := rally.NewRallyBenchmark(rally.NewOptions(withOpts...)) - r, err := benchrunner.Run(runner) + r, err := benchrunner.Run(cmd.Context(), runner) if errors.Is(err, rally.ErrDryRun) { return nil } @@ -478,8 +473,6 @@ func streamCommandAction(cmd *cobra.Command, args []string) error { return err } - signal.Enable() - esClient, err := stack.NewElasticsearchClientFromProfile(profile) if err != nil { return fmt.Errorf("can't create Elasticsearch client: %w", err) @@ -510,7 +503,7 @@ func streamCommandAction(cmd *cobra.Command, args []string) error { runner := stream.NewStreamBenchmark(stream.NewOptions(withOpts...)) - _, err = benchrunner.Run(runner) + _, err = benchrunner.Run(cmd.Context(), runner) if err != nil { return fmt.Errorf("error running package stream benchmarks: %w", err) } @@ -583,8 +576,6 @@ func systemCommandAction(cmd *cobra.Command, args []string) error { return err } - signal.Enable() - esClient, err := stack.NewElasticsearchClientFromProfile(profile) if err != nil { return fmt.Errorf("can't create Elasticsearch client: %w", err) @@ -622,7 +613,7 @@ func systemCommandAction(cmd *cobra.Command, args []string) error { runner := system.NewSystemBenchmark(system.NewOptions(withOpts...)) - r, err := benchrunner.Run(runner) + r, err 
:= benchrunner.Run(cmd.Context(), runner) if err != nil { return fmt.Errorf("error running package system benchmarks: %w", err) } diff --git a/cmd/service.go b/cmd/service.go index 05c8d41a4e..ed542838ce 100644 --- a/cmd/service.go +++ b/cmd/service.go @@ -78,7 +78,7 @@ func upCommandAction(cmd *cobra.Command, args []string) error { } _, serviceName := filepath.Split(packageRoot) - err = service.BootUp(service.Options{ + err = service.BootUp(cmd.Context(), service.Options{ Profile: profile, ServiceName: serviceName, PackageRootPath: packageRoot, diff --git a/cmd/testrunner.go b/cmd/testrunner.go index f4308d421b..50d0874278 100644 --- a/cmd/testrunner.go +++ b/cmd/testrunner.go @@ -5,6 +5,7 @@ package cmd import ( + "context" "errors" "fmt" "os" @@ -19,7 +20,6 @@ import ( "github.com/elastic/elastic-package/internal/install" "github.com/elastic/elastic-package/internal/kibana" "github.com/elastic/elastic-package/internal/packages" - "github.com/elastic/elastic-package/internal/signal" "github.com/elastic/elastic-package/internal/stack" "github.com/elastic/elastic-package/internal/testrunner" "github.com/elastic/elastic-package/internal/testrunner/reporters/formats" @@ -78,7 +78,7 @@ func setupTestCommand() *cobraext.Command { cmd.PersistentFlags().StringP(cobraext.ProfileFlagName, "p", "", fmt.Sprintf(cobraext.ProfileFlagDescription, install.ProfileNameEnvVar)) for testType, runner := range testrunner.TestRunners() { - action := testTypeCommandActionFactory(runner) + action := testTypeCommandActionFactory(cmd.Context(), runner) testTypeCmdActions = append(testTypeCmdActions, action) testTypeCmd := &cobra.Command{ @@ -99,7 +99,7 @@ func setupTestCommand() *cobraext.Command { return cobraext.NewCommand(cmd, cobraext.ContextPackage) } -func testTypeCommandActionFactory(runner testrunner.TestRunner) cobraext.CommandAction { +func testTypeCommandActionFactory(ctx context.Context, runner testrunner.TestRunner) cobraext.CommandAction { testType := runner.Type() return 
func(cmd *cobra.Command, args []string) error { cmd.Printf("Run %s tests for the package\n", testType) @@ -156,8 +156,6 @@ func testTypeCommandActionFactory(runner testrunner.TestRunner) cobraext.Command return fmt.Errorf("cannot determine if package has data streams: %w", err) } - signal.Enable() - var testFolders []testrunner.TestFolder if hasDataStreams && runner.CanRunPerDataStream() { var dataStreams []string @@ -247,7 +245,7 @@ func testTypeCommandActionFactory(runner testrunner.TestRunner) cobraext.Command var results []testrunner.TestResult for _, folder := range testFolders { - r, err := testrunner.Run(testType, testrunner.TestOptions{ + r, err := testrunner.Run(ctx, testType, testrunner.TestOptions{ Profile: profile, TestFolder: folder, PackageRootPath: packageRootPath, diff --git a/internal/benchrunner/runner.go b/internal/benchrunner/runner.go index 13e939957f..4bbe84d14d 100644 --- a/internal/benchrunner/runner.go +++ b/internal/benchrunner/runner.go @@ -5,6 +5,7 @@ package benchrunner import ( + "context" "errors" "fmt" @@ -16,29 +17,29 @@ import ( type Type string type Runner interface { - SetUp() error - Run() (reporters.Reportable, error) - TearDown() error + SetUp(context.Context) error + Run(context.Context) (reporters.Reportable, error) + TearDown(context.Context) error } // Run method delegates execution to the benchmark runner. 
-func Run(runner Runner) (reporters.Reportable, error) { +func Run(ctx context.Context, runner Runner) (reporters.Reportable, error) { if runner == nil { return nil, errors.New("a runner is required") } defer func() { - tdErr := runner.TearDown() + tdErr := runner.TearDown(ctx) if tdErr != nil { logger.Errorf("could not teardown benchmark runner: %v", tdErr) } }() - if err := runner.SetUp(); err != nil { + if err := runner.SetUp(ctx); err != nil { return nil, fmt.Errorf("could not set up benchmark runner: %w", err) } - report, err := runner.Run() + report, err := runner.Run(ctx) if err != nil { return nil, fmt.Errorf("could not complete benchmark run: %w", err) } diff --git a/internal/benchrunner/runners/pipeline/runner.go b/internal/benchrunner/runners/pipeline/runner.go index e55f293e87..bc917c22be 100644 --- a/internal/benchrunner/runners/pipeline/runner.go +++ b/internal/benchrunner/runners/pipeline/runner.go @@ -5,6 +5,7 @@ package pipeline import ( + "context" "encoding/json" "errors" "fmt" @@ -38,7 +39,7 @@ func NewPipelineBenchmark(opts Options) benchrunner.Runner { return &runner{options: opts} } -func (r *runner) SetUp() error { +func (r *runner) SetUp(ctx context.Context) error { dataStreamPath, found, err := packages.FindDataStreamRootForPath(r.options.Folder.Path) if err != nil { return fmt.Errorf("locating data_stream root failed: %w", err) @@ -56,7 +57,7 @@ func (r *runner) SetUp() error { } // TearDown shuts down the pipeline benchmark runner. 
-func (r *runner) TearDown() error { +func (r *runner) TearDown(ctx context.Context) error { if err := ingest.UninstallPipelines(r.options.API, r.pipelines); err != nil { return fmt.Errorf("uninstalling ingest pipelines failed: %w", err) } @@ -64,7 +65,7 @@ func (r *runner) TearDown() error { } // Run runs the pipeline benchmarks defined under the given folder -func (r *runner) Run() (reporters.Reportable, error) { +func (r *runner) Run(ctx context.Context) (reporters.Reportable, error) { return r.run() } diff --git a/internal/benchrunner/runners/rally/runner.go b/internal/benchrunner/runners/rally/runner.go index 3b9cbb506c..e38d4511ee 100644 --- a/internal/benchrunner/runners/rally/runner.go +++ b/internal/benchrunner/runners/rally/runner.go @@ -42,7 +42,6 @@ import ( "github.com/elastic/elastic-package/internal/multierror" "github.com/elastic/elastic-package/internal/packages" "github.com/elastic/elastic-package/internal/servicedeployer" - "github.com/elastic/elastic-package/internal/signal" ) const ( @@ -176,19 +175,22 @@ func NewRallyBenchmark(opts Options) benchrunner.Runner { return &runner{options: opts} } -func (r *runner) SetUp() error { - return r.setUp() +func (r *runner) SetUp(ctx context.Context) error { + return r.setUp(ctx) } // Run runs the system benchmarks defined under the given folder -func (r *runner) Run() (reporters.Reportable, error) { +func (r *runner) Run(ctx context.Context) (reporters.Reportable, error) { return r.run() } -func (r *runner) TearDown() error { +func (r *runner) TearDown(ctx context.Context) error { if r.options.DeferCleanup > 0 { logger.Debugf("waiting for %s before tearing down...", r.options.DeferCleanup) - signal.Sleep(r.options.DeferCleanup) + select { + case <-time.After(r.options.DeferCleanup): + case <-ctx.Done(): + } } var merr multierror.Error @@ -235,7 +237,7 @@ func (r *runner) createRallyTrackDir(locationManager *locations.LocationManager) return nil } -func (r *runner) setUp() error { +func (r *runner) 
setUp(ctx context.Context) error { locationManager, err := locations.NewLocationManager() if err != nil { return fmt.Errorf("reading service logs directory failed: %w", err) @@ -327,11 +329,7 @@ func (r *runner) setUp() error { return fmt.Errorf("error deleting old data in data stream: %s: %w", r.runtimeDataStream, err) } - cleared, err := waitUntilTrue(func() (bool, error) { - if signal.SIGINT() { - return true, errors.New("SIGINT: cancel clearing data") - } - + cleared, err := waitUntilTrue(ctx, func(context.Context) (bool, error) { hits, err := getTotalHits(r.options.ESAPI, r.runtimeDataStream) return hits == 0, err }, 2*time.Minute) @@ -1163,7 +1161,7 @@ func getTotalHits(esapi *elasticsearch.API, dataStream string) (int, error) { return numHits, nil } -func waitUntilTrue(fn func() (bool, error), timeout time.Duration) (bool, error) { +func waitUntilTrue(ctx context.Context, fn func(ctx context.Context) (bool, error), timeout time.Duration) (bool, error) { timeoutTimer := time.NewTimer(timeout) defer timeoutTimer.Stop() @@ -1171,7 +1169,7 @@ func waitUntilTrue(fn func() (bool, error), timeout time.Duration) (bool, error) defer retryTicker.Stop() for { - result, err := fn() + result, err := fn(ctx) if err != nil { return false, err } @@ -1182,6 +1180,8 @@ func waitUntilTrue(fn func() (bool, error), timeout time.Duration) (bool, error) select { case <-retryTicker.C: continue + case <-ctx.Done(): + return false, fmt.Errorf("context done: %w", ctx.Err()) case <-timeoutTimer.C: return false, nil } diff --git a/internal/benchrunner/runners/stream/runner.go b/internal/benchrunner/runners/stream/runner.go index 490a68e777..998c878ba8 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ -34,7 +34,6 @@ import ( "github.com/elastic/elastic-package/internal/multierror" "github.com/elastic/elastic-package/internal/packages" "github.com/elastic/elastic-package/internal/servicedeployer" - 
"github.com/elastic/elastic-package/internal/signal" ) type runner struct { @@ -59,16 +58,16 @@ func NewStreamBenchmark(opts Options) benchrunner.Runner { return &runner{options: opts} } -func (r *runner) SetUp() error { - return r.setUp() +func (r *runner) SetUp(ctx context.Context) error { + return r.setUp(ctx) } // Run runs the system benchmarks defined under the given folder -func (r *runner) Run() (reporters.Reportable, error) { - return nil, r.run() +func (r *runner) Run(ctx context.Context) (reporters.Reportable, error) { + return nil, r.run(ctx) } -func (r *runner) TearDown() error { +func (r *runner) TearDown(ctx context.Context) error { r.wg.Wait() if !r.options.PerformCleanup { @@ -99,7 +98,7 @@ func (r *runner) TearDown() error { return merr } -func (r *runner) setUp() error { +func (r *runner) setUp(ctx context.Context) error { r.generators = make(map[string]genlib.Generator) r.backFillGenerators = make(map[string]genlib.Generator) r.errChanGenerators = make(chan error) @@ -156,11 +155,7 @@ func (r *runner) setUp() error { return fmt.Errorf("error cleaning up old data in data streams: %w", err) } - cleared, err := waitUntilTrue(func() (bool, error) { - if signal.SIGINT() { - return true, errors.New("SIGINT: cancel clearing data") - } - + cleared, err := waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { totalHits := 0 for _, runtimeDataStream := range r.runtimeDataStreams { hits, err := getTotalHits(r.options.ESAPI, runtimeDataStream) @@ -203,20 +198,16 @@ func (r *runner) wipeDataStreamsOnSetup() error { return nil } -func (r *runner) run() (err error) { +func (r *runner) run(ctx context.Context) (err error) { r.streamData() - for { - select { - case err = <-r.errChanGenerators: - close(r.done) - return err - default: - if signal.SIGINT() { - close(r.done) - return nil - } - } + select { + case err = <-r.errChanGenerators: + close(r.done) + return err + case <-ctx.Done(): + close(r.done) + return nil } } @@ -603,7 +594,7 @@ func 
getTotalHits(esapi *elasticsearch.API, dataStream string) (int, error) { return numHits, nil } -func waitUntilTrue(fn func() (bool, error), timeout time.Duration) (bool, error) { +func waitUntilTrue(ctx context.Context, fn func(ctx context.Context) (bool, error), timeout time.Duration) (bool, error) { timeoutTimer := time.NewTimer(timeout) defer timeoutTimer.Stop() @@ -611,7 +602,7 @@ func waitUntilTrue(fn func() (bool, error), timeout time.Duration) (bool, error) defer retryTicker.Stop() for { - result, err := fn() + result, err := fn(ctx) if err != nil { return false, err } @@ -622,6 +613,8 @@ func waitUntilTrue(fn func() (bool, error), timeout time.Duration) (bool, error) select { case <-retryTicker.C: continue + case <-ctx.Done(): + return false, fmt.Errorf("context done: %w", ctx.Err()) case <-timeoutTimer.C: return false, nil } diff --git a/internal/benchrunner/runners/system/runner.go b/internal/benchrunner/runners/system/runner.go index 3393949587..259796e510 100644 --- a/internal/benchrunner/runners/system/runner.go +++ b/internal/benchrunner/runners/system/runner.go @@ -33,7 +33,6 @@ import ( "github.com/elastic/elastic-package/internal/multierror" "github.com/elastic/elastic-package/internal/packages" "github.com/elastic/elastic-package/internal/servicedeployer" - "github.com/elastic/elastic-package/internal/signal" ) const ( @@ -69,19 +68,23 @@ func NewSystemBenchmark(opts Options) benchrunner.Runner { return &runner{options: opts} } -func (r *runner) SetUp() error { - return r.setUp() +func (r *runner) SetUp(ctx context.Context) error { + return r.setUp(ctx) } // Run runs the system benchmarks defined under the given folder -func (r *runner) Run() (reporters.Reportable, error) { - return r.run() +func (r *runner) Run(ctx context.Context) (reporters.Reportable, error) { + return r.run(ctx) } -func (r *runner) TearDown() error { +func (r *runner) TearDown(ctx context.Context) error { if r.options.DeferCleanup > 0 { logger.Debugf("waiting for %s before 
tearing down...", r.options.DeferCleanup) - signal.Sleep(r.options.DeferCleanup) + select { + case <-time.After(r.options.DeferCleanup): + case <-ctx.Done(): + } + } var merr multierror.Error @@ -127,7 +130,7 @@ func (r *runner) TearDown() error { return merr } -func (r *runner) setUp() error { +func (r *runner) setUp(ctx context.Context) error { locationManager, err := locations.NewLocationManager() if err != nil { return fmt.Errorf("reading service logs directory failed: %w", err) @@ -208,11 +211,7 @@ func (r *runner) setUp() error { return fmt.Errorf("error deleting old data in data stream: %s: %w", r.runtimeDataStream, err) } - cleared, err := waitUntilTrue(func() (bool, error) { - if signal.SIGINT() { - return true, errors.New("SIGINT: cancel clearing data") - } - + cleared, err := waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { hits, err := getTotalHits(r.options.ESAPI, r.runtimeDataStream) return hits == 0, err }, 2*time.Minute) @@ -226,7 +225,7 @@ func (r *runner) setUp() error { return nil } -func (r *runner) run() (report reporters.Reportable, err error) { +func (r *runner) run(ctx context.Context) (report reporters.Reportable, err error) { var service servicedeployer.DeployedService if r.scenario.Corpora.InputService != nil { stackVersion, err := r.options.KibanaClient.Version() @@ -252,7 +251,7 @@ func (r *runner) run() (report reporters.Reportable, err error) { } r.ctxt.Name = r.scenario.Corpora.InputService.Name - service, err = serviceDeployer.SetUp(r.ctxt) + service, err = serviceDeployer.SetUp(ctx, r.ctxt) if err != nil { return nil, fmt.Errorf("could not setup service: %w", err) } @@ -280,7 +279,7 @@ func (r *runner) run() (report reporters.Reportable, err error) { } // once data is generated, enroll agents and assign policy - if err := r.enrollAgents(); err != nil { + if err := r.enrollAgents(ctx); err != nil { return nil, err } @@ -291,7 +290,7 @@ func (r *runner) run() (report reporters.Reportable, err error) { } } - 
finishedOnTime, err := r.waitUntilBenchmarkFinishes() + finishedOnTime, err := r.waitUntilBenchmarkFinishes(ctx) if err != nil { return nil, err } @@ -615,12 +614,9 @@ func (r *runner) runGenerator(destDir string) error { return r.generator.Close() } -func (r *runner) checkEnrolledAgents() ([]kibana.Agent, error) { +func (r *runner) checkEnrolledAgents(ctx context.Context) ([]kibana.Agent, error) { var agents []kibana.Agent - enrolled, err := waitUntilTrue(func() (bool, error) { - if signal.SIGINT() { - return false, errors.New("SIGINT: cancel checking enrolled agents") - } + enrolled, err := waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { allAgents, err := r.options.KibanaClient.ListAgents() if err != nil { return false, fmt.Errorf("could not list agents: %w", err) @@ -642,7 +638,7 @@ func (r *runner) checkEnrolledAgents() ([]kibana.Agent, error) { return agents, nil } -func (r *runner) waitUntilBenchmarkFinishes() (bool, error) { +func (r *runner) waitUntilBenchmarkFinishes(ctx context.Context) (bool, error) { logger.Debug("checking for all data in data stream...") var benchTime *time.Timer if r.scenario.BenchmarkTimePeriod > 0 { @@ -650,11 +646,7 @@ func (r *runner) waitUntilBenchmarkFinishes() (bool, error) { } oldHits := 0 - return waitUntilTrue(func() (bool, error) { - if signal.SIGINT() { - return true, errors.New("SIGINT: cancel waiting for policy assigned") - } - + return waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { var err error hits, err := getTotalHits(r.options.ESAPI, r.runtimeDataStream) if hits == 0 { @@ -679,8 +671,8 @@ func (r *runner) waitUntilBenchmarkFinishes() (bool, error) { }, *r.scenario.WaitForDataTimeout) } -func (r *runner) enrollAgents() error { - agents, err := r.checkEnrolledAgents() +func (r *runner) enrollAgents(ctx context.Context) error { + agents, err := r.checkEnrolledAgents(ctx) if err != nil { return fmt.Errorf("can't check enrolled agents: %w", err) } @@ -695,7 +687,7 @@ func (r *runner) 
enrollAgents() error { // Assign policy to agent handlers[i] = func() error { logger.Debug("reassigning original policy back to agent...") - if err := r.options.KibanaClient.AssignPolicyToAgent(agent, origPolicy); err != nil { + if err := r.options.KibanaClient.AssignPolicyToAgent(ctx, agent, origPolicy); err != nil { return fmt.Errorf("error reassigning original policy to agent %s: %w", agent.ID, err) } return nil @@ -707,7 +699,7 @@ func (r *runner) enrollAgents() error { } logger.Debug("assigning package data stream to agent...") - if err := r.options.KibanaClient.AssignPolicyToAgent(agent, *policyWithDataStream); err != nil { + if err := r.options.KibanaClient.AssignPolicyToAgent(ctx, agent, *policyWithDataStream); err != nil { return fmt.Errorf("could not assign policy to agent: %w", err) } } @@ -970,7 +962,7 @@ func filterAgents(allAgents []kibana.Agent) []kibana.Agent { return filtered } -func waitUntilTrue(fn func() (bool, error), timeout time.Duration) (bool, error) { +func waitUntilTrue(ctx context.Context, fn func(context.Context) (bool, error), timeout time.Duration) (bool, error) { timeoutTicker := time.NewTicker(timeout) defer timeoutTicker.Stop() @@ -978,7 +970,7 @@ func waitUntilTrue(fn func() (bool, error), timeout time.Duration) (bool, error) defer retryTicker.Stop() for { - result, err := fn() + result, err := fn(ctx) if err != nil { return false, err } @@ -989,6 +981,8 @@ func waitUntilTrue(fn func() (bool, error), timeout time.Duration) (bool, error) select { case <-retryTicker.C: continue + case <-ctx.Done(): + return false, fmt.Errorf("context done: %w", ctx.Err()) case <-timeoutTicker.C: return false, nil } diff --git a/internal/compose/compose.go b/internal/compose/compose.go index 9b8ceda59b..c1f990437f 100644 --- a/internal/compose/compose.go +++ b/internal/compose/compose.go @@ -6,6 +6,7 @@ package compose import ( "bytes" + "context" "errors" "fmt" "io" @@ -22,7 +23,6 @@ import ( "github.com/elastic/elastic-package/internal/docker" 
"github.com/elastic/elastic-package/internal/environment" "github.com/elastic/elastic-package/internal/logger" - "github.com/elastic/elastic-package/internal/signal" ) const ( @@ -335,7 +335,7 @@ func (p *Project) Logs(opts CommandOptions) ([]byte, error) { } // WaitForHealthy method waits until all containers are healthy. -func (p *Project) WaitForHealthy(opts CommandOptions) error { +func (p *Project) WaitForHealthy(ctx context.Context, opts CommandOptions) error { // Read container IDs args := p.baseArgs() args = append(args, "ps", "-a", "-q") @@ -345,19 +345,11 @@ func (p *Project) WaitForHealthy(opts CommandOptions) error { return err } - startTime := time.Now() - timeout := startTime.Add(waitForHealthyTimeout) + ctx, stop := context.WithTimeout(ctx, waitForHealthyTimeout) + defer stop() containerIDs := strings.Fields(b.String()) for { - if time.Now().After(timeout) { - return errors.New("timeout waiting for healthy container") - } - - if signal.SIGINT() { - return errors.New("SIGINT: cancel waiting for policy assigned") - } - // NOTE: healthy must be reinitialized at each iteration healthy := true @@ -399,8 +391,12 @@ func (p *Project) WaitForHealthy(opts CommandOptions) error { break } - // NOTE: using sleep does not guarantee interval but it's ok for this use case - time.Sleep(waitForHealthyInterval) + select { + case <-ctx.Done(): + return fmt.Errorf("context done: %w", ctx.Err()) + // NOTE: using after does not guarantee interval but it's ok for this use case + case <-time.After(waitForHealthyInterval): + } } return nil diff --git a/internal/kibana/agents.go b/internal/kibana/agents.go index f9add95512..1d8ab0fbcd 100644 --- a/internal/kibana/agents.go +++ b/internal/kibana/agents.go @@ -5,14 +5,13 @@ package kibana import ( + "context" "encoding/json" - "errors" "fmt" "net/http" "time" "github.com/elastic/elastic-package/internal/logger" - "github.com/elastic/elastic-package/internal/signal" ) var ( @@ -69,7 +68,7 @@ func (c *Client) ListAgents() 
([]Agent, error) { } // AssignPolicyToAgent assigns the given Policy to the given Agent. -func (c *Client) AssignPolicyToAgent(a Agent, p Policy) error { +func (c *Client) AssignPolicyToAgent(ctx context.Context, a Agent, p Policy) error { reqBody := `{ "policy_id": "` + p.ID + `" }` path := fmt.Sprintf("%s/agents/%s/reassign", FleetAPI, a.ID) @@ -82,24 +81,20 @@ func (c *Client) AssignPolicyToAgent(a Agent, p Policy) error { return fmt.Errorf("could not assign policy to agent; API status code = %d; response body = %s", statusCode, respBody) } - err = c.waitUntilPolicyAssigned(a, p) + err = c.waitUntilPolicyAssigned(ctx, a, p) if err != nil { return fmt.Errorf("error occurred while waiting for the policy to be assigned to all agents: %w", err) } return nil } -func (c *Client) waitUntilPolicyAssigned(a Agent, p Policy) error { - timeout := time.NewTimer(waitForPolicyAssignedTimeout) - defer timeout.Stop() +func (c *Client) waitUntilPolicyAssigned(ctx context.Context, a Agent, p Policy) error { + ctx, cancel := context.WithTimeout(ctx, waitForPolicyAssignedTimeout) + defer cancel() ticker := time.NewTicker(waitForPolicyAssignedRetryPeriod) defer ticker.Stop() for { - if signal.SIGINT() { - return errors.New("SIGINT: cancel waiting for policy assigned") - } - agent, err := c.getAgent(a.ID) if err != nil { return fmt.Errorf("can't get the agent: %w", err) @@ -113,8 +108,8 @@ func (c *Client) waitUntilPolicyAssigned(a Agent, p Policy) error { logger.Debugf("Wait until the policy (ID: %s, revision: %d) is assigned to the agent (ID: %s)...", p.ID, p.Revision, a.ID) select { - case <-timeout.C: - return errors.New("timeout: policy hasn't been assigned in time") + case <-ctx.Done(): + return fmt.Errorf("context done: %w", ctx.Err()) case <-ticker.C: continue } diff --git a/internal/service/boot.go b/internal/service/boot.go index 7303b3dce0..95a06459eb 100644 --- a/internal/service/boot.go +++ b/internal/service/boot.go @@ -5,6 +5,7 @@ package service import ( + "context" 
"fmt" "os" "os/signal" @@ -32,7 +33,7 @@ type Options struct { } // BootUp function boots up the service stack. -func BootUp(options Options) error { +func BootUp(ctx context.Context, options Options) error { logger.Debugf("Create new instance of the service deployer") serviceDeployer, err := servicedeployer.Factory(servicedeployer.FactoryOptions{ Profile: options.Profile, @@ -57,7 +58,7 @@ func BootUp(options Options) error { serviceCtxt.Name = options.ServiceName serviceCtxt.Logs.Folder.Agent = system.ServiceLogsAgentDir serviceCtxt.Logs.Folder.Local = locationManager.ServiceLogDir() - deployed, err := serviceDeployer.SetUp(serviceCtxt) + deployed, err := serviceDeployer.SetUp(ctx, serviceCtxt) if err != nil { return fmt.Errorf("can't set up the service deployer: %w", err) } diff --git a/internal/servicedeployer/compose.go b/internal/servicedeployer/compose.go index 1b291552fa..d38787d421 100644 --- a/internal/servicedeployer/compose.go +++ b/internal/servicedeployer/compose.go @@ -5,6 +5,7 @@ package servicedeployer import ( + "context" "fmt" "os" "path/filepath" @@ -46,7 +47,7 @@ func NewDockerComposeServiceDeployer(profile *profile.Profile, ymlPaths []string } // SetUp sets up the service and returns any relevant information. 
-func (d *DockerComposeServiceDeployer) SetUp(inCtxt ServiceContext) (DeployedService, error) { +func (d *DockerComposeServiceDeployer) SetUp(ctx context.Context, inCtxt ServiceContext) (DeployedService, error) { logger.Debug("setting up service using Docker Compose service deployer") service := dockerComposeDeployedService{ ymlPaths: d.ymlPaths, @@ -90,7 +91,7 @@ func (d *DockerComposeServiceDeployer) SetUp(inCtxt ServiceContext) (DeployedSer return nil, fmt.Errorf("could not boot up service using Docker Compose: %w", err) } - err = p.WaitForHealthy(opts) + err = p.WaitForHealthy(ctx, opts) if err != nil { processServiceContainerLogs(p, compose.CommandOptions{ Env: opts.Env, diff --git a/internal/servicedeployer/custom_agent.go b/internal/servicedeployer/custom_agent.go index b63891df92..f1c04ad338 100644 --- a/internal/servicedeployer/custom_agent.go +++ b/internal/servicedeployer/custom_agent.go @@ -5,6 +5,7 @@ package servicedeployer import ( + "context" _ "embed" "fmt" "os" @@ -47,7 +48,7 @@ func NewCustomAgentDeployer(profile *profile.Profile, dockerComposeFile string, } // SetUp sets up the service and returns any relevant information. 
-func (d *CustomAgentDeployer) SetUp(inCtxt ServiceContext) (DeployedService, error) { +func (d *CustomAgentDeployer) SetUp(ctx context.Context, inCtxt ServiceContext) (DeployedService, error) { logger.Debug("setting up service using Docker Compose service deployer") appConfig, err := install.Configuration() @@ -121,7 +122,7 @@ func (d *CustomAgentDeployer) SetUp(inCtxt ServiceContext) (DeployedService, err return nil, fmt.Errorf("can't attach service container to the stack network: %w", err) } - err = p.WaitForHealthy(opts) + err = p.WaitForHealthy(ctx, opts) if err != nil { processServiceContainerLogs(p, compose.CommandOptions{ Env: opts.Env, diff --git a/internal/servicedeployer/kubernetes.go b/internal/servicedeployer/kubernetes.go index c3dffa607c..913cd93391 100644 --- a/internal/servicedeployer/kubernetes.go +++ b/internal/servicedeployer/kubernetes.go @@ -6,6 +6,7 @@ package servicedeployer import ( "bytes" + "context" _ "embed" "encoding/base64" "fmt" @@ -85,7 +86,7 @@ func NewKubernetesServiceDeployer(profile *profile.Profile, definitionsPath stri // SetUp function links the kind container with elastic-package-stack network, installs Elastic-Agent and optionally // custom YAML definitions. 
-func (ksd KubernetesServiceDeployer) SetUp(ctxt ServiceContext) (DeployedService, error) { +func (ksd KubernetesServiceDeployer) SetUp(ctx context.Context, service ServiceContext) (DeployedService, error) { err := kind.VerifyContext() if err != nil { return nil, fmt.Errorf("kind context verification failed: %w", err) @@ -106,13 +107,13 @@ func (ksd KubernetesServiceDeployer) SetUp(ctxt ServiceContext) (DeployedService return nil, fmt.Errorf("can't install custom definitions in the Kubernetes cluster: %w", err) } - ctxt.Name = kind.ControlPlaneContainerName - ctxt.Hostname = kind.ControlPlaneContainerName + service.Name = kind.ControlPlaneContainerName + service.Hostname = kind.ControlPlaneContainerName // kind-control-plane is the name of the kind host where Pod is running since we use hostNetwork setting // to deploy Agent Pod. Because of this, hostname inside pod will be equal to the name of the k8s host. - ctxt.Agent.Host.NamePrefix = "kind-control-plane" + service.Agent.Host.NamePrefix = "kind-control-plane" return &kubernetesDeployedService{ - ctxt: ctxt, + ctxt: service, definitionsDir: ksd.definitionsDir, }, nil } diff --git a/internal/servicedeployer/service_deployer.go b/internal/servicedeployer/service_deployer.go index 5e1cb93afe..d9002b878d 100644 --- a/internal/servicedeployer/service_deployer.go +++ b/internal/servicedeployer/service_deployer.go @@ -4,10 +4,12 @@ package servicedeployer +import "context" + // ServiceDeployer defines the interface for deploying a service. It defines methods for // controlling the lifecycle of a service. type ServiceDeployer interface { // SetUp implements the logic for setting up a service. It takes a context and returns a // ServiceHandler. 
- SetUp(ctxt ServiceContext) (DeployedService, error) + SetUp(context.Context, ServiceContext) (DeployedService, error) } diff --git a/internal/servicedeployer/terraform.go b/internal/servicedeployer/terraform.go index 8bbb0c65d4..9b27596147 100644 --- a/internal/servicedeployer/terraform.go +++ b/internal/servicedeployer/terraform.go @@ -5,6 +5,7 @@ package servicedeployer import ( + "context" _ "embed" "encoding/json" "fmt" @@ -88,7 +89,7 @@ func NewTerraformServiceDeployer(definitionsDir string) (*TerraformServiceDeploy } // SetUp method boots up the Docker Compose with Terraform executor and mounted .tf definitions. -func (tsd TerraformServiceDeployer) SetUp(inCtxt ServiceContext) (DeployedService, error) { +func (tsd TerraformServiceDeployer) SetUp(ctx context.Context, svcCtxt ServiceContext) (DeployedService, error) { logger.Debug("setting up service using Terraform deployer") configDir, err := tsd.installDockerfile() @@ -103,14 +104,14 @@ func (tsd TerraformServiceDeployer) SetUp(inCtxt ServiceContext) (DeployedServic ymlPaths = append(ymlPaths, envYmlPath) } - tfEnvironment := tsd.buildTerraformExecutorEnvironment(inCtxt) + tfEnvironment := tsd.buildTerraformExecutorEnvironment(svcCtxt) service := dockerComposeDeployedService{ ymlPaths: ymlPaths, project: "elastic-package-service", env: tfEnvironment, } - outCtxt := inCtxt + outCtxt := svcCtxt p, err := compose.NewProject(service.project, service.ymlPaths...) 
if err != nil { @@ -146,7 +147,7 @@ func (tsd TerraformServiceDeployer) SetUp(inCtxt ServiceContext) (DeployedServic return nil, fmt.Errorf("could not boot up service using Docker Compose: %w", err) } - err = p.WaitForHealthy(opts) + err = p.WaitForHealthy(ctx, opts) if err != nil { processServiceContainerLogs(p, compose.CommandOptions{ Env: opts.Env, diff --git a/internal/signal/sigint.go b/internal/signal/sigint.go deleted file mode 100644 index 4cac814eed..0000000000 --- a/internal/signal/sigint.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package signal - -import ( - "os" - "os/signal" - "syscall" - "time" - - "github.com/elastic/elastic-package/internal/logger" -) - -var ch chan os.Signal - -// Enable function enables signal notifications. -func Enable() { - ch = make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt, syscall.SIGTERM) -} - -// SIGINT function returns true if ctrl+c was pressed -func SIGINT() bool { - select { - case <-ch: - logger.Info("Signal caught!") - return true - default: - return false - } -} - -// Sleep is the equivalent of time.Sleep with the exception -// that is will end the sleep if ctrl+c is pressed. 
-func Sleep(d time.Duration) { - timer := time.NewTimer(d) - select { - case <-ch: - logger.Info("Signal caught!") - timer.Stop() - case <-timer.C: - } -} diff --git a/internal/testrunner/runners/asset/runner.go b/internal/testrunner/runners/asset/runner.go index ac26c58187..7937251b4e 100644 --- a/internal/testrunner/runners/asset/runner.go +++ b/internal/testrunner/runners/asset/runner.go @@ -5,6 +5,7 @@ package asset import ( + "context" "errors" "fmt" "strings" @@ -53,7 +54,7 @@ func (r runner) CanRunPerDataStream() bool { } // Run runs the asset loading tests -func (r *runner) Run(options testrunner.TestOptions) ([]testrunner.TestResult, error) { +func (r *runner) Run(ctx context.Context, options testrunner.TestOptions) ([]testrunner.TestResult, error) { r.testFolder = options.TestFolder r.packageRootPath = options.PackageRootPath r.kibanaClient = options.KibanaClient @@ -167,7 +168,7 @@ func (r *runner) run() ([]testrunner.TestResult, error) { return results, nil } -func (r *runner) TearDown() error { +func (r *runner) TearDown(ctx context.Context) error { if r.removePackageHandler != nil { if err := r.removePackageHandler(); err != nil { return err diff --git a/internal/testrunner/runners/pipeline/runner.go b/internal/testrunner/runners/pipeline/runner.go index fce07f9fa9..b9988cdb51 100644 --- a/internal/testrunner/runners/pipeline/runner.go +++ b/internal/testrunner/runners/pipeline/runner.go @@ -5,6 +5,7 @@ package pipeline import ( + "context" "encoding/json" "errors" "fmt" @@ -24,7 +25,6 @@ import ( "github.com/elastic/elastic-package/internal/logger" "github.com/elastic/elastic-package/internal/multierror" "github.com/elastic/elastic-package/internal/packages" - "github.com/elastic/elastic-package/internal/signal" "github.com/elastic/elastic-package/internal/stack" "github.com/elastic/elastic-package/internal/testrunner" ) @@ -66,7 +66,7 @@ func (r *runner) String() string { } // Run runs the pipeline tests defined under the given folder -func (r 
*runner) Run(options testrunner.TestOptions) ([]testrunner.TestResult, error) { +func (r *runner) Run(ctx context.Context, options testrunner.TestOptions) ([]testrunner.TestResult, error) { r.options = options stackConfig, err := stack.LoadConfig(r.options.Profile) @@ -84,14 +84,17 @@ func (r *runner) Run(options testrunner.TestOptions) ([]testrunner.TestResult, e } } - return r.run() + return r.run(ctx) } // TearDown shuts down the pipeline test runner. -func (r *runner) TearDown() error { +func (r *runner) TearDown(ctx context.Context) error { if r.options.DeferCleanup > 0 { logger.Debugf("Waiting for %s before cleanup...", r.options.DeferCleanup) - signal.Sleep(r.options.DeferCleanup) + select { + case <-time.After(r.options.DeferCleanup): + case <-ctx.Done(): + } } if err := ingest.UninstallPipelines(r.options.API, r.pipelines); err != nil { @@ -106,7 +109,7 @@ func (r *runner) CanRunPerDataStream() bool { return true } -func (r *runner) run() ([]testrunner.TestResult, error) { +func (r *runner) run(ctx context.Context) ([]testrunner.TestResult, error) { testCaseFiles, err := r.listTestCaseFiles() if err != nil { return nil, fmt.Errorf("listing test case definitions failed: %w", err) diff --git a/internal/testrunner/runners/static/runner.go b/internal/testrunner/runners/static/runner.go index e0bd881d36..f30b2ee7b4 100644 --- a/internal/testrunner/runners/static/runner.go +++ b/internal/testrunner/runners/static/runner.go @@ -5,6 +5,7 @@ package static import ( + "context" "errors" "fmt" "os" @@ -41,12 +42,12 @@ func (r runner) String() string { return "static files" } -func (r runner) Run(options testrunner.TestOptions) ([]testrunner.TestResult, error) { +func (r runner) Run(ctx context.Context, options testrunner.TestOptions) ([]testrunner.TestResult, error) { r.options = options - return r.run() + return r.run(ctx) } -func (r runner) run() ([]testrunner.TestResult, error) { +func (r runner) run(ctx context.Context) ([]testrunner.TestResult, error) { result 
:= testrunner.NewResultComposer(testrunner.TestResult{ TestType: TestType, Package: r.options.TestFolder.Package, @@ -166,7 +167,7 @@ func (r runner) getExpectedDatasets(pkgManifest *packages.PackageManifest) ([]st return []string{pkgManifest.Name + "." + dsName}, nil } -func (r runner) TearDown() error { +func (r runner) TearDown(ctx context.Context) error { return nil // it's a static test runner, no state is stored } diff --git a/internal/testrunner/runners/system/runner.go b/internal/testrunner/runners/system/runner.go index e125ef86e8..f85ebdd824 100644 --- a/internal/testrunner/runners/system/runner.go +++ b/internal/testrunner/runners/system/runner.go @@ -5,6 +5,7 @@ package system import ( + "context" "encoding/json" "errors" "fmt" @@ -32,7 +33,6 @@ import ( "github.com/elastic/elastic-package/internal/packages" "github.com/elastic/elastic-package/internal/packages/installer" "github.com/elastic/elastic-package/internal/servicedeployer" - "github.com/elastic/elastic-package/internal/signal" "github.com/elastic/elastic-package/internal/stack" "github.com/elastic/elastic-package/internal/testrunner" ) @@ -131,20 +131,23 @@ func (r *runner) TestFolderRequired() bool { } // Run runs the system tests defined under the given folder -func (r *runner) Run(options testrunner.TestOptions) ([]testrunner.TestResult, error) { +func (r *runner) Run(ctx context.Context, options testrunner.TestOptions) ([]testrunner.TestResult, error) { r.options = options - return r.run() + return r.run(ctx) } // TearDown method doesn't perform any global action as the "tear down" is executed per test case. 
-func (r *runner) TearDown() error { +func (r *runner) TearDown(ctx context.Context) error { return nil } -func (r *runner) tearDownTest() error { +func (r *runner) tearDownTest(ctx context.Context) error { if r.options.DeferCleanup > 0 { logger.Debugf("waiting for %s before tearing down...", r.options.DeferCleanup) - signal.Sleep(r.options.DeferCleanup) + select { + case <-time.After(r.options.DeferCleanup): + case <-ctx.Done(): + } } if r.resetAgentPolicyHandler != nil { @@ -201,7 +204,7 @@ func (r *runner) newResult(name string) *testrunner.ResultComposer { }) } -func (r *runner) run() (results []testrunner.TestResult, err error) { +func (r *runner) run(ctx context.Context) (results []testrunner.TestResult, err error) { result := r.newResult("(init)") locationManager, err := locations.NewLocationManager() if err != nil { @@ -254,7 +257,7 @@ func (r *runner) run() (results []testrunner.TestResult, err error) { startTesting := time.Now() for _, cfgFile := range cfgFiles { for _, variantName := range r.selectVariants(variantsFile) { - partial, err := r.runTestPerVariant(result, locationManager, cfgFile, dataStreamPath, variantName, stackVersion.Version()) + partial, err := r.runTestPerVariant(ctx, result, locationManager, cfgFile, dataStreamPath, variantName, stackVersion.Version()) results = append(results, partial...) 
if err != nil { return results, err @@ -283,7 +286,7 @@ func (r *runner) run() (results []testrunner.TestResult, err error) { return results, nil } -func (r *runner) runTestPerVariant(result *testrunner.ResultComposer, locationManager *locations.LocationManager, cfgFile, dataStreamPath, variantName, stackVersion string) ([]testrunner.TestResult, error) { +func (r *runner) runTestPerVariant(ctx context.Context, result *testrunner.ResultComposer, locationManager *locations.LocationManager, cfgFile, dataStreamPath, variantName, stackVersion string) ([]testrunner.TestResult, error) { serviceOptions := servicedeployer.FactoryOptions{ Profile: r.options.Profile, PackageRootPath: r.options.PackageRootPath, @@ -314,7 +317,7 @@ func (r *runner) runTestPerVariant(result *testrunner.ResultComposer, locationMa var partial []testrunner.TestResult if testConfig.Skip == nil { logger.Debugf("running test with configuration '%s'", testConfig.Name()) - partial, err = r.runTest(testConfig, ctxt, serviceOptions) + partial, err = r.runTest(ctx, testConfig, ctxt, serviceOptions) } else { logger.Warnf("skipping %s test for %s/%s: %s (details: %s)", TestType, r.options.TestFolder.Package, r.options.TestFolder.DataStream, @@ -323,7 +326,7 @@ func (r *runner) runTestPerVariant(result *testrunner.ResultComposer, locationMa partial, err = result.WithSkip(testConfig.Skip) } - tdErr := r.tearDownTest() + tdErr := r.tearDownTest(ctx) if err != nil { return partial, err } @@ -469,7 +472,7 @@ func (r *runner) getDocs(dataStream string) (*hits, error) { return &hits, nil } -func (r *runner) runTest(config *testConfig, ctxt servicedeployer.ServiceContext, serviceOptions servicedeployer.FactoryOptions) ([]testrunner.TestResult, error) { +func (r *runner) runTest(ctx context.Context, config *testConfig, serviceContext servicedeployer.ServiceContext, serviceOptions servicedeployer.FactoryOptions) ([]testrunner.TestResult, error) { result := r.newResult(config.Name()) pkgManifest, err := 
packages.ReadPackageManifestFromPackageRoot(r.options.PackageRootPath) @@ -503,13 +506,13 @@ func (r *runner) runTest(config *testConfig, ctxt servicedeployer.ServiceContext } if config.Service != "" { - ctxt.Name = config.Service + serviceContext.Name = config.Service } - service, err := serviceDeployer.SetUp(ctxt) + service, err := serviceDeployer.SetUp(ctx, serviceContext) if err != nil { return result.WithError(fmt.Errorf("could not setup service: %w", err)) } - ctxt = service.Context() + serviceContext = service.Context() r.shutdownServiceHandler = func() error { logger.Debug("tearing down service...") if err := service.TearDown(); err != nil { @@ -520,7 +523,7 @@ func (r *runner) runTest(config *testConfig, ctxt servicedeployer.ServiceContext } // Reload test config with ctx variable substitution. - config, err = newConfig(config.Path, ctxt, serviceOptions.Variant) + config, err = newConfig(config.Path, serviceContext, serviceOptions.Variant) if err != nil { return result.WithError(fmt.Errorf("unable to reload system test case configuration: %w", err)) } @@ -622,11 +625,7 @@ func (r *runner) runTest(config *testConfig, ctxt servicedeployer.ServiceContext return result.WithError(fmt.Errorf("error deleting old data in data stream: %s: %w", dataStream, err)) } - cleared, err := waitUntilTrue(func() (bool, error) { - if signal.SIGINT() { - return true, errors.New("SIGINT: cancel clearing data") - } - + cleared, err := waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { hits, err := r.getDocs(dataStream) if err != nil { return false, err @@ -640,7 +639,7 @@ func (r *runner) runTest(config *testConfig, ctxt servicedeployer.ServiceContext return result.WithError(err) } - agents, err := checkEnrolledAgents(r.options.KibanaClient, ctxt) + agents, err := checkEnrolledAgents(ctx, r.options.KibanaClient, serviceContext) if err != nil { return result.WithError(fmt.Errorf("can't check enrolled agents: %w", err)) } @@ -668,7 +667,7 @@ func (r *runner) 
runTest(config *testConfig, ctxt servicedeployer.ServiceContext // Assign policy to agent r.resetAgentPolicyHandler = func() error { logger.Debug("reassigning original policy back to agent...") - if err := r.options.KibanaClient.AssignPolicyToAgent(agent, origPolicy); err != nil { + if err := r.options.KibanaClient.AssignPolicyToAgent(ctx, agent, origPolicy); err != nil { return fmt.Errorf("error reassigning original policy to agent: %w", err) } return nil @@ -680,7 +679,7 @@ func (r *runner) runTest(config *testConfig, ctxt servicedeployer.ServiceContext } logger.Debug("assigning package data stream to agent...") - if err := r.options.KibanaClient.AssignPolicyToAgent(agent, *policyWithDataStream); err != nil { + if err := r.options.KibanaClient.AssignPolicyToAgent(ctx, agent, *policyWithDataStream); err != nil { return result.WithError(fmt.Errorf("could not assign policy to agent: %w", err)) } @@ -701,11 +700,7 @@ func (r *runner) runTest(config *testConfig, ctxt servicedeployer.ServiceContext logger.Debug("checking for expected data in data stream...") var hits *hits oldHits := 0 - passed, waitErr := waitUntilTrue(func() (bool, error) { - if signal.SIGINT() { - return true, errors.New("SIGINT: cancel waiting for policy assigned") - } - + passed, waitErr := waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { var err error hits, err = r.getDocs(dataStream) if err != nil { @@ -833,13 +828,9 @@ func (r *runner) runTest(config *testConfig, ctxt servicedeployer.ServiceContext return result.WithSuccess() } -func checkEnrolledAgents(client *kibana.Client, ctxt servicedeployer.ServiceContext) ([]kibana.Agent, error) { +func checkEnrolledAgents(ctx context.Context, client *kibana.Client, ctxt servicedeployer.ServiceContext) ([]kibana.Agent, error) { var agents []kibana.Agent - enrolled, err := waitUntilTrue(func() (bool, error) { - if signal.SIGINT() { - return false, errors.New("SIGINT: cancel checking enrolled agents") - } - + enrolled, err := waitUntilTrue(ctx, 
func(ctx context.Context) (bool, error) { allAgents, err := client.ListAgents() if err != nil { return false, fmt.Errorf("could not list agents: %w", err) @@ -1244,7 +1235,7 @@ func deleteDataStreamDocs(api *elasticsearch.API, dataStream string) error { return nil } -func waitUntilTrue(fn func() (bool, error), timeout time.Duration) (bool, error) { +func waitUntilTrue(ctx context.Context, fn func(context.Context) (bool, error), timeout time.Duration) (bool, error) { timeoutTicker := time.NewTicker(timeout) defer timeoutTicker.Stop() @@ -1252,7 +1243,7 @@ func waitUntilTrue(fn func() (bool, error), timeout time.Duration) (bool, error) defer retryTicker.Stop() for { - result, err := fn() + result, err := fn(ctx) if err != nil { return false, err } @@ -1263,6 +1254,8 @@ func waitUntilTrue(fn func() (bool, error), timeout time.Duration) (bool, error) select { case <-retryTicker.C: continue + case <-ctx.Done(): + return false, fmt.Errorf("context done: %w", ctx.Err()) case <-timeoutTicker.C: return false, nil } diff --git a/internal/testrunner/testrunner.go b/internal/testrunner/testrunner.go index d9df974b08..3885d485a3 100644 --- a/internal/testrunner/testrunner.go +++ b/internal/testrunner/testrunner.go @@ -5,6 +5,7 @@ package testrunner import ( + "context" "errors" "fmt" "os" @@ -45,11 +46,11 @@ type TestRunner interface { String() string // Run executes the test runner. - Run(TestOptions) ([]TestResult, error) + Run(context.Context, TestOptions) ([]TestResult, error) // TearDown cleans up any test runner resources. It must be called // after the test runner has finished executing. - TearDown() error + TearDown(context.Context) error CanRunPerDataStream() bool @@ -263,14 +264,14 @@ func RegisterRunner(runner TestRunner) { } // Run method delegates execution to the registered test runner, based on the test type. 
-func Run(testType TestType, options TestOptions) ([]TestResult, error) { +func Run(ctx context.Context, testType TestType, options TestOptions) ([]TestResult, error) { runner, defined := runners[testType] if !defined { return nil, fmt.Errorf("unregistered runner test: %s", testType) } - results, err := runner.Run(options) - tdErr := runner.TearDown() + results, err := runner.Run(ctx, options) + tdErr := runner.TearDown(ctx) if err != nil { return nil, fmt.Errorf("could not complete test run: %w", err) } diff --git a/main.go b/main.go index 2ca1012e0c..b40df33803 100644 --- a/main.go +++ b/main.go @@ -5,8 +5,10 @@ package main import ( + "context" "log" "os" + "os/signal" "github.com/elastic/elastic-package/cmd" "github.com/elastic/elastic-package/internal/install" @@ -20,7 +22,10 @@ func main() { log.Fatalf("Validating installation failed: %v", err) } - err = rootCmd.Execute() + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) + defer cancel() + + err = rootCmd.ExecuteContext(ctx) if err != nil { os.Exit(1) } From b2ebe8299c351d26bf15b23e845ecc0aa297996f Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 14 Feb 2024 15:03:35 +0100 Subject: [PATCH 02/32] Use the correct context on testrunner commands --- cmd/testrunner.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cmd/testrunner.go b/cmd/testrunner.go index 50d0874278..dff947c910 100644 --- a/cmd/testrunner.go +++ b/cmd/testrunner.go @@ -5,7 +5,6 @@ package cmd import ( - "context" "errors" "fmt" "os" @@ -78,7 +77,7 @@ func setupTestCommand() *cobraext.Command { cmd.PersistentFlags().StringP(cobraext.ProfileFlagName, "p", "", fmt.Sprintf(cobraext.ProfileFlagDescription, install.ProfileNameEnvVar)) for testType, runner := range testrunner.TestRunners() { - action := testTypeCommandActionFactory(cmd.Context(), runner) + action := testTypeCommandActionFactory(runner) testTypeCmdActions = append(testTypeCmdActions, action) testTypeCmd := &cobra.Command{ 
@@ -99,7 +98,7 @@ func setupTestCommand() *cobraext.Command { return cobraext.NewCommand(cmd, cobraext.ContextPackage) } -func testTypeCommandActionFactory(ctx context.Context, runner testrunner.TestRunner) cobraext.CommandAction { +func testTypeCommandActionFactory(runner testrunner.TestRunner) cobraext.CommandAction { testType := runner.Type() return func(cmd *cobra.Command, args []string) error { cmd.Printf("Run %s tests for the package\n", testType) @@ -245,7 +244,7 @@ func testTypeCommandActionFactory(ctx context.Context, runner testrunner.TestRun var results []testrunner.TestResult for _, folder := range testFolders { - r, err := testrunner.Run(ctx, testType, testrunner.TestOptions{ + r, err := testrunner.Run(cmd.Context(), testType, testrunner.TestOptions{ Profile: profile, TestFolder: folder, PackageRootPath: packageRootPath, From e30b7b77e39bcdd7355ff88bf7576e529179654e Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 14 Feb 2024 17:18:09 +0100 Subject: [PATCH 03/32] More contexts --- cmd/stack.go | 10 ++--- internal/benchrunner/runners/rally/runner.go | 10 ++--- internal/benchrunner/runners/system/runner.go | 40 ++++++++--------- internal/compose/compose.go | 45 ++++++++++--------- internal/kind/kind.go | 5 ++- internal/kubectl/kubectl.go | 13 +++--- internal/kubectl/kubectl_apply.go | 9 ++-- internal/kubectl/kubectl_delete.go | 6 ++- internal/service/boot.go | 2 +- internal/servicedeployer/compose.go | 24 +++++----- internal/servicedeployer/custom_agent.go | 6 +-- internal/servicedeployer/deployed_service.go | 11 +++-- internal/servicedeployer/kubernetes.go | 22 ++++----- internal/servicedeployer/terraform.go | 6 +-- internal/stack/boot.go | 20 +++++---- internal/stack/clients.go | 7 ++- internal/stack/compose.go | 19 ++++---- internal/stack/dump.go | 9 ++-- internal/stack/logs.go | 5 ++- internal/stack/providers.go | 31 ++++++------- internal/stack/serverless.go | 29 ++++++------ internal/stack/status.go | 5 ++- internal/stack/update.go | 
5 ++- internal/testrunner/runners/system/runner.go | 8 ++-- main.go | 12 +++++ 25 files changed, 195 insertions(+), 164 deletions(-) diff --git a/cmd/stack.go b/cmd/stack.go index 123edc1a87..63899ba2eb 100644 --- a/cmd/stack.go +++ b/cmd/stack.go @@ -115,7 +115,7 @@ func setupStackCommand() *cobraext.Command { profile.RuntimeOverrides(userParameters) cmd.Printf("Using profile %s.\n", profile.ProfilePath) - err = provider.BootUp(stack.Options{ + err = provider.BootUp(cmd.Context(), stack.Options{ DaemonMode: daemonMode, StackVersion: stackVersion, Services: services, @@ -154,7 +154,7 @@ func setupStackCommand() *cobraext.Command { return err } - err = provider.TearDown(stack.Options{ + err = provider.TearDown(cmd.Context(), stack.Options{ Profile: profile, Printer: cmd, }) @@ -189,7 +189,7 @@ func setupStackCommand() *cobraext.Command { return cobraext.FlagParsingError(err, cobraext.StackVersionFlagName) } - err = provider.Update(stack.Options{ + err = provider.Update(cmd.Context(), stack.Options{ StackVersion: stackVersion, Profile: profile, Printer: cmd, @@ -255,7 +255,7 @@ func setupStackCommand() *cobraext.Command { return err } - target, err := provider.Dump(stack.DumpOptions{ + target, err := provider.Dump(cmd.Context(), stack.DumpOptions{ Output: output, Profile: profile, }) @@ -286,7 +286,7 @@ func setupStackCommand() *cobraext.Command { return err } - servicesStatus, err := provider.Status(stack.Options{ + servicesStatus, err := provider.Status(cmd.Context(), stack.Options{ Profile: profile, Printer: cmd, }) diff --git a/internal/benchrunner/runners/rally/runner.go b/internal/benchrunner/runners/rally/runner.go index e38d4511ee..37bf5dd31f 100644 --- a/internal/benchrunner/runners/rally/runner.go +++ b/internal/benchrunner/runners/rally/runner.go @@ -181,7 +181,7 @@ func (r *runner) SetUp(ctx context.Context) error { // Run runs the system benchmarks defined under the given folder func (r *runner) Run(ctx context.Context) (reporters.Reportable, error) { - 
return r.run() + return r.run(ctx) } func (r *runner) TearDown(ctx context.Context) error { @@ -398,7 +398,7 @@ func (r *runner) wipeDataStreamOnSetup() error { return r.deleteDataStreamDocs(r.runtimeDataStream) } -func (r *runner) run() (report reporters.Reportable, err error) { +func (r *runner) run(ctx context.Context) (report reporters.Reportable, err error) { r.startMetricsColletion() defer r.mcollector.stop() @@ -433,7 +433,7 @@ func (r *runner) run() (report reporters.Reportable, err error) { return dummy, ErrDryRun } - rallyStats, err := r.runRally() + rallyStats, err := r.runRally(ctx) if err != nil { return nil, err } @@ -861,7 +861,7 @@ func (r *runner) copyCorpusFile(corpusPath, destDir string) (uint64, error) { return corpusDocsCount, nil } -func (r *runner) runRally() ([]rallyStat, error) { +func (r *runner) runRally(ctx context.Context) ([]rallyStat, error) { logger.Debug("running rally...") profileConfig, err := stack.StackInitConfig(r.options.Profile) if err != nil { @@ -870,7 +870,7 @@ func (r *runner) runRally() ([]rallyStat, error) { elasticsearchHost, found := os.LookupEnv(stack.ElasticsearchHostEnv) if !found { - status, err := stack.Status(stack.Options{Profile: r.options.Profile}) + status, err := stack.Status(ctx, stack.Options{Profile: r.options.Profile}) if err != nil { return nil, fmt.Errorf("failed to check status of stack in current profile: %w", err) } diff --git a/internal/benchrunner/runners/system/runner.go b/internal/benchrunner/runners/system/runner.go index 259796e510..cc4a27221c 100644 --- a/internal/benchrunner/runners/system/runner.go +++ b/internal/benchrunner/runners/system/runner.go @@ -57,11 +57,11 @@ type runner struct { corporaFile string // Execution order of following handlers is defined in runner.TearDown() method. 
- deletePolicyHandler func() error - resetAgentPolicyHandler func() error - shutdownServiceHandler func() error - wipeDataStreamHandler func() error - clearCorporaHandler func() error + deletePolicyHandler func(context.Context) error + resetAgentPolicyHandler func(context.Context) error + shutdownServiceHandler func(context.Context) error + wipeDataStreamHandler func(context.Context) error + clearCorporaHandler func(context.Context) error } func NewSystemBenchmark(opts Options) benchrunner.Runner { @@ -90,35 +90,35 @@ func (r *runner) TearDown(ctx context.Context) error { var merr multierror.Error if r.resetAgentPolicyHandler != nil { - if err := r.resetAgentPolicyHandler(); err != nil { + if err := r.resetAgentPolicyHandler(context.Background()); err != nil { merr = append(merr, err) } r.resetAgentPolicyHandler = nil } if r.deletePolicyHandler != nil { - if err := r.deletePolicyHandler(); err != nil { + if err := r.deletePolicyHandler(context.Background()); err != nil { merr = append(merr, err) } r.deletePolicyHandler = nil } if r.shutdownServiceHandler != nil { - if err := r.shutdownServiceHandler(); err != nil { + if err := r.shutdownServiceHandler(context.Background()); err != nil { merr = append(merr, err) } r.shutdownServiceHandler = nil } if r.wipeDataStreamHandler != nil { - if err := r.wipeDataStreamHandler(); err != nil { + if err := r.wipeDataStreamHandler(context.Background()); err != nil { merr = append(merr, err) } r.wipeDataStreamHandler = nil } if r.clearCorporaHandler != nil { - if err := r.clearCorporaHandler(); err != nil { + if err := r.clearCorporaHandler(context.Background()); err != nil { merr = append(merr, err) } r.clearCorporaHandler = nil @@ -199,7 +199,7 @@ func (r *runner) setUp(ctx context.Context) error { r.scenario.Version, ) - r.wipeDataStreamHandler = func() error { + r.wipeDataStreamHandler = func(context.Context) error { logger.Debugf("deleting data in data stream...") if err := r.deleteDataStreamDocs(r.runtimeDataStream); err != 
nil { return fmt.Errorf("error deleting data in data stream: %w", err) @@ -257,9 +257,9 @@ func (r *runner) run(ctx context.Context) (report reporters.Reportable, err erro } r.ctxt = service.Context() - r.shutdownServiceHandler = func() error { + r.shutdownServiceHandler = func(ctx context.Context) error { logger.Debug("tearing down service...") - if err := service.TearDown(); err != nil { + if err := service.TearDown(ctx); err != nil { return fmt.Errorf("error tearing down service: %w", err) } @@ -285,7 +285,7 @@ func (r *runner) run(ctx context.Context) (report reporters.Reportable, err erro // Signal to the service that the agent is ready (policy is assigned). if r.scenario.Corpora.InputService != nil && r.scenario.Corpora.InputService.Signal != "" { - if err = service.Signal(r.scenario.Corpora.InputService.Signal); err != nil { + if err = service.Signal(ctx, r.scenario.Corpora.InputService.Signal); err != nil { return nil, fmt.Errorf("failed to notify benchmark service: %w", err) } } @@ -376,7 +376,7 @@ func (r *runner) createBenchmarkPolicy(pkgManifest *packages.PackageManifest) (* return nil, err } - r.deletePolicyHandler = func() error { + r.deletePolicyHandler = func(ctx context.Context) error { var merr multierror.Error logger.Debug("deleting benchmark package policy...") @@ -607,7 +607,7 @@ func (r *runner) runGenerator(destDir string) error { } r.corporaFile = f.Name() - r.clearCorporaHandler = func() error { + r.clearCorporaHandler = func(context.Context) error { return os.Remove(r.corporaFile) } @@ -677,7 +677,7 @@ func (r *runner) enrollAgents(ctx context.Context) error { return fmt.Errorf("can't check enrolled agents: %w", err) } - handlers := make([]func() error, len(agents)) + handlers := make([]func(context.Context) error, len(agents)) for i, agent := range agents { origPolicy := kibana.Policy{ ID: agent.PolicyID, @@ -685,7 +685,7 @@ func (r *runner) enrollAgents(ctx context.Context) error { } // Assign policy to agent - handlers[i] = func() error 
{ + handlers[i] = func(ctx context.Context) error { logger.Debug("reassigning original policy back to agent...") if err := r.options.KibanaClient.AssignPolicyToAgent(ctx, agent, origPolicy); err != nil { return fmt.Errorf("error reassigning original policy to agent %s: %w", agent.ID, err) @@ -704,10 +704,10 @@ func (r *runner) enrollAgents(ctx context.Context) error { } } - r.resetAgentPolicyHandler = func() error { + r.resetAgentPolicyHandler = func(ctx context.Context) error { var merr multierror.Error for _, h := range handlers { - if err := h(); err != nil { + if err := h(ctx); err != nil { merr = append(merr, err) } } diff --git a/internal/compose/compose.go b/internal/compose/compose.go index c1f990437f..511bd02ad2 100644 --- a/internal/compose/compose.go +++ b/internal/compose/compose.go @@ -195,7 +195,8 @@ func NewProject(name string, paths ...string) (*Project, error) { c.dockerComposeStandalone = c.dockerComposeStandaloneRequired() } - ver, err := c.dockerComposeVersion() + // Passing a nil context here because we are on initialization. + ver, err := c.dockerComposeVersion(context.Background()) if err != nil { logger.Errorf("Unable to determine Docker Compose version: %v. Defaulting to 1.x", err) c.dockerComposeV1 = true @@ -226,7 +227,7 @@ func NewProject(name string, paths ...string) (*Project, error) { } // Up brings up a Docker Compose project. -func (p *Project) Up(opts CommandOptions) error { +func (p *Project) Up(ctx context.Context, opts CommandOptions) error { args := p.baseArgs() args = append(args, "up") if p.disablePullProgressInformation { @@ -235,7 +236,7 @@ func (p *Project) Up(opts CommandOptions) error { args = append(args, opts.ExtraArgs...) args = append(args, opts.Services...) 
- if err := p.runDockerComposeCmd(dockerComposeOptions{args: args, env: opts.Env}); err != nil { + if err := p.runDockerComposeCmd(ctx, dockerComposeOptions{args: args, env: opts.Env}); err != nil { return fmt.Errorf("running Docker Compose up command failed: %w", err) } @@ -243,12 +244,12 @@ func (p *Project) Up(opts CommandOptions) error { } // Down tears down a Docker Compose project. -func (p *Project) Down(opts CommandOptions) error { +func (p *Project) Down(ctx context.Context, opts CommandOptions) error { args := p.baseArgs() args = append(args, "down") args = append(args, opts.ExtraArgs...) - if err := p.runDockerComposeCmd(dockerComposeOptions{args: args, env: opts.Env}); err != nil { + if err := p.runDockerComposeCmd(ctx, dockerComposeOptions{args: args, env: opts.Env}); err != nil { return fmt.Errorf("running Docker Compose down command failed: %w", err) } @@ -256,13 +257,13 @@ func (p *Project) Down(opts CommandOptions) error { } // Build builds a Docker Compose project. -func (p *Project) Build(opts CommandOptions) error { +func (p *Project) Build(ctx context.Context, opts CommandOptions) error { args := p.baseArgs() args = append(args, "build") args = append(args, opts.ExtraArgs...) args = append(args, opts.Services...) - if err := p.runDockerComposeCmd(dockerComposeOptions{args: args, env: opts.Env}); err != nil { + if err := p.runDockerComposeCmd(ctx, dockerComposeOptions{args: args, env: opts.Env}); err != nil { return fmt.Errorf("running Docker Compose build command failed: %w", err) } @@ -270,13 +271,13 @@ func (p *Project) Build(opts CommandOptions) error { } // Kill sends a signal to a service container. -func (p *Project) Kill(opts CommandOptions) error { +func (p *Project) Kill(ctx context.Context, opts CommandOptions) error { args := p.baseArgs() args = append(args, "kill") args = append(args, opts.ExtraArgs...) args = append(args, opts.Services...) 
- if err := p.runDockerComposeCmd(dockerComposeOptions{args: args, env: opts.Env}); err != nil { + if err := p.runDockerComposeCmd(ctx, dockerComposeOptions{args: args, env: opts.Env}); err != nil { return fmt.Errorf("running Docker Compose kill command failed: %w", err) } @@ -284,14 +285,14 @@ func (p *Project) Kill(opts CommandOptions) error { } // Config returns the combined configuration for a Docker Compose project. -func (p *Project) Config(opts CommandOptions) (*Config, error) { +func (p *Project) Config(ctx context.Context, opts CommandOptions) (*Config, error) { args := p.baseArgs() args = append(args, "config") args = append(args, opts.ExtraArgs...) args = append(args, opts.Services...) var b bytes.Buffer - if err := p.runDockerComposeCmd(dockerComposeOptions{args: args, env: opts.Env, stdout: &b}); err != nil { + if err := p.runDockerComposeCmd(ctx, dockerComposeOptions{args: args, env: opts.Env, stdout: &b}); err != nil { return nil, err } @@ -304,7 +305,7 @@ func (p *Project) Config(opts CommandOptions) (*Config, error) { } // Pull pulls down images for a Docker Compose project. -func (p *Project) Pull(opts CommandOptions) error { +func (p *Project) Pull(ctx context.Context, opts CommandOptions) error { args := p.baseArgs() args = append(args, "pull") if p.disablePullProgressInformation { @@ -313,7 +314,7 @@ func (p *Project) Pull(opts CommandOptions) error { args = append(args, opts.ExtraArgs...) args = append(args, opts.Services...) - if err := p.runDockerComposeCmd(dockerComposeOptions{args: args, env: opts.Env}); err != nil { + if err := p.runDockerComposeCmd(ctx, dockerComposeOptions{args: args, env: opts.Env}); err != nil { return fmt.Errorf("running Docker Compose pull command failed: %w", err) } @@ -321,14 +322,14 @@ func (p *Project) Pull(opts CommandOptions) error { } // Logs returns service logs for the selected service in the Docker Compose project. 
-func (p *Project) Logs(opts CommandOptions) ([]byte, error) { +func (p *Project) Logs(ctx context.Context, opts CommandOptions) ([]byte, error) { args := p.baseArgs() args = append(args, "logs") args = append(args, opts.ExtraArgs...) args = append(args, opts.Services...) var b bytes.Buffer - if err := p.runDockerComposeCmd(dockerComposeOptions{args: args, env: opts.Env, stdout: &b}); err != nil { + if err := p.runDockerComposeCmd(ctx, dockerComposeOptions{args: args, env: opts.Env, stdout: &b}); err != nil { return nil, err } return b.Bytes(), nil @@ -341,7 +342,7 @@ func (p *Project) WaitForHealthy(ctx context.Context, opts CommandOptions) error args = append(args, "ps", "-a", "-q") var b bytes.Buffer - if err := p.runDockerComposeCmd(dockerComposeOptions{args: args, env: opts.Env, stdout: &b}); err != nil { + if err := p.runDockerComposeCmd(ctx, dockerComposeOptions{args: args, env: opts.Env, stdout: &b}); err != nil { return err } @@ -403,13 +404,13 @@ func (p *Project) WaitForHealthy(ctx context.Context, opts CommandOptions) error } // ServiceExitCode returns true if the specified service is exited with an error. 
-func (p *Project) ServiceExitCode(service string, opts CommandOptions) (bool, int, error) { +func (p *Project) ServiceExitCode(ctx context.Context, service string, opts CommandOptions) (bool, int, error) { // Read container IDs args := p.baseArgs() args = append(args, "ps", "-a", "-q", service) var b bytes.Buffer - if err := p.runDockerComposeCmd(dockerComposeOptions{args: args, env: opts.Env, stdout: &b}); err != nil { + if err := p.runDockerComposeCmd(ctx, dockerComposeOptions{args: args, env: opts.Env, stdout: &b}); err != nil { return false, -1, err } @@ -460,11 +461,11 @@ type dockerComposeOptions struct { stdout io.Writer } -func (p *Project) runDockerComposeCmd(opts dockerComposeOptions) error { +func (p *Project) runDockerComposeCmd(ctx context.Context, opts dockerComposeOptions) error { name, args := p.dockerComposeBaseCommand() args = append(args, opts.args...) - cmd := exec.Command(name, args...) + cmd := exec.CommandContext(ctx, name, args...) cmd.Env = append(os.Environ(), opts.env...) 
if logger.IsDebugMode() { @@ -497,14 +498,14 @@ func (p *Project) dockerComposeStandaloneRequired() bool { return true } -func (p *Project) dockerComposeVersion() (*semver.Version, error) { +func (p *Project) dockerComposeVersion(ctx context.Context) (*semver.Version, error) { var b bytes.Buffer args := []string{ "version", "--short", } - if err := p.runDockerComposeCmd(dockerComposeOptions{args: args, stdout: &b}); err != nil { + if err := p.runDockerComposeCmd(ctx, dockerComposeOptions{args: args, stdout: &b}); err != nil { return nil, fmt.Errorf("running Docker Compose version command failed: %w", err) } dcVersion := b.String() diff --git a/internal/kind/kind.go b/internal/kind/kind.go index fe67003331..90644f487d 100644 --- a/internal/kind/kind.go +++ b/internal/kind/kind.go @@ -5,6 +5,7 @@ package kind import ( + "context" "fmt" "github.com/elastic/elastic-package/internal/docker" @@ -20,10 +21,10 @@ const ControlPlaneContainerName = "kind-control-plane" const kindContext = "kind-kind" // VerifyContext function ensures that the kind context is selected. -func VerifyContext() error { +func VerifyContext(ctx context.Context) error { logger.Debug("ensure that kind context is selected") - currentContext, err := kubectl.CurrentContext() + currentContext, err := kubectl.CurrentContext(ctx) if err != nil { return fmt.Errorf("can't read current Kubernetes context: %w", err) } diff --git a/internal/kubectl/kubectl.go b/internal/kubectl/kubectl.go index f3277e766e..7d224148f3 100644 --- a/internal/kubectl/kubectl.go +++ b/internal/kubectl/kubectl.go @@ -6,6 +6,7 @@ package kubectl import ( "bytes" + "context" "fmt" "os/exec" "path/filepath" @@ -16,8 +17,8 @@ import ( const kustomizationFile = "kustomization.yaml" // CurrentContext function returns the selected Kubernetes context. 
-func CurrentContext() (string, error) { - cmd := exec.Command("kubectl", "config", "current-context") +func CurrentContext(ctx context.Context) (string, error) { + cmd := exec.CommandContext(ctx, "kubectl", "config", "current-context") errOutput := new(bytes.Buffer) cmd.Stderr = errOutput @@ -29,7 +30,7 @@ func CurrentContext() (string, error) { return string(bytes.TrimSpace(output)), nil } -func modifyKubernetesResources(action string, definitionPaths []string) ([]byte, error) { +func modifyKubernetesResources(ctx context.Context, action string, definitionPaths []string) ([]byte, error) { args := []string{action} for _, definitionPath := range definitionPaths { if filepath.Base(definitionPath) == kustomizationFile { @@ -44,7 +45,7 @@ func modifyKubernetesResources(action string, definitionPaths []string) ([]byte, args = append(args, "-o", "yaml") } - cmd := exec.Command("kubectl", args...) + cmd := exec.CommandContext(ctx, "kubectl", args...) errOutput := new(bytes.Buffer) cmd.Stderr = errOutput @@ -58,9 +59,9 @@ func modifyKubernetesResources(action string, definitionPaths []string) ([]byte, // applyKubernetesResourcesStdin applies a Kubernetes manifest provided as stdin. 
// It returns the resources created as output and an error -func applyKubernetesResourcesStdin(input []byte) ([]byte, error) { +func applyKubernetesResourcesStdin(ctx context.Context, input []byte) ([]byte, error) { // create kubectl apply command - kubectlCmd := exec.Command("kubectl", "apply", "-f", "-", "-o", "yaml") + kubectlCmd := exec.CommandContext(ctx, "kubectl", "apply", "-f", "-", "-o", "yaml") //Stdin of kubectl command is the manifest provided kubectlCmd.Stdin = bytes.NewReader(input) errOutput := new(bytes.Buffer) diff --git a/internal/kubectl/kubectl_apply.go b/internal/kubectl/kubectl_apply.go index cee7c05ddf..460b72b2fd 100644 --- a/internal/kubectl/kubectl_apply.go +++ b/internal/kubectl/kubectl_apply.go @@ -5,6 +5,7 @@ package kubectl import ( + "context" "fmt" "strings" "time" @@ -69,9 +70,9 @@ func (c condition) String() string { } // Apply function adds resources to the Kubernetes cluster based on provided definitions. -func Apply(definitionsPath []string) error { +func Apply(ctx context.Context, definitionsPath []string) error { logger.Debugf("Apply Kubernetes custom definitions") - out, err := modifyKubernetesResources("apply", definitionsPath) + out, err := modifyKubernetesResources(ctx, "apply", definitionsPath) if err != nil { return fmt.Errorf("can't modify Kubernetes resources (apply): %w", err) } @@ -85,9 +86,9 @@ func Apply(definitionsPath []string) error { } // ApplyStdin function adds resources to the Kubernetes cluster based on provided stdin. 
-func ApplyStdin(input []byte) error { +func ApplyStdin(ctx context.Context, input []byte) error { logger.Debugf("Apply Kubernetes stdin") - out, err := applyKubernetesResourcesStdin(input) + out, err := applyKubernetesResourcesStdin(ctx, input) if err != nil { return fmt.Errorf("can't modify Kubernetes resources (apply stdin): %w", err) } diff --git a/internal/kubectl/kubectl_delete.go b/internal/kubectl/kubectl_delete.go index 2116d78cb1..6f3c8ea72f 100644 --- a/internal/kubectl/kubectl_delete.go +++ b/internal/kubectl/kubectl_delete.go @@ -4,8 +4,10 @@ package kubectl +import "context" + // Delete function removes resources from the Kubernetes cluster based on provided definitions. -func Delete(definitionsPath []string) error { - _, err := modifyKubernetesResources("delete", definitionsPath) +func Delete(ctx context.Context, definitionsPath []string) error { + _, err := modifyKubernetesResources(ctx, "delete", definitionsPath) return err } diff --git a/internal/service/boot.go b/internal/service/boot.go index 95a06459eb..5d7ce98a53 100644 --- a/internal/service/boot.go +++ b/internal/service/boot.go @@ -70,7 +70,7 @@ func BootUp(ctx context.Context, options Options) error { // Tear down the service fmt.Println("Take down the service") - err = deployed.TearDown() + err = deployed.TearDown(ctx) if err != nil { return fmt.Errorf("can't tear down the service: %w", err) } diff --git a/internal/servicedeployer/compose.go b/internal/servicedeployer/compose.go index d38787d421..e73625cebb 100644 --- a/internal/servicedeployer/compose.go +++ b/internal/servicedeployer/compose.go @@ -86,14 +86,14 @@ func (d *DockerComposeServiceDeployer) SetUp(ctx context.Context, inCtxt Service d.variant.Env...), ExtraArgs: []string{"--build", "-d"}, } - err = p.Up(opts) + err = p.Up(ctx, opts) if err != nil { return nil, fmt.Errorf("could not boot up service using Docker Compose: %w", err) } err = p.WaitForHealthy(ctx, opts) if err != nil { - processServiceContainerLogs(p, 
compose.CommandOptions{ + processServiceContainerLogs(ctx, p, compose.CommandOptions{ Env: opts.Env, }, outCtxt.Name) return nil, fmt.Errorf("service is unhealthy: %w", err) @@ -109,7 +109,7 @@ func (d *DockerComposeServiceDeployer) SetUp(ctx context.Context, inCtxt Service } logger.Debugf("adding service container %s internal ports to context", p.ContainerName(serviceName)) - serviceComposeConfig, err := p.Config(compose.CommandOptions{ + serviceComposeConfig, err := p.Config(ctx, compose.CommandOptions{ Env: []string{fmt.Sprintf("%s=%s", serviceLogsDirEnv, outCtxt.Logs.Folder.Local)}, }) if err != nil { @@ -133,7 +133,7 @@ func (d *DockerComposeServiceDeployer) SetUp(ctx context.Context, inCtxt Service } // Signal sends a signal to the service. -func (s *dockerComposeDeployedService) Signal(signal string) error { +func (s *dockerComposeDeployedService) Signal(ctx context.Context, signal string) error { p, err := compose.NewProject(s.project, s.ymlPaths...) if err != nil { return fmt.Errorf("could not create Docker Compose project for service: %w", err) @@ -149,7 +149,7 @@ func (s *dockerComposeDeployedService) Signal(signal string) error { opts.Services = append(opts.Services, s.ctxt.Name) } - err = p.Kill(opts) + err = p.Kill(ctx, opts) if err != nil { return fmt.Errorf("could not send %q signal: %w", signal, err) } @@ -157,7 +157,7 @@ func (s *dockerComposeDeployedService) Signal(signal string) error { } // ExitCode returns true if the service is exited and its exit code. -func (s *dockerComposeDeployedService) ExitCode(service string) (bool, int, error) { +func (s *dockerComposeDeployedService) ExitCode(ctx context.Context, service string) (bool, int, error) { p, err := compose.NewProject(s.project, s.ymlPaths...) 
if err != nil { return false, -1, fmt.Errorf("could not create Docker Compose project for service: %w", err) @@ -169,11 +169,11 @@ func (s *dockerComposeDeployedService) ExitCode(service string) (bool, int, erro s.variant.Env...), } - return p.ServiceExitCode(service, opts) + return p.ServiceExitCode(ctx, service, opts) } // TearDown tears down the service. -func (s *dockerComposeDeployedService) TearDown() error { +func (s *dockerComposeDeployedService) TearDown(ctx context.Context) error { logger.Debugf("tearing down service using Docker Compose runner") defer func() { err := files.RemoveContent(s.ctxt.Logs.Folder.Local) @@ -196,9 +196,9 @@ func (s *dockerComposeDeployedService) TearDown() error { s.env, s.variant.Env...), } - processServiceContainerLogs(p, opts, s.ctxt.Name) + processServiceContainerLogs(ctx, p, opts, s.ctxt.Name) - if err := p.Down(compose.CommandOptions{ + if err := p.Down(ctx, compose.CommandOptions{ Env: opts.Env, ExtraArgs: []string{"--volumes"}, // Remove associated volumes. 
}); err != nil { @@ -218,8 +218,8 @@ func (s *dockerComposeDeployedService) SetContext(ctxt ServiceContext) error { return nil } -func processServiceContainerLogs(p *compose.Project, opts compose.CommandOptions, serviceName string) { - content, err := p.Logs(opts) +func processServiceContainerLogs(ctx context.Context, p *compose.Project, opts compose.CommandOptions, serviceName string) { + content, err := p.Logs(ctx, opts) if err != nil { logger.Errorf("can't export service logs: %v", err) return diff --git a/internal/servicedeployer/custom_agent.go b/internal/servicedeployer/custom_agent.go index f1c04ad338..587dd3924f 100644 --- a/internal/servicedeployer/custom_agent.go +++ b/internal/servicedeployer/custom_agent.go @@ -111,7 +111,7 @@ func (d *CustomAgentDeployer) SetUp(ctx context.Context, inCtxt ServiceContext) Env: env, ExtraArgs: []string{"--build", "-d"}, } - err = p.Up(opts) + err = p.Up(ctx, opts) if err != nil { return nil, fmt.Errorf("could not boot up service using Docker Compose: %w", err) } @@ -124,7 +124,7 @@ func (d *CustomAgentDeployer) SetUp(ctx context.Context, inCtxt ServiceContext) err = p.WaitForHealthy(ctx, opts) if err != nil { - processServiceContainerLogs(p, compose.CommandOptions{ + processServiceContainerLogs(ctx, p, compose.CommandOptions{ Env: opts.Env, }, outCtxt.Name) return nil, fmt.Errorf("service is unhealthy: %w", err) @@ -134,7 +134,7 @@ func (d *CustomAgentDeployer) SetUp(ctx context.Context, inCtxt ServiceContext) outCtxt.Hostname = p.ContainerName(serviceName) logger.Debugf("adding service container %s internal ports to context", p.ContainerName(serviceName)) - serviceComposeConfig, err := p.Config(compose.CommandOptions{Env: env}) + serviceComposeConfig, err := p.Config(ctx, compose.CommandOptions{Env: env}) if err != nil { return nil, fmt.Errorf("could not get Docker Compose configuration for service: %w", err) } diff --git a/internal/servicedeployer/deployed_service.go b/internal/servicedeployer/deployed_service.go index 
dc6822022c..8c9d302f11 100644 --- a/internal/servicedeployer/deployed_service.go +++ b/internal/servicedeployer/deployed_service.go @@ -4,17 +4,20 @@ package servicedeployer -import "errors" +import ( + "context" + "errors" +) var ErrNotSupported error = errors.New("not supported") // DeployedService defines the interface for interacting with a service that has been deployed. type DeployedService interface { // TearDown implements the logic for tearing down a service. - TearDown() error + TearDown(context.Context) error // Signal sends a signal to the service. - Signal(signal string) error + Signal(ctx context.Context, signal string) error // Context returns the current context from the service. Context() ServiceContext @@ -23,5 +26,5 @@ type DeployedService interface { SetContext(str ServiceContext) error // ExitCode returns true if the service is exited and its exit code. - ExitCode(service string) (bool, int, error) + ExitCode(ctx context.Context, service string) (bool, int, error) } diff --git a/internal/servicedeployer/kubernetes.go b/internal/servicedeployer/kubernetes.go index 913cd93391..cff27920d1 100644 --- a/internal/servicedeployer/kubernetes.go +++ b/internal/servicedeployer/kubernetes.go @@ -36,7 +36,7 @@ type kubernetesDeployedService struct { definitionsDir string } -func (s kubernetesDeployedService) TearDown() error { +func (s kubernetesDeployedService) TearDown(ctx context.Context) error { logger.Debugf("uninstall custom Kubernetes definitions (directory: %s)", s.definitionsDir) definitionPaths, err := findKubernetesDefinitions(s.definitionsDir) @@ -49,18 +49,18 @@ func (s kubernetesDeployedService) TearDown() error { return nil } - err = kubectl.Delete(definitionPaths) + err = kubectl.Delete(ctx, definitionPaths) if err != nil { return fmt.Errorf("can't uninstall Kubernetes resources (path: %s): %w", s.definitionsDir, err) } return nil } -func (s kubernetesDeployedService) Signal(_ string) error { +func (s kubernetesDeployedService) Signal(_ 
context.Context, _ string) error { return ErrNotSupported } -func (s kubernetesDeployedService) ExitCode(_ string) (bool, int, error) { +func (s kubernetesDeployedService) ExitCode(_ context.Context, _ string) (bool, int, error) { return false, -1, ErrNotSupported } @@ -87,7 +87,7 @@ func NewKubernetesServiceDeployer(profile *profile.Profile, definitionsPath stri // SetUp function links the kind container with elastic-package-stack network, installs Elastic-Agent and optionally // custom YAML definitions. func (ksd KubernetesServiceDeployer) SetUp(ctx context.Context, service ServiceContext) (DeployedService, error) { - err := kind.VerifyContext() + err := kind.VerifyContext(ctx) if err != nil { return nil, fmt.Errorf("kind context verification failed: %w", err) } @@ -97,12 +97,12 @@ func (ksd KubernetesServiceDeployer) SetUp(ctx context.Context, service ServiceC return nil, fmt.Errorf("can't connect control plane to Elastic stack network: %w", err) } - err = installElasticAgentInCluster(ksd.profile, ksd.stackVersion) + err = installElasticAgentInCluster(ctx, ksd.profile, ksd.stackVersion) if err != nil { return nil, fmt.Errorf("can't install Elastic-Agent in the Kubernetes cluster: %w", err) } - err = ksd.installCustomDefinitions() + err = ksd.installCustomDefinitions(ctx) if err != nil { return nil, fmt.Errorf("can't install custom definitions in the Kubernetes cluster: %w", err) } @@ -118,7 +118,7 @@ func (ksd KubernetesServiceDeployer) SetUp(ctx context.Context, service ServiceC }, nil } -func (ksd KubernetesServiceDeployer) installCustomDefinitions() error { +func (ksd KubernetesServiceDeployer) installCustomDefinitions(ctx context.Context) error { logger.Debugf("install custom Kubernetes definitions (directory: %s)", ksd.definitionsDir) definitionPaths, err := findKubernetesDefinitions(ksd.definitionsDir) @@ -131,7 +131,7 @@ func (ksd KubernetesServiceDeployer) installCustomDefinitions() error { return nil } - err = kubectl.Apply(definitionPaths) + err = 
kubectl.Apply(ctx, definitionPaths) if err != nil { return fmt.Errorf("can't install custom definitions: %w", err) } @@ -151,7 +151,7 @@ func findKubernetesDefinitions(definitionsDir string) ([]string, error) { return definitionPaths, nil } -func installElasticAgentInCluster(profile *profile.Profile, stackVersion string) error { +func installElasticAgentInCluster(ctx context.Context, profile *profile.Profile, stackVersion string) error { logger.Debug("install Elastic Agent in the Kubernetes cluster") elasticAgentManagedYaml, err := getElasticAgentYAML(profile, stackVersion) @@ -159,7 +159,7 @@ func installElasticAgentInCluster(profile *profile.Profile, stackVersion string) return fmt.Errorf("can't retrieve Kubernetes file for Elastic Agent: %w", err) } - err = kubectl.ApplyStdin(elasticAgentManagedYaml) + err = kubectl.ApplyStdin(ctx, elasticAgentManagedYaml) if err != nil { return fmt.Errorf("can't install Elastic-Agent in Kubernetes cluster: %w", err) } diff --git a/internal/servicedeployer/terraform.go b/internal/servicedeployer/terraform.go index 9b27596147..fead3b69f2 100644 --- a/internal/servicedeployer/terraform.go +++ b/internal/servicedeployer/terraform.go @@ -128,7 +128,7 @@ func (tsd TerraformServiceDeployer) SetUp(ctx context.Context, svcCtxt ServiceCo Env: service.env, } // Set custom aliases, which may be used in agent policies. 
- serviceComposeConfig, err := p.Config(opts) + serviceComposeConfig, err := p.Config(ctx, opts) if err != nil { return nil, fmt.Errorf("could not get Docker Compose configuration for service: %w", err) } @@ -142,14 +142,14 @@ func (tsd TerraformServiceDeployer) SetUp(ctx context.Context, svcCtxt ServiceCo Env: service.env, ExtraArgs: []string{"--build", "-d"}, } - err = p.Up(opts) + err = p.Up(ctx, opts) if err != nil { return nil, fmt.Errorf("could not boot up service using Docker Compose: %w", err) } err = p.WaitForHealthy(ctx, opts) if err != nil { - processServiceContainerLogs(p, compose.CommandOptions{ + processServiceContainerLogs(ctx, p, compose.CommandOptions{ Env: opts.Env, }, outCtxt.Name) //lint:ignore ST1005 error starting with product name can be capitalized diff --git a/internal/stack/boot.go b/internal/stack/boot.go index c4f66457eb..7ac853a065 100644 --- a/internal/stack/boot.go +++ b/internal/stack/boot.go @@ -5,6 +5,8 @@ package stack import ( + "context" + "errors" "fmt" "os" "path/filepath" @@ -30,7 +32,7 @@ func DockerComposeProjectName(profile *profile.Profile) string { } // BootUp function boots up the Elastic stack. -func BootUp(options Options) error { +func BootUp(ctx context.Context, options Options) error { // Print information before starting the stack, for cases where // this is executed in the foreground, without daemon mode. config := Config{ @@ -78,12 +80,12 @@ func BootUp(options Options) error { return fmt.Errorf("creating stack files failed: %w", err) } - err = dockerComposeBuild(options) + err = dockerComposeBuild(ctx, options) if err != nil { return fmt.Errorf("building docker images failed: %w", err) } - err = dockerComposeUp(options) + err = dockerComposeUp(ctx, options) if err != nil { // At least starting on 8.6.0, fleet-server may be reconfigured or // restarted after being healthy. If elastic-agent tries to enroll at @@ -91,11 +93,11 @@ func BootUp(options Options) error { // to fail too. 
// As a workaround, try to give another chance to docker-compose if only - elastic-agent failed. - if onlyElasticAgentFailed(options) { + if onlyElasticAgentFailed(ctx, options) && !errors.Is(err, context.Canceled) { sleepTime := 10 * time.Second fmt.Printf("Elastic Agent failed to start, trying again in %s.\n", sleepTime) time.Sleep(sleepTime) - err = dockerComposeUp(options) + err = dockerComposeUp(ctx, options) } if err != nil { return fmt.Errorf("running docker-compose failed: %w", err) } @@ -110,8 +112,8 @@ func BootUp(options Options) error { return nil } -func onlyElasticAgentFailed(options Options) bool { - status, err := Status(options) +func onlyElasticAgentFailed(ctx context.Context, options Options) bool { + status, err := Status(ctx, options) if err != nil { fmt.Printf("Failed to check status of the stack after failure: %v\n", err) return false @@ -130,8 +132,8 @@ func onlyElasticAgentFailed(options Options) bool { } // TearDown function takes down the testing stack. -func TearDown(options Options) error { - err := dockerComposeDown(options) +func TearDown(ctx context.Context, options Options) error { + err := dockerComposeDown(ctx, options) if err != nil { return fmt.Errorf("stopping docker containers failed: %w", err) } diff --git a/internal/stack/clients.go b/internal/stack/clients.go index 04a35708f0..39546e6a71 100644 --- a/internal/stack/clients.go +++ b/internal/stack/clients.go @@ -5,6 +5,7 @@ package stack import ( + "context" "errors" "fmt" "os" @@ -45,7 +46,8 @@ func NewElasticsearchClientFromProfile(profile *profile.Profile, customOptions . elasticsearchHost, found := os.LookupEnv(ElasticsearchHostEnv) if !found { - status, err := Status(Options{Profile: profile}) + // Using background context on initial call to avoid context cancellation.
+ status, err := Status(context.Background(), Options{Profile: profile}) if err != nil { return nil, fmt.Errorf("failed to check status of stack in current profile: %w", err) } @@ -109,7 +111,8 @@ func NewKibanaClientFromProfile(profile *profile.Profile, customOptions ...kiban kibanaHost, found := os.LookupEnv(KibanaHostEnv) if !found { - status, err := Status(Options{Profile: profile}) + // Using background context on initial call to avoid context cancellation. + status, err := Status(context.Background(), Options{Profile: profile}) if err != nil { return nil, fmt.Errorf("failed to check status of stack in current profile: %w", err) } diff --git a/internal/stack/compose.go b/internal/stack/compose.go index 828d052f24..e7e3b47b5f 100644 --- a/internal/stack/compose.go +++ b/internal/stack/compose.go @@ -5,6 +5,7 @@ package stack import ( + "context" "fmt" "strings" @@ -51,7 +52,7 @@ func (eb *envBuilder) build() []string { return eb.vars } -func dockerComposeBuild(options Options) error { +func dockerComposeBuild(ctx context.Context, options Options) error { c, err := compose.NewProject(DockerComposeProjectName(options.Profile), options.Profile.Path(profileStackPath, SnapshotFile)) if err != nil { return fmt.Errorf("could not create docker compose project: %w", err) } @@ -71,13 +72,13 @@ func dockerComposeBuild(options Options) error { Services: withIsReadyServices(withDependentServices(options.Services)), } - if err := c.Build(opts); err != nil { + if err := c.Build(ctx, opts); err != nil { return fmt.Errorf("running command failed: %w", err) } return nil } -func dockerComposePull(options Options) error { +func dockerComposePull(ctx context.Context, options Options) error { c, err := compose.NewProject(DockerComposeProjectName(options.Profile), options.Profile.Path(profileStackPath, SnapshotFile)) if err != nil { return fmt.Errorf("could not create docker compose project: %w", err) } @@ -97,13 +98,13 @@ func dockerComposePull(options Options) error { Services:
withIsReadyServices(withDependentServices(options.Services)), } - if err := c.Pull(opts); err != nil { + if err := c.Pull(ctx, opts); err != nil { return fmt.Errorf("running command failed: %w", err) } return nil } -func dockerComposeUp(options Options) error { +func dockerComposeUp(ctx context.Context, options Options) error { c, err := compose.NewProject(DockerComposeProjectName(options.Profile), options.Profile.Path(profileStackPath, SnapshotFile)) if err != nil { return fmt.Errorf("could not create docker compose project: %w", err) @@ -129,13 +130,13 @@ func dockerComposeUp(options Options) error { Services: withIsReadyServices(withDependentServices(options.Services)), } - if err := c.Up(opts); err != nil { + if err := c.Up(ctx, opts); err != nil { return fmt.Errorf("running command failed: %w", err) } return nil } -func dockerComposeDown(options Options) error { +func dockerComposeDown(ctx context.Context, options Options) error { c, err := compose.NewProject(DockerComposeProjectName(options.Profile), options.Profile.Path(profileStackPath, SnapshotFile)) if err != nil { return fmt.Errorf("could not create docker compose project: %w", err) @@ -155,7 +156,7 @@ func dockerComposeDown(options Options) error { // Remove associated volumes. ExtraArgs: []string{"--volumes", "--remove-orphans"}, } - if err := c.Down(downOptions); err != nil { + if err := c.Down(ctx, downOptions); err != nil { return fmt.Errorf("running command failed: %w", err) } return nil @@ -182,7 +183,7 @@ func withIsReadyServices(services []string) []string { return allServices } -func dockerComposeStatus(options Options) ([]ServiceStatus, error) { +func dockerComposeStatus(ctx context.Context, options Options) ([]ServiceStatus, error) { var services []ServiceStatus // query directly to docker to avoid load environment variables (e.g. 
STACK_VERSION_VARIANT) and profiles containerIDs, err := docker.ContainerIDsWithLabel(projectLabelDockerCompose, DockerComposeProjectName(options.Profile)) diff --git a/internal/stack/dump.go b/internal/stack/dump.go index cb53615740..ff92703a85 100644 --- a/internal/stack/dump.go +++ b/internal/stack/dump.go @@ -5,6 +5,7 @@ package stack import ( + "context" "fmt" "os" "path/filepath" @@ -25,17 +26,17 @@ type DumpOptions struct { } // Dump function exports stack data and dumps them as local artifacts, which can be used for debug purposes. -func Dump(options DumpOptions) (string, error) { +func Dump(ctx context.Context, options DumpOptions) (string, error) { logger.Debugf("Dump Elastic stack data") - err := dumpStackLogs(options) + err := dumpStackLogs(ctx, options) if err != nil { return "", fmt.Errorf("can't dump Elastic stack logs: %w", err) } return options.Output, nil } -func dumpStackLogs(options DumpOptions) error { +func dumpStackLogs(ctx context.Context, options DumpOptions) error { logger.Debugf("Dump stack logs (location: %s)", options.Output) err := os.RemoveAll(options.Output) if err != nil { @@ -56,7 +57,7 @@ func dumpStackLogs(options DumpOptions) error { for _, serviceName := range services { logger.Debugf("Dump stack logs for %s", serviceName) - content, err := dockerComposeLogs(serviceName, options.Profile) + content, err := dockerComposeLogs(ctx, serviceName, options.Profile) if err != nil { logger.Errorf("can't fetch service logs (service: %s): %v", serviceName, err) } else { diff --git a/internal/stack/logs.go b/internal/stack/logs.go index fb31f46706..9088eda4ea 100644 --- a/internal/stack/logs.go +++ b/internal/stack/logs.go @@ -5,6 +5,7 @@ package stack import ( + "context" "fmt" "path/filepath" @@ -14,7 +15,7 @@ import ( "github.com/elastic/elastic-package/internal/profile" ) -func dockerComposeLogs(serviceName string, profile *profile.Profile) ([]byte, error) { +func dockerComposeLogs(ctx context.Context, serviceName string, profile 
*profile.Profile) ([]byte, error) { appConfig, err := install.Configuration() if err != nil { return nil, fmt.Errorf("can't read application configuration: %w", err) @@ -36,7 +37,7 @@ func dockerComposeLogs(serviceName string, profile *profile.Profile) ([]byte, er Services: []string{serviceName}, } - out, err := p.Logs(opts) + out, err := p.Logs(ctx, opts) if err != nil { return nil, fmt.Errorf("running command failed: %w", err) } diff --git a/internal/stack/providers.go b/internal/stack/providers.go index 730ad7cedb..8fda021e5e 100644 --- a/internal/stack/providers.go +++ b/internal/stack/providers.go @@ -5,6 +5,7 @@ package stack import ( + "context" "fmt" "strings" @@ -34,19 +35,19 @@ type Printer interface { // Provider is the implementation of a stack provider. type Provider interface { // BootUp starts a stack. - BootUp(Options) error + BootUp(context.Context, Options) error // TearDown stops and/or removes a stack. - TearDown(Options) error + TearDown(context.Context, Options) error // Update updates resources associated to a stack. - Update(Options) error + Update(context.Context, Options) error // Dump dumps data for debug purpouses. - Dump(DumpOptions) (string, error) + Dump(context.Context, DumpOptions) (string, error) // Status obtains status information of the stack. - Status(Options) ([]ServiceStatus, error) + Status(context.Context, Options) ([]ServiceStatus, error) } // BuildProvider returns the provider for the given name. 
@@ -62,22 +63,22 @@ func BuildProvider(name string, profile *profile.Profile) (Provider, error) { type composeProvider struct{} -func (*composeProvider) BootUp(options Options) error { - return BootUp(options) +func (*composeProvider) BootUp(ctx context.Context, options Options) error { + return BootUp(ctx, options) } -func (*composeProvider) TearDown(options Options) error { - return TearDown(options) +func (*composeProvider) TearDown(ctx context.Context, options Options) error { + return TearDown(ctx, options) } -func (*composeProvider) Update(options Options) error { - return Update(options) +func (*composeProvider) Update(ctx context.Context, options Options) error { + return Update(ctx, options) } -func (*composeProvider) Dump(options DumpOptions) (string, error) { - return Dump(options) +func (*composeProvider) Dump(ctx context.Context, options DumpOptions) (string, error) { + return Dump(ctx, options) } -func (*composeProvider) Status(options Options) ([]ServiceStatus, error) { - return Status(options) +func (*composeProvider) Status(ctx context.Context, options Options) ([]ServiceStatus, error) { + return Status(ctx, options) } diff --git a/internal/stack/serverless.go b/internal/stack/serverless.go index 7fcf5b726d..3c6bc31a68 100644 --- a/internal/stack/serverless.go +++ b/internal/stack/serverless.go @@ -235,7 +235,7 @@ func newServerlessProvider(profile *profile.Profile) (*serverlessProvider, error return &serverlessProvider{profile, client, nil, nil}, nil } -func (sp *serverlessProvider) BootUp(options Options) error { +func (sp *serverlessProvider) BootUp(ctx context.Context, options Options) error { logger.Warn("Elastic Serverless provider is in technical preview") config, err := LoadConfig(sp.profile) @@ -292,7 +292,7 @@ func (sp *serverlessProvider) BootUp(options Options) error { } logger.Infof("Starting local services") - err = sp.startLocalServices(options, config) + err = sp.startLocalServices(ctx, options, config) if err != nil { return 
fmt.Errorf("failed to start local services: %w", err) } @@ -318,7 +318,7 @@ func (sp *serverlessProvider) localServicesComposeProject() (*compose.Project, e return compose.NewProject(sp.composeProjectName(), composeFile) } -func (sp *serverlessProvider) startLocalServices(options Options, config Config) error { +func (sp *serverlessProvider) startLocalServices(ctx context.Context, options Options, config Config) error { err := applyServerlessResources(sp.profile, options.StackVersion, config) if err != nil { return fmt.Errorf("could not initialize compose files for local services: %w", err) @@ -329,12 +329,12 @@ func (sp *serverlessProvider) startLocalServices(options Options, config Config) return fmt.Errorf("could not initialize local services compose project") } - err = project.Build(compose.CommandOptions{}) + err = project.Build(ctx, compose.CommandOptions{}) if err != nil { return fmt.Errorf("failed to build images for local services: %w", err) } - err = project.Up(compose.CommandOptions{ExtraArgs: []string{"-d"}}) + err = project.Up(ctx, compose.CommandOptions{ExtraArgs: []string{"-d"}}) if err != nil { // At least starting on 8.6.0, fleet-server may be reconfigured or // restarted after being healthy. If elastic-agent tries to enroll at @@ -343,7 +343,7 @@ func (sp *serverlessProvider) startLocalServices(options Options, config Config) // As a workaround, try to give another chance to docker-compose if only // elastic-agent failed. 
fmt.Println("Elastic Agent failed to start, trying again.") - err = project.Up(compose.CommandOptions{ExtraArgs: []string{"-d"}}) + err = project.Up(ctx, compose.CommandOptions{ExtraArgs: []string{"-d"}}) if err != nil { return fmt.Errorf("failed to start local agent: %w", err) } @@ -352,7 +352,7 @@ func (sp *serverlessProvider) startLocalServices(options Options, config Config) return nil } -func (sp *serverlessProvider) TearDown(options Options) error { +func (sp *serverlessProvider) TearDown(ctx context.Context, options Options) error { config, err := LoadConfig(sp.profile) if err != nil { return fmt.Errorf("failed to load configuration: %w", err) @@ -360,7 +360,7 @@ func (sp *serverlessProvider) TearDown(options Options) error { var errs error - err = sp.destroyLocalServices() + err = sp.destroyLocalServices(ctx) if err != nil { logger.Errorf("failed to destroy local services: %v", err) errs = fmt.Errorf("failed to destroy local services: %w", err) @@ -384,13 +384,13 @@ func (sp *serverlessProvider) TearDown(options Options) error { return errs } -func (sp *serverlessProvider) destroyLocalServices() error { +func (sp *serverlessProvider) destroyLocalServices(ctx context.Context) error { project, err := sp.localServicesComposeProject() if err != nil { return fmt.Errorf("could not initialize local services compose project") } - err = project.Down(compose.CommandOptions{}) + err = project.Down(ctx, compose.CommandOptions{}) if err != nil { return fmt.Errorf("failed to destroy local services: %w", err) } @@ -398,15 +398,15 @@ func (sp *serverlessProvider) destroyLocalServices() error { return nil } -func (sp *serverlessProvider) Update(options Options) error { +func (sp *serverlessProvider) Update(ctx context.Context, options Options) error { return fmt.Errorf("not implemented") } -func (sp *serverlessProvider) Dump(options DumpOptions) (string, error) { - return Dump(options) +func (sp *serverlessProvider) Dump(ctx context.Context, options DumpOptions) (string, 
error) { + return Dump(ctx, options) } -func (sp *serverlessProvider) Status(options Options) ([]ServiceStatus, error) { +func (sp *serverlessProvider) Status(ctx context.Context, options Options) ([]ServiceStatus, error) { logger.Warn("Elastic Serverless provider is in technical preview") config, err := LoadConfig(sp.profile) if err != nil { @@ -421,7 +421,6 @@ func (sp *serverlessProvider) Status(options Options) ([]ServiceStatus, error) { return nil, err } - ctx := context.TODO() projectServiceStatus, err := project.Status(ctx, sp.elasticsearchClient, sp.kibanaClient) if err != nil { return nil, err diff --git a/internal/stack/status.go b/internal/stack/status.go index d9683a12a3..c41dc44e16 100644 --- a/internal/stack/status.go +++ b/internal/stack/status.go @@ -5,13 +5,14 @@ package stack import ( + "context" "sort" "strings" ) // Status shows the status for each service -func Status(options Options) ([]ServiceStatus, error) { - servicesStatus, err := dockerComposeStatus(options) +func Status(ctx context.Context, options Options) ([]ServiceStatus, error) { + servicesStatus, err := dockerComposeStatus(ctx, options) if err != nil { return nil, err } diff --git a/internal/stack/update.go b/internal/stack/update.go index 924ead0c43..991622e641 100644 --- a/internal/stack/update.go +++ b/internal/stack/update.go @@ -5,13 +5,14 @@ package stack import ( + "context" "fmt" "github.com/elastic/elastic-package/internal/docker" ) // Update pulls down the most recent versions of the Docker images. 
-func Update(options Options) error { +func Update(ctx context.Context, options Options) error { err := applyResources(options.Profile, options.StackVersion) if err != nil { return fmt.Errorf("creating stack files failed: %w", err) @@ -22,7 +23,7 @@ func Update(options Options) error { return fmt.Errorf("pulling package-registry docker image failed: %w", err) } - err = dockerComposePull(options) + err = dockerComposePull(ctx, options) if err != nil { return fmt.Errorf("updating docker images failed: %w", err) } diff --git a/internal/testrunner/runners/system/runner.go b/internal/testrunner/runners/system/runner.go index f85ebdd824..a123a5dc18 100644 --- a/internal/testrunner/runners/system/runner.go +++ b/internal/testrunner/runners/system/runner.go @@ -272,7 +272,7 @@ func (r *runner) run(ctx context.Context) (results []testrunner.TestResult, err defer os.RemoveAll(tempDir) dumpOptions := stack.DumpOptions{Output: tempDir, Profile: r.options.Profile} - _, err = stack.Dump(dumpOptions) + _, err = stack.Dump(ctx, dumpOptions) if err != nil { return nil, fmt.Errorf("dump failed: %w", err) } @@ -515,7 +515,7 @@ func (r *runner) runTest(ctx context.Context, config *testConfig, serviceContext serviceContext = service.Context() r.shutdownServiceHandler = func() error { logger.Debug("tearing down service...") - if err := service.TearDown(); err != nil { + if err := service.TearDown(ctx); err != nil { return fmt.Errorf("error tearing down service: %w", err) } @@ -685,7 +685,7 @@ func (r *runner) runTest(ctx context.Context, config *testConfig, serviceContext // Signal to the service that the agent is ready (policy is assigned). 
if config.ServiceNotifySignal != "" { - if err = service.Signal(config.ServiceNotifySignal); err != nil { + if err = service.Signal(ctx, config.ServiceNotifySignal); err != nil { return result.WithError(fmt.Errorf("failed to notify test service: %w", err)) } } @@ -725,7 +725,7 @@ func (r *runner) runTest(ctx context.Context, config *testConfig, serviceContext }, waitForDataTimeout) if config.Service != "" && !config.IgnoreServiceError { - exited, code, err := service.ExitCode(config.Service) + exited, code, err := service.ExitCode(ctx, config.Service) if err != nil && !errors.Is(err, servicedeployer.ErrNotSupported) { return result.WithError(err) } diff --git a/main.go b/main.go index b40df33803..b23da74445 100644 --- a/main.go +++ b/main.go @@ -6,6 +6,7 @@ package main import ( "context" + "errors" "log" "os" "os/signal" @@ -26,7 +27,18 @@ func main() { defer cancel() err = rootCmd.ExecuteContext(ctx) + if errIsInterruption(err) { + os.Exit(130) + } if err != nil { os.Exit(1) } } + +func errIsInterruption(err error) bool { + if errors.Is(err, context.Canceled) { + return true + } + + return false +} From 7fe9eea417693e631b5d49938a04d8266e372ab6 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 14 Feb 2024 17:24:49 +0100 Subject: [PATCH 04/32] Context for cleanup handlers --- cmd/root.go | 2 +- internal/benchrunner/runners/rally/runner.go | 37 ++++++++++--------- internal/benchrunner/runners/stream/runner.go | 17 +++++---- internal/benchrunner/runners/system/runner.go | 15 +++++--- internal/compose/compose.go | 2 +- internal/kibana/agents.go | 2 +- internal/serverless/client.go | 11 ++---- internal/stack/serverless.go | 30 +++++++-------- internal/testrunner/runners/system/runner.go | 2 +- internal/version/check_update.go | 4 +- 10 files changed, 64 insertions(+), 58 deletions(-) diff --git a/cmd/root.go b/cmd/root.go index b2b19bc7ae..8557d2c88d 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -82,6 +82,6 @@ func processPersistentFlags(cmd 
*cobra.Command, args []string) error { } func checkVersionUpdate(cmd *cobra.Command, args []string) error { - version.CheckUpdate() + version.CheckUpdate(cmd.Context()) return nil } diff --git a/internal/benchrunner/runners/rally/runner.go b/internal/benchrunner/runners/rally/runner.go index 37bf5dd31f..f82469cb64 100644 --- a/internal/benchrunner/runners/rally/runner.go +++ b/internal/benchrunner/runners/rally/runner.go @@ -164,11 +164,11 @@ type runner struct { reportFile string // Execution order of following handlers is defined in runner.TearDown() method. - persistRallyTrackHandler func() error - removePackageHandler func() error - wipeDataStreamHandler func() error - clearCorporaHandler func() error - clearTrackHandler func() error + persistRallyTrackHandler func(context.Context) error + removePackageHandler func(context.Context) error + wipeDataStreamHandler func(context.Context) error + clearCorporaHandler func(context.Context) error + clearTrackHandler func(context.Context) error } func NewRallyBenchmark(opts Options) benchrunner.Runner { @@ -193,31 +193,34 @@ func (r *runner) TearDown(ctx context.Context) error { } } + // Using a fresh background context so cleanup operations are not interrupted by cancellation.
+ cleanupCtx := context.Background() + var merr multierror.Error if r.persistRallyTrackHandler != nil { - if err := r.persistRallyTrackHandler(); err != nil { + if err := r.persistRallyTrackHandler(cleanupCtx); err != nil { merr = append(merr, err) } r.persistRallyTrackHandler = nil } if r.removePackageHandler != nil { - if err := r.removePackageHandler(); err != nil { + if err := r.removePackageHandler(cleanupCtx); err != nil { merr = append(merr, err) } r.removePackageHandler = nil } if r.wipeDataStreamHandler != nil { - if err := r.wipeDataStreamHandler(); err != nil { + if err := r.wipeDataStreamHandler(cleanupCtx); err != nil { merr = append(merr, err) } r.wipeDataStreamHandler = nil } if r.clearCorporaHandler != nil { - if err := r.clearCorporaHandler(); err != nil { + if err := r.clearCorporaHandler(cleanupCtx); err != nil { merr = append(merr, err) } r.clearCorporaHandler = nil @@ -387,7 +390,7 @@ func (r *runner) extractSimulatedTemplate(indexTemplate string) (string, error) func (r *runner) wipeDataStreamOnSetup() error { // Delete old data logger.Debug("deleting old data in data stream...") - r.wipeDataStreamHandler = func() error { + r.wipeDataStreamHandler = func(context.Context) error { logger.Debugf("deleting data in data stream...") if err := r.deleteDataStreamDocs(r.runtimeDataStream); err != nil { return fmt.Errorf("error deleting data in data stream: %w", err) @@ -469,7 +472,7 @@ func (r *runner) installPackageFromRegistry(packageName, packageVersion string) return fmt.Errorf("cannot install package %s@%s: %w", packageName, packageVersion, err) } - r.removePackageHandler = func() error { + r.removePackageHandler = func(context.Context) error { logger.Debug("removing benchmark package...") if _, err := r.options.KibanaClient.RemovePackage(packageName, packageVersion); err != nil { return fmt.Errorf("error removing benchmark package: %w", err) @@ -497,7 +500,7 @@ func (r *runner) installPackageFromPackageRoot() error { return fmt.Errorf("failed to 
install package: %w", err) } - r.removePackageHandler = func() error { + r.removePackageHandler = func(context.Context) error { if err := installer.Uninstall(); err != nil { return fmt.Errorf("error removing benchmark package: %w", err) } @@ -713,7 +716,7 @@ func (r *runner) runGenerator(destDir string) (uint64, error) { r.corpusFile = corpusFile.Name() - r.clearCorporaHandler = func() error { + r.clearCorporaHandler = func(context.Context) error { return errors.Join( os.Remove(r.corpusFile), ) @@ -780,7 +783,7 @@ func (r *runner) createRallyTrack(corpusDocsCount uint64, destDir string) error r.reportFile = reportFile.Name() if r.options.RallyTrackOutputDir != "" { - r.persistRallyTrackHandler = func() error { + r.persistRallyTrackHandler = func(context.Context) error { err := os.MkdirAll(r.options.RallyTrackOutputDir, 0755) if err != nil { return fmt.Errorf("cannot not create rally track output dir: %w", err) @@ -804,7 +807,7 @@ func (r *runner) createRallyTrack(corpusDocsCount uint64, destDir string) error } } - r.clearTrackHandler = func() error { + r.clearTrackHandler = func(context.Context) error { return errors.Join( os.Remove(r.trackFile), os.Remove(r.reportFile), @@ -852,7 +855,7 @@ func (r *runner) copyCorpusFile(corpusPath, destDir string) (uint64, error) { r.corpusFile = corpusFile.Name() - r.clearCorporaHandler = func() error { + r.clearCorporaHandler = func(context.Context) error { return errors.Join( os.Remove(r.corpusFile), ) @@ -1181,7 +1184,7 @@ func waitUntilTrue(ctx context.Context, fn func(ctx context.Context) (bool, erro case <-retryTicker.C: continue case <-ctx.Done(): - return false, fmt.Errorf("context done: %w", ctx.Err()) + return false, ctx.Err() case <-timeoutTimer.C: return false, nil } diff --git a/internal/benchrunner/runners/stream/runner.go b/internal/benchrunner/runners/stream/runner.go index 998c878ba8..dd6ff51bee 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ 
-50,8 +50,8 @@ type runner struct { done chan struct{} // Execution order of following handlers is defined in runner.TearDown() method. - removePackageHandler func() error - wipeDataStreamHandler func() error + removePackageHandler func(context.Context) error + wipeDataStreamHandler func(context.Context) error } func NewStreamBenchmark(opts Options) benchrunner.Runner { @@ -76,17 +76,20 @@ func (r *runner) TearDown(ctx context.Context) error { return nil } + // Using a fresh background context so cleanup operations are not interrupted by cancellation. + cleanupCtx := context.Background() + var merr multierror.Error if r.removePackageHandler != nil { - if err := r.removePackageHandler(); err != nil { + if err := r.removePackageHandler(cleanupCtx); err != nil { merr = append(merr, err) } r.removePackageHandler = nil } if r.wipeDataStreamHandler != nil { - if err := r.wipeDataStreamHandler(); err != nil { + if err := r.wipeDataStreamHandler(cleanupCtx); err != nil { merr = append(merr, err) } r.wipeDataStreamHandler = nil @@ -179,7 +182,7 @@ func (r *runner) setUp(ctx context.Context) error { func (r *runner) wipeDataStreamsOnSetup() error { // Delete old data logger.Debug("deleting old data in data stream...") - r.wipeDataStreamHandler = func() error { + r.wipeDataStreamHandler = func(context.Context) error { logger.Debugf("deleting data in data stream...") for _, runtimeDataStream := range r.runtimeDataStreams { if err := r.deleteDataStreamDocs(runtimeDataStream); err != nil { @@ -232,7 +235,7 @@ func (r *runner) installPackageFromPackageRoot() error { return fmt.Errorf("failed to install package: %w", err) } - r.removePackageHandler = func() error { + r.removePackageHandler = func(context.Context) error { if err := installer.Uninstall(); err != nil { return fmt.Errorf("error removing benchmark package: %w", err) } @@ -614,7 +617,7 @@ func waitUntilTrue(ctx context.Context, fn func(ctx context.Context) (bool, erro case <-retryTicker.C: continue case <-ctx.Done(): - return false, 
fmt.Errorf("context done: %w", ctx.Err()) + return ctx.Err() case <-timeoutTimer.C: return false, nil } diff --git a/internal/benchrunner/runners/system/runner.go b/internal/benchrunner/runners/system/runner.go index cc4a27221c..0c0667ef9d 100644 --- a/internal/benchrunner/runners/system/runner.go +++ b/internal/benchrunner/runners/system/runner.go @@ -87,38 +87,41 @@ func (r *runner) TearDown(ctx context.Context) error { } + // Using a fresh background context so cleanup operations are not interrupted by cancellation. + cleanupCtx := context.Background() + var merr multierror.Error if r.resetAgentPolicyHandler != nil { - if err := r.resetAgentPolicyHandler(context.Background()); err != nil { + if err := r.resetAgentPolicyHandler(cleanupCtx); err != nil { merr = append(merr, err) } r.resetAgentPolicyHandler = nil } if r.deletePolicyHandler != nil { - if err := r.deletePolicyHandler(context.Background()); err != nil { + if err := r.deletePolicyHandler(cleanupCtx); err != nil { merr = append(merr, err) } r.deletePolicyHandler = nil } if r.shutdownServiceHandler != nil { - if err := r.shutdownServiceHandler(context.Background()); err != nil { + if err := r.shutdownServiceHandler(cleanupCtx); err != nil { merr = append(merr, err) } r.shutdownServiceHandler = nil } if r.wipeDataStreamHandler != nil { - if err := r.wipeDataStreamHandler(context.Background()); err != nil { + if err := r.wipeDataStreamHandler(cleanupCtx); err != nil { merr = append(merr, err) } r.wipeDataStreamHandler = nil } if r.clearCorporaHandler != nil { - if err := r.clearCorporaHandler(context.Background()); err != nil { + if err := r.clearCorporaHandler(cleanupCtx); err != nil { merr = append(merr, err) } r.clearCorporaHandler = nil @@ -982,7 +985,7 @@ func waitUntilTrue(ctx context.Context, fn func(context.Context) (bool, error), case <-retryTicker.C: continue case <-ctx.Done(): - return false, fmt.Errorf("context done: %w", ctx.Err()) + return false, ctx.Err() case <-timeoutTicker.C: return false, nil } diff --git 
a/internal/compose/compose.go b/internal/compose/compose.go index 511bd02ad2..9802798234 100644 --- a/internal/compose/compose.go +++ b/internal/compose/compose.go @@ -394,7 +394,7 @@ func (p *Project) WaitForHealthy(ctx context.Context, opts CommandOptions) error select { case <-ctx.Done(): - return fmt.Errorf("context done: %w", ctx.Err()) + return ctx.Err() // NOTE: using after does not guarantee interval but it's ok for this use case case <-time.After(waitForHealthyInterval): } diff --git a/internal/kibana/agents.go b/internal/kibana/agents.go index 1d8ab0fbcd..421ece0baa 100644 --- a/internal/kibana/agents.go +++ b/internal/kibana/agents.go @@ -109,7 +109,7 @@ func (c *Client) waitUntilPolicyAssigned(ctx context.Context, a Agent, p Policy) logger.Debugf("Wait until the policy (ID: %s, revision: %d) is assigned to the agent (ID: %s)...", p.ID, p.Revision, a.ID) select { case <-ctx.Done(): - return fmt.Errorf("context done: %w", ctx.Err()) + return ctx.Err() case <-ticker.C: continue } diff --git a/internal/serverless/client.go b/internal/serverless/client.go index 1b4a2f0b33..73057cf563 100644 --- a/internal/serverless/client.go +++ b/internal/serverless/client.go @@ -140,7 +140,7 @@ func (c *Client) doRequest(request *http.Request) (int, []byte, error) { return resp.StatusCode, body, nil } -func (c *Client) CreateProject(name, region, projectType string) (*Project, error) { +func (c *Client) CreateProject(ctx context.Context, name, region, projectType string) (*Project, error) { ReqBody := struct { Name string `json:"name"` RegionID string `json:"region_id"` @@ -152,7 +152,6 @@ func (c *Client) CreateProject(name, region, projectType string) (*Project, erro if err != nil { return nil, err } - ctx := context.TODO() resourcePath, err := url.JoinPath(c.host, projectsAPI, projectType) if err != nil { return nil, fmt.Errorf("could not build the URL: %w", err) @@ -259,8 +258,7 @@ func (c *Client) ResetCredentials(ctx context.Context, project *Project) error { return 
nil } -func (c *Client) DeleteProject(project *Project) error { - ctx := context.TODO() +func (c *Client) DeleteProject(ctx context.Context, project *Project) error { resourcePath, err := url.JoinPath(c.host, projectsAPI, project.Type, project.ID) if err != nil { return fmt.Errorf("could not build the URL: %w", err) @@ -277,8 +275,7 @@ func (c *Client) DeleteProject(project *Project) error { return nil } -func (c *Client) GetProject(projectType, projectID string) (*Project, error) { - ctx := context.TODO() +func (c *Client) GetProject(ctx context.Context, projectType, projectID string) (*Project, error) { resourcePath, err := url.JoinPath(c.host, projectsAPI, projectType, projectID) if err != nil { return nil, fmt.Errorf("could not build the URL: %w", err) @@ -311,7 +308,7 @@ func (c *Client) EnsureEndpoints(ctx context.Context, project *Project) error { } for { - newProject, err := c.GetProject(project.Type, project.ID) + newProject, err := c.GetProject(ctx, project.Type, project.ID) switch { case err != nil: logger.Debugf("request error: %s", err.Error()) diff --git a/internal/stack/serverless.go b/internal/stack/serverless.go index 3c6bc31a68..2ae860a40e 100644 --- a/internal/stack/serverless.go +++ b/internal/stack/serverless.go @@ -59,13 +59,13 @@ type projectSettings struct { SelfMonitor bool } -func (sp *serverlessProvider) createProject(settings projectSettings, options Options, conf Config) (Config, error) { - project, err := sp.client.CreateProject(settings.Name, settings.Region, settings.Type) +func (sp *serverlessProvider) createProject(ctx context.Context, settings projectSettings, options Options, conf Config) (Config, error) { + project, err := sp.client.CreateProject(ctx, settings.Name, settings.Region, settings.Type) if err != nil { return Config{}, fmt.Errorf("failed to create %s project %s in %s: %w", settings.Type, settings.Name, settings.Region, err) } - ctx, cancel := context.WithTimeout(context.TODO(), time.Minute*30) + ctx, cancel := 
context.WithTimeout(ctx, time.Minute*30) defer cancel() if err := sp.client.EnsureEndpoints(ctx, project); err != nil { return Config{}, fmt.Errorf("failed to ensure endpoints have been provisioned properly: %w", err) @@ -129,12 +129,12 @@ func (sp *serverlessProvider) createProject(settings projectSettings, options Op return config, nil } -func (sp *serverlessProvider) deleteProject(project *serverless.Project, options Options) error { - return sp.client.DeleteProject(project) +func (sp *serverlessProvider) deleteProject(ctx context.Context, project *serverless.Project, options Options) error { + return sp.client.DeleteProject(ctx, project) } -func (sp *serverlessProvider) currentProjectWithClientsAndFleetEndpoint(config Config) (*serverless.Project, error) { - project, err := sp.currentProject(config) +func (sp *serverlessProvider) currentProjectWithClientsAndFleetEndpoint(ctx context.Context, config Config) (*serverless.Project, error) { + project, err := sp.currentProject(ctx, config) if err != nil { return nil, err } @@ -156,7 +156,7 @@ func (sp *serverlessProvider) currentProjectWithClientsAndFleetEndpoint(config C return project, nil } -func (sp *serverlessProvider) currentProject(config Config) (*serverless.Project, error) { +func (sp *serverlessProvider) currentProject(ctx context.Context, config Config) (*serverless.Project, error) { projectID, found := config.Parameters[paramServerlessProjectID] if !found { return nil, serverless.ErrProjectNotExist @@ -167,7 +167,7 @@ func (sp *serverlessProvider) currentProject(config Config) (*serverless.Project return nil, serverless.ErrProjectNotExist } - project, err := sp.client.GetProject(projectType, projectID) + project, err := sp.client.GetProject(ctx, projectType, projectID) if errors.Is(serverless.ErrProjectNotExist, err) { return nil, err } @@ -255,18 +255,18 @@ func (sp *serverlessProvider) BootUp(ctx context.Context, options Options) error var project *serverless.Project isNewProject := false - project, 
err = sp.currentProject(config) + project, err = sp.currentProject(ctx, config) switch err { default: return err case serverless.ErrProjectNotExist: logger.Infof("Creating %s project: %q", settings.Type, settings.Name) - config, err = sp.createProject(settings, options, config) + config, err = sp.createProject(ctx, settings, options, config) if err != nil { return fmt.Errorf("failed to create deployment: %w", err) } - project, err = sp.currentProjectWithClientsAndFleetEndpoint(config) + project, err = sp.currentProjectWithClientsAndFleetEndpoint(ctx, config) if err != nil { return fmt.Errorf("failed to retrieve latest project created: %w", err) } @@ -366,14 +366,14 @@ func (sp *serverlessProvider) TearDown(ctx context.Context, options Options) err errs = fmt.Errorf("failed to destroy local services: %w", err) } - project, err := sp.currentProject(config) + project, err := sp.currentProject(ctx, config) if err != nil { return fmt.Errorf("failed to find current project: %w", err) } logger.Debugf("Deleting project %q (%s)", project.Name, project.ID) - err = sp.deleteProject(project, options) + err = sp.deleteProject(ctx, project, options) if err != nil { logger.Errorf("failed to delete project: %v", err) errs = errors.Join(errs, fmt.Errorf("failed to delete project: %w", err)) @@ -413,7 +413,7 @@ func (sp *serverlessProvider) Status(ctx context.Context, options Options) ([]Se return nil, fmt.Errorf("failed to load configuration: %w", err) } - project, err := sp.currentProjectWithClientsAndFleetEndpoint(config) + project, err := sp.currentProjectWithClientsAndFleetEndpoint(ctx, config) if errors.Is(serverless.ErrProjectNotExist, err) { return nil, nil } diff --git a/internal/testrunner/runners/system/runner.go b/internal/testrunner/runners/system/runner.go index a123a5dc18..02ae5805c8 100644 --- a/internal/testrunner/runners/system/runner.go +++ b/internal/testrunner/runners/system/runner.go @@ -1255,7 +1255,7 @@ func waitUntilTrue(ctx context.Context, fn 
func(context.Context) (bool, error), case <-retryTicker.C: continue case <-ctx.Done(): - return false, fmt.Errorf("context done: %w", ctx.Err()) + return false, ctx.Err() case <-timeoutTicker.C: return false, nil } diff --git a/internal/version/check_update.go b/internal/version/check_update.go index 7b9ee554b6..d902f8afa2 100644 --- a/internal/version/check_update.go +++ b/internal/version/check_update.go @@ -42,7 +42,7 @@ func (v versionLatest) String() string { } // CheckUpdate function checks using Github Release API if newer version is available. -func CheckUpdate() { +func CheckUpdate(ctx context.Context) { if Tag == "" { logger.Debugf("Distribution built without a version tag, can't determine release chronology. Please consider using official releases at " + "https://github.com/elastic/elastic-package/releases") @@ -72,7 +72,7 @@ func CheckUpdate() { default: logger.Debugf("checking latest release in Github") githubClient := github.UnauthorizedClient() - githubRelease, err := githubClient.LatestRelease(context.TODO(), repositoryOwner, repositoryName) + githubRelease, err := githubClient.LatestRelease(ctx, repositoryOwner, repositoryName) if err != nil { logger.Debugf("Error: %v", err) return From 3fc76d5b8ab81d41d8c1e5521f318aa4f9d97a62 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 14 Feb 2024 17:56:44 +0100 Subject: [PATCH 05/32] Linting --- main.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/main.go b/main.go index b23da74445..48e158f4b7 100644 --- a/main.go +++ b/main.go @@ -36,9 +36,5 @@ func main() { } func errIsInterruption(err error) bool { - if errors.Is(err, context.Canceled) { - return true - } - - return false + return errors.Is(err, context.Canceled) } From 2ac99189caf985730b441d690f453b1ef0968b68 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 14 Feb 2024 18:08:28 +0100 Subject: [PATCH 06/32] Reuse waitUntilTrue --- internal/benchrunner/runners/rally/runner.go | 30 +-------------- 
internal/benchrunner/runners/stream/runner.go | 30 +-------------- internal/benchrunner/runners/system/runner.go | 34 ++--------------- internal/testrunner/runners/system/runner.go | 34 ++--------------- internal/wait/wait.go | 38 +++++++++++++++++++ 5 files changed, 50 insertions(+), 116 deletions(-) create mode 100644 internal/wait/wait.go diff --git a/internal/benchrunner/runners/rally/runner.go b/internal/benchrunner/runners/rally/runner.go index f82469cb64..5f7b0ef301 100644 --- a/internal/benchrunner/runners/rally/runner.go +++ b/internal/benchrunner/runners/rally/runner.go @@ -22,6 +22,7 @@ import ( "time" "github.com/elastic/elastic-package/internal/packages/installer" + "github.com/elastic/elastic-package/internal/wait" "github.com/magefile/mage/sh" @@ -332,7 +333,7 @@ func (r *runner) setUp(ctx context.Context) error { return fmt.Errorf("error deleting old data in data stream: %s: %w", r.runtimeDataStream, err) } - cleared, err := waitUntilTrue(ctx, func(context.Context) (bool, error) { + cleared, err := wait.UntilTrue(ctx, func(context.Context) (bool, error) { hits, err := getTotalHits(r.options.ESAPI, r.runtimeDataStream) return hits == 0, err }, 2*time.Minute) @@ -1164,33 +1165,6 @@ func getTotalHits(esapi *elasticsearch.API, dataStream string) (int, error) { return numHits, nil } -func waitUntilTrue(ctx context.Context, fn func(ctx context.Context) (bool, error), timeout time.Duration) (bool, error) { - timeoutTimer := time.NewTimer(timeout) - defer timeoutTimer.Stop() - - retryTicker := time.NewTicker(5 * time.Second) - defer retryTicker.Stop() - - for { - result, err := fn(ctx) - if err != nil { - return false, err - } - if result { - return true, nil - } - - select { - case <-retryTicker.C: - continue - case <-ctx.Done(): - return false, ctx.Err() - case <-timeoutTimer.C: - return false, nil - } - } -} - func createRunID() string { return uuid.New().String() } diff --git a/internal/benchrunner/runners/stream/runner.go 
b/internal/benchrunner/runners/stream/runner.go index dd6ff51bee..a4c6043f25 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ -19,6 +19,7 @@ import ( "time" "github.com/elastic/elastic-package/internal/packages/installer" + "github.com/elastic/elastic-package/internal/wait" "github.com/google/uuid" "gopkg.in/yaml.v3" @@ -158,7 +159,7 @@ func (r *runner) setUp(ctx context.Context) error { return fmt.Errorf("error cleaning up old data in data streams: %w", err) } - cleared, err := waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { + cleared, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { totalHits := 0 for _, runtimeDataStream := range r.runtimeDataStreams { hits, err := getTotalHits(r.options.ESAPI, runtimeDataStream) @@ -597,33 +598,6 @@ func getTotalHits(esapi *elasticsearch.API, dataStream string) (int, error) { return numHits, nil } -func waitUntilTrue(ctx context.Context, fn func(ctx context.Context) (bool, error), timeout time.Duration) (bool, error) { - timeoutTimer := time.NewTimer(timeout) - defer timeoutTimer.Stop() - - retryTicker := time.NewTicker(5 * time.Second) - defer retryTicker.Stop() - - for { - result, err := fn(ctx) - if err != nil { - return false, err - } - if result { - return true, nil - } - - select { - case <-retryTicker.C: - continue - case <-ctx.Done(): - return false, ctx.Err() - case <-timeoutTimer.C: - return false, nil - } - } -} - func createRunID() string { return uuid.New().String() } diff --git a/internal/benchrunner/runners/system/runner.go b/internal/benchrunner/runners/system/runner.go index 0c0667ef9d..a95dadf30b 100644 --- a/internal/benchrunner/runners/system/runner.go +++ b/internal/benchrunner/runners/system/runner.go @@ -33,6 +33,7 @@ import ( "github.com/elastic/elastic-package/internal/multierror" "github.com/elastic/elastic-package/internal/packages" "github.com/elastic/elastic-package/internal/servicedeployer" + 
"github.com/elastic/elastic-package/internal/wait" ) const ( @@ -214,7 +215,7 @@ func (r *runner) setUp(ctx context.Context) error { return fmt.Errorf("error deleting old data in data stream: %s: %w", r.runtimeDataStream, err) } - cleared, err := waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { + cleared, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { hits, err := getTotalHits(r.options.ESAPI, r.runtimeDataStream) return hits == 0, err }, 2*time.Minute) @@ -619,7 +620,7 @@ func (r *runner) runGenerator(destDir string) error { func (r *runner) checkEnrolledAgents(ctx context.Context) ([]kibana.Agent, error) { var agents []kibana.Agent - enrolled, err := waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { + enrolled, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { allAgents, err := r.options.KibanaClient.ListAgents() if err != nil { return false, fmt.Errorf("could not list agents: %w", err) @@ -649,7 +650,7 @@ func (r *runner) waitUntilBenchmarkFinishes(ctx context.Context) (bool, error) { } oldHits := 0 - return waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { + return wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { var err error hits, err := getTotalHits(r.options.ESAPI, r.runtimeDataStream) if hits == 0 { @@ -965,33 +966,6 @@ func filterAgents(allAgents []kibana.Agent) []kibana.Agent { return filtered } -func waitUntilTrue(ctx context.Context, fn func(context.Context) (bool, error), timeout time.Duration) (bool, error) { - timeoutTicker := time.NewTicker(timeout) - defer timeoutTicker.Stop() - - retryTicker := time.NewTicker(5 * time.Second) - defer retryTicker.Stop() - - for { - result, err := fn(ctx) - if err != nil { - return false, err - } - if result { - return true, nil - } - - select { - case <-retryTicker.C: - continue - case <-ctx.Done(): - return false, ctx.Err() - case <-timeoutTicker.C: - return false, nil - } - } -} - func createRunID() string { return 
uuid.New().String() } diff --git a/internal/testrunner/runners/system/runner.go b/internal/testrunner/runners/system/runner.go index 02ae5805c8..2b5e8384fc 100644 --- a/internal/testrunner/runners/system/runner.go +++ b/internal/testrunner/runners/system/runner.go @@ -35,6 +35,7 @@ import ( "github.com/elastic/elastic-package/internal/servicedeployer" "github.com/elastic/elastic-package/internal/stack" "github.com/elastic/elastic-package/internal/testrunner" + "github.com/elastic/elastic-package/internal/wait" ) const ( @@ -625,7 +626,7 @@ func (r *runner) runTest(ctx context.Context, config *testConfig, serviceContext return result.WithError(fmt.Errorf("error deleting old data in data stream: %s: %w", dataStream, err)) } - cleared, err := waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { + cleared, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { hits, err := r.getDocs(dataStream) if err != nil { return false, err @@ -700,7 +701,7 @@ func (r *runner) runTest(ctx context.Context, config *testConfig, serviceContext logger.Debug("checking for expected data in data stream...") var hits *hits oldHits := 0 - passed, waitErr := waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { + passed, waitErr := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { var err error hits, err = r.getDocs(dataStream) if err != nil { @@ -830,7 +831,7 @@ func (r *runner) runTest(ctx context.Context, config *testConfig, serviceContext func checkEnrolledAgents(ctx context.Context, client *kibana.Client, ctxt servicedeployer.ServiceContext) ([]kibana.Agent, error) { var agents []kibana.Agent - enrolled, err := waitUntilTrue(ctx, func(ctx context.Context) (bool, error) { + enrolled, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { allAgents, err := client.ListAgents() if err != nil { return false, fmt.Errorf("could not list agents: %w", err) @@ -1235,33 +1236,6 @@ func deleteDataStreamDocs(api *elasticsearch.API, dataStream 
string) error { return nil } -func waitUntilTrue(ctx context.Context, fn func(context.Context) (bool, error), timeout time.Duration) (bool, error) { - timeoutTicker := time.NewTicker(timeout) - defer timeoutTicker.Stop() - - retryTicker := time.NewTicker(1 * time.Second) - defer retryTicker.Stop() - - for { - result, err := fn(ctx) - if err != nil { - return false, err - } - if result { - return true, nil - } - - select { - case <-retryTicker.C: - continue - case <-ctx.Done(): - return false, ctx.Err() - case <-timeoutTicker.C: - return false, nil - } - } -} - func filterAgents(allAgents []kibana.Agent, ctx servicedeployer.ServiceContext) []kibana.Agent { if ctx.Agent.Host.NamePrefix != "" { logger.Debugf("filter agents using criteria: NamePrefix=%s", ctx.Agent.Host.NamePrefix) diff --git a/internal/wait/wait.go b/internal/wait/wait.go new file mode 100644 index 0000000000..f24b846e61 --- /dev/null +++ b/internal/wait/wait.go @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package wait + +import ( + "context" + "time" +) + +// UntilTrue waits till the context is cancelled or the given function returns an error or true. 
+func UntilTrue(ctx context.Context, fn func(ctx context.Context) (bool, error), timeout time.Duration) (bool, error) { + timeoutTimer := time.NewTimer(timeout) + defer timeoutTimer.Stop() + + retryTicker := time.NewTicker(5 * time.Second) + defer retryTicker.Stop() + + for { + result, err := fn(ctx) + if err != nil { + return false, err + } + if result { + return true, nil + } + + select { + case <-retryTicker.C: + continue + case <-ctx.Done(): + return false, ctx.Err() + case <-timeoutTimer.C: + return false, nil + } + } +} From 3a20101c77d0fe389ebcce4736e96731684822de Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Thu, 15 Feb 2024 11:47:43 +0100 Subject: [PATCH 07/32] Adjust default retry period --- internal/wait/wait.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/wait/wait.go b/internal/wait/wait.go index f24b846e61..c75a36374c 100644 --- a/internal/wait/wait.go +++ b/internal/wait/wait.go @@ -14,7 +14,7 @@ func UntilTrue(ctx context.Context, fn func(ctx context.Context) (bool, error), timeoutTimer := time.NewTimer(timeout) defer timeoutTimer.Stop() - retryTicker := time.NewTicker(5 * time.Second) + retryTicker := time.NewTicker(1 * time.Second) defer retryTicker.Stop() for { From 8e50588227a661fa5ff23e4962bfecb3f855584f Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Thu, 15 Feb 2024 13:34:45 +0100 Subject: [PATCH 08/32] More contexts --- internal/benchrunner/runners/rally/runner.go | 14 +++++++------- internal/benchrunner/runners/stream/runner.go | 14 +++++++------- internal/benchrunner/runners/system/runner.go | 14 +++++++------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/internal/benchrunner/runners/rally/runner.go b/internal/benchrunner/runners/rally/runner.go index 5f7b0ef301..809cf8e5d1 100644 --- a/internal/benchrunner/runners/rally/runner.go +++ b/internal/benchrunner/runners/rally/runner.go @@ -194,8 +194,8 @@ func (r *runner) TearDown(ctx context.Context) error { } } - // Using 
nil context to avoid interrupting cleanup operations. - cleanupCtx := context.Background() + // Avoid cancellations during cleanup. + cleanupCtx := context.WithoutCancel(ctx) var merr multierror.Error @@ -280,7 +280,7 @@ func (r *runner) setUp(ctx context.Context) error { if r.scenario.Corpora.Generator != nil && len(r.options.CorpusAtPath) == 0 { var err error - r.generator, err = r.initializeGenerator() + r.generator, err = r.initializeGenerator(ctx) if err != nil { return fmt.Errorf("can't initialize generator: %w", err) } @@ -552,7 +552,7 @@ func (r *runner) deleteDataStreamDocs(dataStream string) error { return nil } -func (r *runner) initializeGenerator() (genlib.Generator, error) { +func (r *runner) initializeGenerator(ctx context.Context) (genlib.Generator, error) { totEvents := r.scenario.Corpora.Generator.TotalEvents config, err := r.getGeneratorConfig() @@ -560,7 +560,7 @@ func (r *runner) initializeGenerator() (genlib.Generator, error) { return nil, err } - fields, err := r.getGeneratorFields() + fields, err := r.getGeneratorFields(ctx) if err != nil { return nil, err } @@ -622,7 +622,7 @@ func (r *runner) getGeneratorConfig() (*config.Config, error) { return &cfg, nil } -func (r *runner) getGeneratorFields() (fields.Fields, error) { +func (r *runner) getGeneratorFields(ctx context.Context) (fields.Fields, error) { var ( data []byte err error @@ -646,7 +646,7 @@ func (r *runner) getGeneratorFields() (fields.Fields, error) { } } - fields, err := fields.LoadFieldsWithTemplateFromString(context.Background(), string(data)) + fields, err := fields.LoadFieldsWithTemplateFromString(ctx, string(data)) if err != nil { return nil, fmt.Errorf("could not load fields yaml: %w", err) } diff --git a/internal/benchrunner/runners/stream/runner.go b/internal/benchrunner/runners/stream/runner.go index a4c6043f25..534460bf89 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ -77,8 +77,8 @@ func (r *runner) 
TearDown(ctx context.Context) error { return nil } - // Using nil context to avoid interrupting cleanup operations. - cleanupCtx := context.Background() + // Avoid cancellations during cleanup. + cleanupCtx := context.WithoutCancel(ctx) var merr multierror.Error @@ -127,7 +127,7 @@ func (r *runner) setUp(ctx context.Context) error { return fmt.Errorf("error installing package: %w", err) } - err = r.collectGenerators() + err = r.collectGenerators(ctx) if err != nil { return fmt.Errorf("can't initialize generator: %w", err) } @@ -284,14 +284,14 @@ func (r *runner) initializeGenerator(tpl []byte, config genlib.Config, fields ge return genlib.NewGeneratorWithTextTemplate(tpl, config, fields, totEvents) } } -func (r *runner) collectGenerators() error { +func (r *runner) collectGenerators(ctx context.Context) error { for scenarioName, scenario := range r.scenarios { config, err := r.getGeneratorConfig(scenario) if err != nil { return err } - fields, err := r.getGeneratorFields(scenario) + fields, err := r.getGeneratorFields(ctx, scenario) if err != nil { return err } @@ -360,7 +360,7 @@ func (r *runner) getGeneratorConfig(scenario *scenario) (*config.Config, error) return &cfg, nil } -func (r *runner) getGeneratorFields(scenario *scenario) (fields.Fields, error) { +func (r *runner) getGeneratorFields(ctx context.Context, scenario *scenario) (fields.Fields, error) { var ( data []byte err error @@ -384,7 +384,7 @@ func (r *runner) getGeneratorFields(scenario *scenario) (fields.Fields, error) { } } - fields, err := fields.LoadFieldsWithTemplateFromString(context.Background(), string(data)) + fields, err := fields.LoadFieldsWithTemplateFromString(ctx, string(data)) if err != nil { return nil, fmt.Errorf("could not load fields yaml: %w", err) } diff --git a/internal/benchrunner/runners/system/runner.go b/internal/benchrunner/runners/system/runner.go index a95dadf30b..bb2c1d8dff 100644 --- a/internal/benchrunner/runners/system/runner.go +++ 
b/internal/benchrunner/runners/system/runner.go @@ -88,8 +88,8 @@ func (r *runner) TearDown(ctx context.Context) error { } - // Using nil context to avoid interrupting cleanup operations. - cleanupCtx := context.Background() + // Avoid cancellations during cleanup. + cleanupCtx := context.WithoutCancel(ctx) var merr multierror.Error @@ -159,7 +159,7 @@ func (r *runner) setUp(ctx context.Context) error { if r.scenario.Corpora.Generator != nil { var err error - r.generator, err = r.initializeGenerator() + r.generator, err = r.initializeGenerator(ctx) if err != nil { return fmt.Errorf("can't initialize generator: %w", err) } @@ -446,7 +446,7 @@ func (r *runner) createPackagePolicy(pkgManifest *packages.PackageManifest, p *k return policy, nil } -func (r *runner) initializeGenerator() (genlib.Generator, error) { +func (r *runner) initializeGenerator(ctx context.Context) (genlib.Generator, error) { totEvents := r.scenario.Corpora.Generator.TotalEvents config, err := r.getGeneratorConfig() @@ -454,7 +454,7 @@ func (r *runner) initializeGenerator() (genlib.Generator, error) { return nil, err } - fields, err := r.getGeneratorFields() + fields, err := r.getGeneratorFields(ctx) if err != nil { return nil, err } @@ -516,7 +516,7 @@ func (r *runner) getGeneratorConfig() (*config.Config, error) { return &cfg, nil } -func (r *runner) getGeneratorFields() (fields.Fields, error) { +func (r *runner) getGeneratorFields(ctx context.Context) (fields.Fields, error) { var ( data []byte err error @@ -540,7 +540,7 @@ func (r *runner) getGeneratorFields() (fields.Fields, error) { } } - fields, err := fields.LoadFieldsWithTemplateFromString(context.Background(), string(data)) + fields, err := fields.LoadFieldsWithTemplateFromString(ctx, string(data)) if err != nil { return nil, fmt.Errorf("could not load fields yaml: %w", err) } From cccecc1f9412bc6b52fde14b472259640d5cee4d Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Thu, 15 Feb 2024 14:24:26 +0100 Subject: [PATCH 09/32] 
Rename ServiceContext --- internal/benchrunner/runners/rally/metrics.go | 12 +++--- internal/benchrunner/runners/rally/runner.go | 36 ++++++++--------- internal/benchrunner/runners/stream/runner.go | 6 +-- .../benchrunner/runners/system/metrics.go | 12 +++--- internal/benchrunner/runners/system/runner.go | 30 +++++++------- .../benchrunner/runners/system/scenario.go | 14 +++---- internal/service/boot.go | 10 ++--- internal/servicedeployer/compose.go | 30 +++++++------- internal/servicedeployer/custom_agent.go | 14 +++---- internal/servicedeployer/deployed_service.go | 8 ++-- .../servicedeployer/{context.go => info.go} | 8 ++-- internal/servicedeployer/kubernetes.go | 20 +++++----- internal/servicedeployer/service_deployer.go | 2 +- internal/servicedeployer/terraform.go | 20 +++++----- internal/servicedeployer/terraform_env.go | 8 ++-- internal/servicedeployer/terraform_test.go | 22 +++++----- internal/testrunner/runners/system/runner.go | 40 +++++++++---------- .../testrunner/runners/system/test_config.go | 14 +++---- 18 files changed, 153 insertions(+), 153 deletions(-) rename internal/servicedeployer/{context.go => info.go} (89%) diff --git a/internal/benchrunner/runners/rally/metrics.go b/internal/benchrunner/runners/rally/metrics.go index b5fa905c7c..2911ae50ac 100644 --- a/internal/benchrunner/runners/rally/metrics.go +++ b/internal/benchrunner/runners/rally/metrics.go @@ -21,7 +21,7 @@ import ( ) type collector struct { - ctxt servicedeployer.ServiceContext + svcInfo servicedeployer.ServiceInfo metadata benchMeta scenario scenario @@ -64,7 +64,7 @@ type metricsSummary struct { } func newCollector( - ctxt servicedeployer.ServiceContext, + svcInfo servicedeployer.ServiceInfo, benchName string, scenario scenario, esAPI, metricsAPI *elasticsearch.API, @@ -73,9 +73,9 @@ func newCollector( ) *collector { meta := benchMeta{Parameters: scenario} meta.Info.Benchmark = benchName - meta.Info.RunID = ctxt.Test.RunID + meta.Info.RunID = svcInfo.Test.RunID return 
&collector{ - ctxt: ctxt, + svcInfo: svcInfo, interval: interval, scenario: scenario, metadata: meta, @@ -202,12 +202,12 @@ func (c *collector) createMetricsIndex() { } func (c *collector) indexName() string { - return fmt.Sprintf("bench-metrics-%s-%s", c.datastream, c.ctxt.Test.RunID) + return fmt.Sprintf("bench-metrics-%s-%s", c.datastream, c.svcInfo.Test.RunID) } func (c *collector) summarize() (*metricsSummary, error) { sum := metricsSummary{ - RunID: c.ctxt.Test.RunID, + RunID: c.svcInfo.Test.RunID, IngestPipelineStats: make(map[string]ingest.PipelineStatsMap), NodesStats: make(map[string]ingest.NodeStats), DiskUsage: c.diskUsage, diff --git a/internal/benchrunner/runners/rally/runner.go b/internal/benchrunner/runners/rally/runner.go index 70a0ab9744..97fcf17c32 100644 --- a/internal/benchrunner/runners/rally/runner.go +++ b/internal/benchrunner/runners/rally/runner.go @@ -149,7 +149,7 @@ type runner struct { options Options scenario *scenario - ctxt servicedeployer.ServiceContext + svcInfo servicedeployer.ServiceInfo runtimeDataStream string indexTemplateBody string pipelinePrefix string @@ -231,7 +231,7 @@ func (r *runner) TearDown(ctx context.Context) error { } func (r *runner) createRallyTrackDir(locationManager *locations.LocationManager) error { - outputDir := filepath.Join(locationManager.RallyCorpusDir(), r.ctxt.Test.RunID) + outputDir := filepath.Join(locationManager.RallyCorpusDir(), r.svcInfo.Test.RunID) if err := os.MkdirAll(outputDir, 0755); err != nil { return fmt.Errorf("failed to create output directory: %w", err) } @@ -245,15 +245,15 @@ func (r *runner) setUp(ctx context.Context) error { } rallyCorpusDir := locationManager.RallyCorpusDir() - r.ctxt.Logs.Folder.Local = rallyCorpusDir - r.ctxt.Logs.Folder.Agent = RallyCorpusAgentDir - r.ctxt.Test.RunID = createRunID() + r.svcInfo.Logs.Folder.Local = rallyCorpusDir + r.svcInfo.Logs.Folder.Agent = RallyCorpusAgentDir + r.svcInfo.Test.RunID = createRunID() - outputDir, err := 
servicedeployer.CreateOutputDir(locationManager, r.ctxt.Test.RunID) + outputDir, err := servicedeployer.CreateOutputDir(locationManager, r.svcInfo.Test.RunID) if err != nil { return fmt.Errorf("could not create output dir for terraform deployer %w", err) } - r.ctxt.OutputDir = outputDir + r.svcInfo.OutputDir = outputDir err = r.createRallyTrackDir(locationManager) if err != nil { @@ -406,8 +406,8 @@ func (r *runner) run(ctx context.Context) (report reporters.Reportable, err erro var corpusDocCount uint64 // if there is a generator config, generate the data, unless a corpus path is set if r.generator != nil && len(r.options.CorpusAtPath) == 0 { - logger.Debugf("generating corpus data to %s...", r.ctxt.Logs.Folder.Local) - corpusDocCount, err = r.runGenerator(r.ctxt.Logs.Folder.Local) + logger.Debugf("generating corpus data to %s...", r.svcInfo.Logs.Folder.Local) + corpusDocCount, err = r.runGenerator(r.svcInfo.Logs.Folder.Local) if err != nil { return nil, fmt.Errorf("can't generate benchmarks data corpus for data stream: %w", err) } @@ -415,7 +415,7 @@ func (r *runner) run(ctx context.Context) (report reporters.Reportable, err erro if len(r.options.CorpusAtPath) > 0 { logger.Debugf("reading corpus data from %s...", r.options.CorpusAtPath) - corpusDocCount, err = r.copyCorpusFile(r.options.CorpusAtPath, r.ctxt.Logs.Folder.Local) + corpusDocCount, err = r.copyCorpusFile(r.options.CorpusAtPath, r.svcInfo.Logs.Folder.Local) if err != nil { return nil, fmt.Errorf("can't read benchmarks data corpus for data stream: %w", err) } @@ -425,7 +425,7 @@ func (r *runner) run(ctx context.Context) (report reporters.Reportable, err erro return nil, errors.New("can't find documents in the corpus for data stream") } - if err := r.createRallyTrack(corpusDocCount, r.ctxt.Logs.Folder.Local); err != nil { + if err := r.createRallyTrack(corpusDocCount, r.svcInfo.Logs.Folder.Local); err != nil { return nil, fmt.Errorf("can't create benchmarks data rally track for data stream: %w", err) } 
@@ -512,7 +512,7 @@ func (r *runner) installPackageFromPackageRoot() error { func (r *runner) startMetricsColletion() { // TODO collect agent hosts metrics using system integration r.mcollector = newCollector( - r.ctxt, + r.svcInfo, r.options.BenchName, *r.scenario, r.options.ESAPI, @@ -900,7 +900,7 @@ func (r *runner) runRally(ctx context.Context) ([]rallyStat, error) { cmd := exec.Command( "esrally", "race", - "--race-id="+r.ctxt.Test.RunID, + "--race-id="+r.svcInfo.Test.RunID, "--report-format=csv", fmt.Sprintf(`--report-file=%s`, r.reportFile), fmt.Sprintf(`--target-hosts={"default":["%s"]}`, elasticsearchHost), @@ -915,12 +915,12 @@ func (r *runner) runRally(ctx context.Context) ([]rallyStat, error) { logger.Debugf("output command: %s", cmd) output, err := cmd.Output() if err != nil { - return nil, fmt.Errorf("could not run esrally track in path: %s (stdout=%q, stderr=%q): %w", r.ctxt.Logs.Folder.Local, output, errOutput.String(), err) + return nil, fmt.Errorf("could not run esrally track in path: %s (stdout=%q, stderr=%q): %w", r.svcInfo.Logs.Folder.Local, output, errOutput.String(), err) } reportCSV, err := os.Open(r.reportFile) if err != nil { - return nil, fmt.Errorf("could not open esrally report in path: %s: %w", r.ctxt.Logs.Folder.Local, err) + return nil, fmt.Errorf("could not open esrally report in path: %s: %w", r.svcInfo.Logs.Folder.Local, err) } reader := csv.NewReader(reportCSV) @@ -932,7 +932,7 @@ func (r *runner) runRally(ctx context.Context) ([]rallyStat, error) { break } if err != nil { - return nil, fmt.Errorf("could not read esrally report in path: %s (stderr=%q): %w", r.ctxt.Logs.Folder.Local, errOutput.String(), err) + return nil, fmt.Errorf("could not read esrally report in path: %s (stderr=%q): %w", r.svcInfo.Logs.Folder.Local, errOutput.String(), err) } stats = append(stats, rallyStat{Metric: record[0], Task: record[1], Value: record[2], Unit: record[3]}) @@ -994,7 +994,7 @@ func (r *runner) reindexData() error { }`, mapping)), ) - 
indexName := fmt.Sprintf("bench-reindex-%s-%s", r.runtimeDataStream, r.ctxt.Test.RunID) + indexName := fmt.Sprintf("bench-reindex-%s-%s", r.runtimeDataStream, r.svcInfo.Test.RunID) logger.Debugf("creating %s index in metricstore...", indexName) @@ -1118,7 +1118,7 @@ type benchMeta struct { func (r *runner) enrichEventWithBenchmarkMetadata(e map[string]interface{}) map[string]interface{} { var m benchMeta m.Info.Benchmark = r.options.BenchName - m.Info.RunID = r.ctxt.Test.RunID + m.Info.RunID = r.svcInfo.Test.RunID m.Parameters = *r.scenario e["benchmark_metadata"] = m return e diff --git a/internal/benchrunner/runners/stream/runner.go b/internal/benchrunner/runners/stream/runner.go index 04017be0ee..282e603b3b 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ -40,7 +40,7 @@ type runner struct { options Options scenarios map[string]*scenario - ctxt servicedeployer.ServiceContext + svcInfo servicedeployer.ServiceInfo runtimeDataStreams map[string]string generators map[string]genlib.Generator backFillGenerators map[string]genlib.Generator @@ -109,7 +109,7 @@ func (r *runner) setUp(ctx context.Context) error { r.runtimeDataStreams = make(map[string]string) - r.ctxt.Test.RunID = createRunID() + r.svcInfo.Test.RunID = createRunID() pkgManifest, err := packages.ReadPackageManifestFromPackageRoot(r.options.PackageRootPath) if err != nil { @@ -554,7 +554,7 @@ type benchMeta struct { func (r *runner) enrichEventWithBenchmarkMetadata(e map[string]any) map[string]interface{} { var m benchMeta m.Info.Benchmark = r.options.BenchName - m.Info.RunID = r.ctxt.Test.RunID + m.Info.RunID = r.svcInfo.Test.RunID e["benchmark_metadata"] = m return e } diff --git a/internal/benchrunner/runners/system/metrics.go b/internal/benchrunner/runners/system/metrics.go index bd3c0cb518..c68ec183d5 100644 --- a/internal/benchrunner/runners/system/metrics.go +++ b/internal/benchrunner/runners/system/metrics.go @@ -21,7 +21,7 @@ import ( 
) type collector struct { - ctxt servicedeployer.ServiceContext + svcInfo servicedeployer.ServiceInfo metadata benchMeta scenario scenario @@ -65,7 +65,7 @@ type metricsSummary struct { } func newCollector( - ctxt servicedeployer.ServiceContext, + svcInfo servicedeployer.ServiceInfo, benchName string, scenario scenario, esAPI, metricsAPI *elasticsearch.API, @@ -74,9 +74,9 @@ func newCollector( ) *collector { meta := benchMeta{Parameters: scenario} meta.Info.Benchmark = benchName - meta.Info.RunID = ctxt.Test.RunID + meta.Info.RunID = svcInfo.Test.RunID return &collector{ - ctxt: ctxt, + svcInfo: svcInfo, interval: interval, scenario: scenario, metadata: meta, @@ -201,12 +201,12 @@ func (c *collector) createMetricsIndex() { } func (c *collector) indexName() string { - return fmt.Sprintf("bench-metrics-%s-%s", c.datastream, c.ctxt.Test.RunID) + return fmt.Sprintf("bench-metrics-%s-%s", c.datastream, c.svcInfo.Test.RunID) } func (c *collector) summarize() (*metricsSummary, error) { sum := metricsSummary{ - RunID: c.ctxt.Test.RunID, + RunID: c.svcInfo.Test.RunID, IngestPipelineStats: make(map[string]ingest.PipelineStatsMap), NodesStats: make(map[string]ingest.NodeStats), DiskUsage: c.diskUsage, diff --git a/internal/benchrunner/runners/system/runner.go b/internal/benchrunner/runners/system/runner.go index bb2c1d8dff..694ffa3a2c 100644 --- a/internal/benchrunner/runners/system/runner.go +++ b/internal/benchrunner/runners/system/runner.go @@ -49,7 +49,7 @@ type runner struct { options Options scenario *scenario - ctxt servicedeployer.ServiceContext + svcInfo servicedeployer.ServiceInfo benchPolicy *kibana.Policy runtimeDataStream string pipelinePrefix string @@ -141,17 +141,17 @@ func (r *runner) setUp(ctx context.Context) error { } serviceLogsDir := locationManager.ServiceLogDir() - r.ctxt.Logs.Folder.Local = serviceLogsDir - r.ctxt.Logs.Folder.Agent = ServiceLogsAgentDir - r.ctxt.Test.RunID = createRunID() + r.svcInfo.Logs.Folder.Local = serviceLogsDir + 
r.svcInfo.Logs.Folder.Agent = ServiceLogsAgentDir + r.svcInfo.Test.RunID = createRunID() - outputDir, err := servicedeployer.CreateOutputDir(locationManager, r.ctxt.Test.RunID) + outputDir, err := servicedeployer.CreateOutputDir(locationManager, r.svcInfo.Test.RunID) if err != nil { return fmt.Errorf("could not create output dir for terraform deployer %w", err) } - r.ctxt.OutputDir = outputDir + r.svcInfo.OutputDir = outputDir - scenario, err := readConfig(r.options.BenchPath, r.options.BenchName, r.ctxt) + scenario, err := readConfig(r.options.BenchPath, r.options.BenchName, r.svcInfo) if err != nil { return err } @@ -254,13 +254,13 @@ func (r *runner) run(ctx context.Context) (report reporters.Reportable, err erro return nil, fmt.Errorf("could not create service runner: %w", err) } - r.ctxt.Name = r.scenario.Corpora.InputService.Name - service, err = serviceDeployer.SetUp(ctx, r.ctxt) + r.svcInfo.Name = r.scenario.Corpora.InputService.Name + service, err = serviceDeployer.SetUp(ctx, r.svcInfo) if err != nil { return nil, fmt.Errorf("could not setup service: %w", err) } - r.ctxt = service.Context() + r.svcInfo = service.Info() r.shutdownServiceHandler = func(ctx context.Context) error { logger.Debug("tearing down service...") if err := service.TearDown(ctx); err != nil { @@ -276,8 +276,8 @@ func (r *runner) run(ctx context.Context) (report reporters.Reportable, err erro // if there is a generator config, generate the data if r.generator != nil { - logger.Debugf("generating corpus data to %s...", r.ctxt.Logs.Folder.Local) - if err := r.runGenerator(r.ctxt.Logs.Folder.Local); err != nil { + logger.Debugf("generating corpus data to %s...", r.svcInfo.Logs.Folder.Local) + if err := r.runGenerator(r.svcInfo.Logs.Folder.Local); err != nil { return nil, fmt.Errorf("can't generate benchmarks data corpus for data stream: %w", err) } } @@ -317,7 +317,7 @@ func (r *runner) run(ctx context.Context) (report reporters.Reportable, err erro func (r *runner) startMetricsColletion() 
{ // TODO collect agent hosts metrics using system integration r.mcollector = newCollector( - r.ctxt, + r.svcInfo, r.options.BenchName, *r.scenario, r.options.ESAPI, @@ -777,7 +777,7 @@ func (r *runner) reindexData() error { }`, mapping)), ) - indexName := fmt.Sprintf("bench-reindex-%s-%s", r.runtimeDataStream, r.ctxt.Test.RunID) + indexName := fmt.Sprintf("bench-reindex-%s-%s", r.runtimeDataStream, r.svcInfo.Test.RunID) logger.Debugf("creating %s index in metricstore...", indexName) @@ -901,7 +901,7 @@ type benchMeta struct { func (r *runner) enrichEventWithBenchmarkMetadata(e map[string]interface{}) map[string]interface{} { var m benchMeta m.Info.Benchmark = r.options.BenchName - m.Info.RunID = r.ctxt.Test.RunID + m.Info.RunID = r.svcInfo.Test.RunID m.Parameters = *r.scenario e["benchmark_metadata"] = m return e diff --git a/internal/benchrunner/runners/system/scenario.go b/internal/benchrunner/runners/system/scenario.go index 00a7909f0a..3cb9cceb47 100644 --- a/internal/benchrunner/runners/system/scenario.go +++ b/internal/benchrunner/runners/system/scenario.go @@ -77,7 +77,7 @@ func defaultConfig() *scenario { } } -func readConfig(benchPath string, scenario string, ctxt servicedeployer.ServiceContext) (*scenario, error) { +func readConfig(benchPath string, scenario string, svcInfo servicedeployer.ServiceInfo) (*scenario, error) { configPath := filepath.Clean(filepath.Join(benchPath, fmt.Sprintf("%s.yml", scenario))) data, err := os.ReadFile(configPath) if err != nil { @@ -87,7 +87,7 @@ func readConfig(benchPath string, scenario string, ctxt servicedeployer.ServiceC return nil, fmt.Errorf("could not load system benchmark configuration file: %s: %w", configPath, err) } - data, err = applyContext(data, ctxt) + data, err = applyServiceInfo(data, svcInfo) if err != nil { return nil, fmt.Errorf("could not apply context to benchmark configuration file: %s: %w", configPath, err) } @@ -109,17 +109,17 @@ func readConfig(benchPath string, scenario string, ctxt 
servicedeployer.ServiceC return c, nil } -// applyContext takes the given system benchmark configuration (data) and replaces any placeholder variables in -// it with values from the given context (ctxt). The context may be populated from various sources but usually the +// applyServiceInfo takes the given system benchmark configuration (data) and replaces any placeholder variables in +// it with values from the given service information. The context may be populated from various sources but usually the // most interesting context values will be set by a ServiceDeployer in its SetUp method. -func applyContext(data []byte, ctxt servicedeployer.ServiceContext) ([]byte, error) { +func applyServiceInfo(data []byte, svcInfo servicedeployer.ServiceInfo) ([]byte, error) { tmpl, err := raymond.Parse(string(data)) if err != nil { return data, fmt.Errorf("parsing template body failed: %w", err) } - tmpl.RegisterHelpers(ctxt.Aliases()) + tmpl.RegisterHelpers(svcInfo.Aliases()) - result, err := tmpl.Exec(ctxt) + result, err := tmpl.Exec(svcInfo) if err != nil { return data, fmt.Errorf("could not render data with context: %w", err) } diff --git a/internal/service/boot.go b/internal/service/boot.go index 5d7ce98a53..17ac0c439f 100644 --- a/internal/service/boot.go +++ b/internal/service/boot.go @@ -54,11 +54,11 @@ func BootUp(ctx context.Context, options Options) error { return fmt.Errorf("reading service logs directory failed: %w", err) } - var serviceCtxt servicedeployer.ServiceContext - serviceCtxt.Name = options.ServiceName - serviceCtxt.Logs.Folder.Agent = system.ServiceLogsAgentDir - serviceCtxt.Logs.Folder.Local = locationManager.ServiceLogDir() - deployed, err := serviceDeployer.SetUp(ctx, serviceCtxt) + var svcInfo servicedeployer.ServiceInfo + svcInfo.Name = options.ServiceName + svcInfo.Logs.Folder.Agent = system.ServiceLogsAgentDir + svcInfo.Logs.Folder.Local = locationManager.ServiceLogDir() + deployed, err := serviceDeployer.SetUp(ctx, svcInfo) if err != nil { 
return fmt.Errorf("can't set up the service deployer: %w", err) } diff --git a/internal/servicedeployer/compose.go b/internal/servicedeployer/compose.go index 15920d0879..c539b3c4a1 100644 --- a/internal/servicedeployer/compose.go +++ b/internal/servicedeployer/compose.go @@ -41,7 +41,7 @@ type DockerComposeServiceDeployerOptions struct { } type dockerComposeDeployedService struct { - ctxt ServiceContext + svcInfo ServiceInfo ymlPaths []string project string @@ -61,7 +61,7 @@ func NewDockerComposeServiceDeployer(options DockerComposeServiceDeployerOptions } // SetUp sets up the service and returns any relevant information. -func (d *DockerComposeServiceDeployer) SetUp(ctx context.Context, inCtxt ServiceContext) (DeployedService, error) { +func (d *DockerComposeServiceDeployer) SetUp(ctx context.Context, inCtxt ServiceInfo) (DeployedService, error) { logger.Debug("setting up service using Docker Compose service deployer") service := dockerComposeDeployedService{ ymlPaths: d.ymlPaths, @@ -158,7 +158,7 @@ func (d *DockerComposeServiceDeployer) SetUp(ctx context.Context, inCtxt Service } outCtxt.Agent.Host.NamePrefix = "docker-fleet-agent" - service.ctxt = outCtxt + service.svcInfo = outCtxt return &service, nil } @@ -175,8 +175,8 @@ func (s *dockerComposeDeployedService) Signal(ctx context.Context, signal string s.variant.Env...), ExtraArgs: []string{"-s", signal}, } - if s.ctxt.Name != "" { - opts.Services = append(opts.Services, s.ctxt.Name) + if s.svcInfo.Name != "" { + opts.Services = append(opts.Services, s.svcInfo.Name) } err = p.Kill(ctx, opts) @@ -206,12 +206,12 @@ func (s *dockerComposeDeployedService) ExitCode(ctx context.Context, service str func (s *dockerComposeDeployedService) TearDown(ctx context.Context) error { logger.Debugf("tearing down service using Docker Compose runner") defer func() { - err := files.RemoveContent(s.ctxt.Logs.Folder.Local) + err := files.RemoveContent(s.svcInfo.Logs.Folder.Local) if err != nil { - logger.Errorf("could not remove 
the service logs (path: %s)", s.ctxt.Logs.Folder.Local) + logger.Errorf("could not remove the service logs (path: %s)", s.svcInfo.Logs.Folder.Local) } // Remove the outputs generated by the service container - if err = os.RemoveAll(s.ctxt.OutputDir); err != nil { + if err = os.RemoveAll(s.svcInfo.OutputDir); err != nil { logger.Errorf("could not remove the temporary output files %w", err) } }() @@ -226,7 +226,7 @@ func (s *dockerComposeDeployedService) TearDown(ctx context.Context) error { s.env, s.variant.Env...), } - processServiceContainerLogs(ctx, p, opts, s.ctxt.Name) + processServiceContainerLogs(ctx, p, opts, s.svcInfo.Name) if err := p.Down(ctx, compose.CommandOptions{ Env: opts.Env, @@ -237,14 +237,14 @@ func (s *dockerComposeDeployedService) TearDown(ctx context.Context) error { return nil } -// Context returns the current context for the service. -func (s *dockerComposeDeployedService) Context() ServiceContext { - return s.ctxt +// Info returns the current context for the service. +func (s *dockerComposeDeployedService) Info() ServiceInfo { + return s.svcInfo } -// SetContext sets the current context for the service. -func (s *dockerComposeDeployedService) SetContext(ctxt ServiceContext) error { - s.ctxt = ctxt +// SetInfo sets the current context for the service. +func (s *dockerComposeDeployedService) SetInfo(ctxt ServiceInfo) error { + s.svcInfo = ctxt return nil } diff --git a/internal/servicedeployer/custom_agent.go b/internal/servicedeployer/custom_agent.go index 3f406b21eb..6e4b85113f 100644 --- a/internal/servicedeployer/custom_agent.go +++ b/internal/servicedeployer/custom_agent.go @@ -62,7 +62,7 @@ func NewCustomAgentDeployer(options CustomAgentDeployerOptions) (*CustomAgentDep } // SetUp sets up the service and returns any relevant information. 
-func (d *CustomAgentDeployer) SetUp(ctx context.Context, inCtxt ServiceContext) (DeployedService, error) { +func (d *CustomAgentDeployer) SetUp(ctx context.Context, svcInfo ServiceInfo) (DeployedService, error) { logger.Debug("setting up service using Docker Compose service deployer") appConfig, err := install.Configuration() @@ -77,7 +77,7 @@ func (d *CustomAgentDeployer) SetUp(ctx context.Context, inCtxt ServiceContext) env := append( appConfig.StackImageRefs(d.stackVersion).AsEnv(), - fmt.Sprintf("%s=%s", serviceLogsDirEnv, inCtxt.Logs.Folder.Local), + fmt.Sprintf("%s=%s", serviceLogsDirEnv, svcInfo.Logs.Folder.Local), fmt.Sprintf("%s=%s", localCACertEnv, caCertPath), ) @@ -100,7 +100,7 @@ func (d *CustomAgentDeployer) SetUp(ctx context.Context, inCtxt ServiceContext) }, } - outCtxt := inCtxt + outCtxt := svcInfo p, err := compose.NewProject(service.project, service.ymlPaths...) if err != nil { @@ -126,8 +126,8 @@ func (d *CustomAgentDeployer) SetUp(ctx context.Context, inCtxt ServiceContext) } } - inCtxt.Name = dockerCustomAgentName - serviceName := inCtxt.Name + svcInfo.Name = dockerCustomAgentName + serviceName := svcInfo.Name opts := compose.CommandOptions{ Env: env, @@ -177,8 +177,8 @@ func (d *CustomAgentDeployer) SetUp(ctx context.Context, inCtxt ServiceContext) outCtxt.Port = outCtxt.Ports[0] } - outCtxt.Agent.Host.NamePrefix = inCtxt.Name - service.ctxt = outCtxt + outCtxt.Agent.Host.NamePrefix = svcInfo.Name + service.svcInfo = outCtxt return &service, nil } diff --git a/internal/servicedeployer/deployed_service.go b/internal/servicedeployer/deployed_service.go index 8c9d302f11..861a4c42d7 100644 --- a/internal/servicedeployer/deployed_service.go +++ b/internal/servicedeployer/deployed_service.go @@ -19,11 +19,11 @@ type DeployedService interface { // Signal sends a signal to the service. Signal(ctx context.Context, signal string) error - // Context returns the current context from the service. 
- Context() ServiceContext + // Info returns the current information from the service. + Info() ServiceInfo - // SetContext sets the current context for the service. - SetContext(str ServiceContext) error + // SetInfo sets the current information about the service. + SetInfo(ServiceInfo) error // ExitCode returns true if the service is exited and its exit code. ExitCode(ctx context.Context, service string) (bool, int, error) diff --git a/internal/servicedeployer/context.go b/internal/servicedeployer/info.go similarity index 89% rename from internal/servicedeployer/context.go rename to internal/servicedeployer/info.go index 8ef382cb60..d8a6dd1be4 100644 --- a/internal/servicedeployer/context.go +++ b/internal/servicedeployer/info.go @@ -10,10 +10,10 @@ const ( testRunIDEnv = "TEST_RUN_ID" ) -// ServiceContext encapsulates context that is both available to a ServiceDeployer and -// populated by a DeployedService. The fields in ServiceContext may be used in handlebars +// ServiceInfo encapsulates context that is both available to a ServiceDeployer and +// populated by a DeployedService. The fields in ServiceInfo may be used in handlebars // templates in system test configuration files, for example: {{ Hostname }}. -type ServiceContext struct { +type ServiceInfo struct { // Name is the name of the service. Name string @@ -66,7 +66,7 @@ type ServiceContext struct { } // Aliases method returned aliases to properties of the service context. 
-func (sc *ServiceContext) Aliases() map[string]interface{} { +func (sc *ServiceInfo) Aliases() map[string]interface{} { m := map[string]interface{}{ serviceLogsDirEnv: func() interface{} { return sc.Logs.Folder.Agent diff --git a/internal/servicedeployer/kubernetes.go b/internal/servicedeployer/kubernetes.go index 14861ef673..543044efee 100644 --- a/internal/servicedeployer/kubernetes.go +++ b/internal/servicedeployer/kubernetes.go @@ -45,7 +45,7 @@ type KubernetesServiceDeployerOptions struct { } type kubernetesDeployedService struct { - ctxt ServiceContext + svcInfo ServiceInfo definitionsDir string } @@ -78,12 +78,12 @@ func (s kubernetesDeployedService) ExitCode(_ context.Context, _ string) (bool, return false, -1, ErrNotSupported } -func (s kubernetesDeployedService) Context() ServiceContext { - return s.ctxt +func (s kubernetesDeployedService) Info() ServiceInfo { + return s.svcInfo } -func (s *kubernetesDeployedService) SetContext(sc ServiceContext) error { - s.ctxt = sc +func (s *kubernetesDeployedService) SetInfo(sc ServiceInfo) error { + s.svcInfo = sc return nil } @@ -103,7 +103,7 @@ func NewKubernetesServiceDeployer(opts KubernetesServiceDeployerOptions) (*Kuber // SetUp function links the kind container with elastic-package-stack network, installs Elastic-Agent and optionally // custom YAML definitions. 
-func (ksd KubernetesServiceDeployer) SetUp(ctx context.Context, service ServiceContext) (DeployedService, error) { +func (ksd KubernetesServiceDeployer) SetUp(ctx context.Context, svcInfo ServiceInfo) (DeployedService, error) { err := kind.VerifyContext(ctx) if err != nil { return nil, fmt.Errorf("kind context verification failed: %w", err) @@ -134,13 +134,13 @@ func (ksd KubernetesServiceDeployer) SetUp(ctx context.Context, service ServiceC } } - service.Name = kind.ControlPlaneContainerName - service.Hostname = kind.ControlPlaneContainerName + svcInfo.Name = kind.ControlPlaneContainerName + svcInfo.Hostname = kind.ControlPlaneContainerName // kind-control-plane is the name of the kind host where Pod is running since we use hostNetwork setting // to deploy Agent Pod. Because of this, hostname inside pod will be equal to the name of the k8s host. - service.Agent.Host.NamePrefix = "kind-control-plane" + svcInfo.Agent.Host.NamePrefix = "kind-control-plane" return &kubernetesDeployedService{ - ctxt: service, + svcInfo: svcInfo, definitionsDir: ksd.definitionsDir, }, nil } diff --git a/internal/servicedeployer/service_deployer.go b/internal/servicedeployer/service_deployer.go index d9002b878d..f151916e50 100644 --- a/internal/servicedeployer/service_deployer.go +++ b/internal/servicedeployer/service_deployer.go @@ -11,5 +11,5 @@ import "context" type ServiceDeployer interface { // SetUp implements the logic for setting up a service. It takes a context and returns a // ServiceHandler. 
- SetUp(context.Context, ServiceContext) (DeployedService, error) + SetUp(context.Context, ServiceInfo) (DeployedService, error) } diff --git a/internal/servicedeployer/terraform.go b/internal/servicedeployer/terraform.go index fead3b69f2..1a0295e927 100644 --- a/internal/servicedeployer/terraform.go +++ b/internal/servicedeployer/terraform.go @@ -45,11 +45,11 @@ type TerraformServiceDeployer struct { } // addTerraformOutputs method reads the terraform outputs generated in the json format and -// adds them to the custom properties of ServiceContext and can be used in the handlebars template +// adds them to the custom properties of ServiceInfo and can be used in the handlebars template // like `{{TF_OUTPUT_queue_url}}` where `queue_url` is the output configured -func addTerraformOutputs(outCtxt ServiceContext) error { +func addTerraformOutputs(svcInfo ServiceInfo) error { // Read the `output.json` file where terraform outputs are generated - outputFile := filepath.Join(outCtxt.OutputDir, terraformOutputJsonFile) + outputFile := filepath.Join(svcInfo.OutputDir, terraformOutputJsonFile) content, err := os.ReadFile(outputFile) if err != nil { return fmt.Errorf("failed to read terraform output file: %w", err) @@ -71,12 +71,12 @@ func addTerraformOutputs(outCtxt ServiceContext) error { return nil } - if outCtxt.CustomProperties == nil { - outCtxt.CustomProperties = make(map[string]any, len(terraformOutputs)) + if svcInfo.CustomProperties == nil { + svcInfo.CustomProperties = make(map[string]any, len(terraformOutputs)) } // Prefix variables names with TF_OUTPUT_ for k, outputs := range terraformOutputs { - outCtxt.CustomProperties[terraformOutputPrefix+k] = outputs.Value + svcInfo.CustomProperties[terraformOutputPrefix+k] = outputs.Value } return nil } @@ -89,7 +89,7 @@ func NewTerraformServiceDeployer(definitionsDir string) (*TerraformServiceDeploy } // SetUp method boots up the Docker Compose with Terraform executor and mounted .tf definitions. 
-func (tsd TerraformServiceDeployer) SetUp(ctx context.Context, svcCtxt ServiceContext) (DeployedService, error) { +func (tsd TerraformServiceDeployer) SetUp(ctx context.Context, svcInfo ServiceInfo) (DeployedService, error) { logger.Debug("setting up service using Terraform deployer") configDir, err := tsd.installDockerfile() @@ -104,14 +104,14 @@ func (tsd TerraformServiceDeployer) SetUp(ctx context.Context, svcCtxt ServiceCo ymlPaths = append(ymlPaths, envYmlPath) } - tfEnvironment := tsd.buildTerraformExecutorEnvironment(svcCtxt) + tfEnvironment := tsd.buildTerraformExecutorEnvironment(svcInfo) service := dockerComposeDeployedService{ ymlPaths: ymlPaths, project: "elastic-package-service", env: tfEnvironment, } - outCtxt := svcCtxt + outCtxt := svcInfo p, err := compose.NewProject(service.project, service.ymlPaths...) if err != nil { @@ -162,7 +162,7 @@ func (tsd TerraformServiceDeployer) SetUp(ctx context.Context, svcCtxt ServiceCo if err != nil { return nil, fmt.Errorf("could not handle terraform output: %w", err) } - service.ctxt = outCtxt + service.svcInfo = outCtxt return &service, nil } diff --git a/internal/servicedeployer/terraform_env.go b/internal/servicedeployer/terraform_env.go index 93256b3fbf..428aefe352 100644 --- a/internal/servicedeployer/terraform_env.go +++ b/internal/servicedeployer/terraform_env.go @@ -20,12 +20,12 @@ const ( envYmlFile = "env.yml" ) -func (tsd TerraformServiceDeployer) buildTerraformExecutorEnvironment(ctxt ServiceContext) []string { +func (tsd TerraformServiceDeployer) buildTerraformExecutorEnvironment(info ServiceInfo) []string { vars := map[string]string{} - vars[serviceLogsDirEnv] = ctxt.Logs.Folder.Local - vars[tfTestRunID] = ctxt.Test.RunID + vars[serviceLogsDirEnv] = info.Logs.Folder.Local + vars[tfTestRunID] = info.Test.RunID vars[tfDir] = tsd.definitionsDir - vars[tfOutputDir] = ctxt.OutputDir + vars[tfOutputDir] = info.OutputDir var pairs []string for k, v := range vars { diff --git 
a/internal/servicedeployer/terraform_test.go b/internal/servicedeployer/terraform_test.go index c8ee673b0f..3b06c8a977 100644 --- a/internal/servicedeployer/terraform_test.go +++ b/internal/servicedeployer/terraform_test.go @@ -16,7 +16,7 @@ func TestAddTerraformOutputs(t *testing.T) { var testCases = []struct { testName string err string - ctxt ServiceContext + svcInfo ServiceInfo runId string content []byte expectedProps map[string]interface{} @@ -25,7 +25,7 @@ func TestAddTerraformOutputs(t *testing.T) { { testName: "invalid_json_output", runId: "987987", - ctxt: ServiceContext{ + svcInfo: ServiceInfo{ Test: struct{ RunID string }{"987987"}, }, content: []byte( @@ -37,7 +37,7 @@ func TestAddTerraformOutputs(t *testing.T) { { testName: "empty_json_output", runId: "v", - ctxt: ServiceContext{ + svcInfo: ServiceInfo{ Test: struct{ RunID string }{"9887"}, }, content: []byte( @@ -48,7 +48,7 @@ func TestAddTerraformOutputs(t *testing.T) { { testName: "single_value_output", runId: "99999", - ctxt: ServiceContext{ + svcInfo: ServiceInfo{ Test: struct{ RunID string }{"99999"}, }, content: []byte( @@ -67,7 +67,7 @@ func TestAddTerraformOutputs(t *testing.T) { { testName: "multiple_value_output", runId: "23465", - ctxt: ServiceContext{ + svcInfo: ServiceInfo{ Test: struct{ RunID string }{"23465"}, }, content: []byte( @@ -92,7 +92,7 @@ func TestAddTerraformOutputs(t *testing.T) { { testName: "complex_value_output", runId: "078907890", - ctxt: ServiceContext{ + svcInfo: ServiceInfo{ Test: struct{ RunID string }{"078907890"}, }, content: []byte( @@ -138,21 +138,21 @@ func TestAddTerraformOutputs(t *testing.T) { for _, tc := range testCases { t.Run(tc.testName, func(t *testing.T) { - tc.ctxt.CustomProperties = make(map[string]interface{}) - tc.ctxt.OutputDir = t.TempDir() + tc.svcInfo.CustomProperties = make(map[string]interface{}) + tc.svcInfo.OutputDir = t.TempDir() - if err := os.WriteFile(tc.ctxt.OutputDir+"/tfOutputValues.json", tc.content, 0777); err != nil { + if err := 
os.WriteFile(tc.svcInfo.OutputDir+"/tfOutputValues.json", tc.content, 0777); err != nil { t.Fatal(err) } // Test that the terraform output values are generated correctly - err := addTerraformOutputs(tc.ctxt) + err := addTerraformOutputs(tc.svcInfo) if tc.expectedError { require.Error(t, err) return } require.NoError(t, err) - assert.Equal(t, tc.expectedProps, tc.ctxt.CustomProperties) + assert.Equal(t, tc.expectedProps, tc.svcInfo.CustomProperties) }) } } diff --git a/internal/testrunner/runners/system/runner.go b/internal/testrunner/runners/system/runner.go index c434fd01a4..5cae6f7c28 100644 --- a/internal/testrunner/runners/system/runner.go +++ b/internal/testrunner/runners/system/runner.go @@ -200,7 +200,7 @@ func (r *runner) Run(ctx context.Context, options testrunner.TestOptions) ([]tes } serviceOptions := r.createServiceOptions(variant) - serviceContext, err := r.createServiceContext(serviceOptions) + serviceContext, err := r.createServiceInfo(serviceOptions) if err != nil { return result.WithError(err) } @@ -276,20 +276,20 @@ func (r *runner) createServiceOptions(variantName string) servicedeployer.Factor } } -func (r *runner) createServiceContext(serviceOptions servicedeployer.FactoryOptions) (servicedeployer.ServiceContext, error) { - var serviceContext servicedeployer.ServiceContext - serviceContext.Name = r.options.TestFolder.Package - serviceContext.Logs.Folder.Local = r.locationManager.ServiceLogDir() - serviceContext.Logs.Folder.Agent = ServiceLogsAgentDir - serviceContext.Test.RunID = createTestRunID() +func (r *runner) createServiceInfo(serviceOptions servicedeployer.FactoryOptions) (servicedeployer.ServiceInfo, error) { + var svcInfo servicedeployer.ServiceInfo + svcInfo.Name = r.options.TestFolder.Package + svcInfo.Logs.Folder.Local = r.locationManager.ServiceLogDir() + svcInfo.Logs.Folder.Agent = ServiceLogsAgentDir + svcInfo.Test.RunID = createTestRunID() - outputDir, err := servicedeployer.CreateOutputDir(r.locationManager, 
serviceContext.Test.RunID) + outputDir, err := servicedeployer.CreateOutputDir(r.locationManager, svcInfo.Test.RunID) if err != nil { - return servicedeployer.ServiceContext{}, fmt.Errorf("could not create output dir for terraform deployer %w", err) + return servicedeployer.ServiceInfo{}, fmt.Errorf("could not create output dir for terraform deployer %w", err) } - serviceContext.OutputDir = outputDir + svcInfo.OutputDir = outputDir - return serviceContext, nil + return svcInfo, nil } // TearDown method doesn't perform any global action as the "tear down" is executed per test case. @@ -475,7 +475,7 @@ func (r *runner) run(ctx context.Context) (results []testrunner.TestResult, err func (r *runner) runTestPerVariant(ctx context.Context, result *testrunner.ResultComposer, cfgFile, variantName string) ([]testrunner.TestResult, error) { serviceOptions := r.createServiceOptions(variantName) - serviceContext, err := r.createServiceContext(serviceOptions) + serviceContext, err := r.createServiceInfo(serviceOptions) if err != nil { return result.WithError(err) } @@ -645,7 +645,7 @@ type scenarioTest struct { docs []common.MapStr } -func (r *runner) prepareScenario(ctx context.Context, config *testConfig, serviceContext servicedeployer.ServiceContext, serviceOptions servicedeployer.FactoryOptions) (*scenarioTest, error) { +func (r *runner) prepareScenario(ctx context.Context, config *testConfig, serviceContext servicedeployer.ServiceInfo, serviceOptions servicedeployer.FactoryOptions) (*scenarioTest, error) { var err error var serviceStateData ServiceState if r.options.RunSetup { @@ -701,7 +701,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic if err != nil { return nil, fmt.Errorf("could not setup service: %w", err) } - serviceContext = service.Context() + serviceContext = service.Info() r.shutdownServiceHandler = func() error { logger.Debug("tearing down service...") if err := service.TearDown(ctx); err != nil { @@ -1158,7 +1158,7 @@ func 
(r *runner) validateTestScenario(result *testrunner.ResultComposer, scenari return result.WithSuccess() } -func (r *runner) runTest(ctx context.Context, config *testConfig, serviceContext servicedeployer.ServiceContext, serviceOptions servicedeployer.FactoryOptions) ([]testrunner.TestResult, error) { +func (r *runner) runTest(ctx context.Context, config *testConfig, serviceContext servicedeployer.ServiceInfo, serviceOptions servicedeployer.FactoryOptions) ([]testrunner.TestResult, error) { result := r.newResult(config.Name()) if config.Skip != nil { @@ -1178,7 +1178,7 @@ func (r *runner) runTest(ctx context.Context, config *testConfig, serviceContext return r.validateTestScenario(result, scenario, config, serviceOptions) } -func checkEnrolledAgents(ctx context.Context, client *kibana.Client, serviceContext servicedeployer.ServiceContext) ([]kibana.Agent, error) { +func checkEnrolledAgents(ctx context.Context, client *kibana.Client, serviceContext servicedeployer.ServiceInfo) ([]kibana.Agent, error) { var agents []kibana.Agent enrolled, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { allAgents, err := client.ListAgents() @@ -1585,9 +1585,9 @@ func deleteDataStreamDocs(api *elasticsearch.API, dataStream string) error { return nil } -func filterAgents(allAgents []kibana.Agent, ctx servicedeployer.ServiceContext) []kibana.Agent { - if ctx.Agent.Host.NamePrefix != "" { - logger.Debugf("filter agents using criteria: NamePrefix=%s", ctx.Agent.Host.NamePrefix) +func filterAgents(allAgents []kibana.Agent, svcInfo servicedeployer.ServiceInfo) []kibana.Agent { + if svcInfo.Agent.Host.NamePrefix != "" { + logger.Debugf("filter agents using criteria: NamePrefix=%s", svcInfo.Agent.Host.NamePrefix) } var filtered []kibana.Agent @@ -1596,7 +1596,7 @@ func filterAgents(allAgents []kibana.Agent, ctx servicedeployer.ServiceContext) continue // For some reason Kibana doesn't always return a valid policy revision (eventually it will be present and valid) } - if 
ctx.Agent.Host.NamePrefix != "" && !strings.HasPrefix(agent.LocalMetadata.Host.Name, ctx.Agent.Host.NamePrefix) { + if svcInfo.Agent.Host.NamePrefix != "" && !strings.HasPrefix(agent.LocalMetadata.Host.Name, svcInfo.Agent.Host.NamePrefix) { continue } filtered = append(filtered, agent) diff --git a/internal/testrunner/runners/system/test_config.go b/internal/testrunner/runners/system/test_config.go index 5a88520d0a..2a319777df 100644 --- a/internal/testrunner/runners/system/test_config.go +++ b/internal/testrunner/runners/system/test_config.go @@ -70,7 +70,7 @@ func (t testConfig) Name() string { return sb.String() } -func newConfig(configFilePath string, ctxt servicedeployer.ServiceContext, serviceVariantName string) (*testConfig, error) { +func newConfig(configFilePath string, svcInfo servicedeployer.ServiceInfo, serviceVariantName string) (*testConfig, error) { data, err := os.ReadFile(configFilePath) if err != nil && errors.Is(err, os.ErrNotExist) { return nil, fmt.Errorf("unable to find system test configuration file: %s: %w", configFilePath, err) @@ -80,7 +80,7 @@ func newConfig(configFilePath string, ctxt servicedeployer.ServiceContext, servi return nil, fmt.Errorf("could not load system test configuration file: %s: %w", configFilePath, err) } - data, err = applyContext(data, ctxt) + data, err = applyServiceInfo(data, svcInfo) if err != nil { return nil, fmt.Errorf("could not apply context to test configuration file: %s: %w", configFilePath, err) } @@ -117,17 +117,17 @@ func listConfigFiles(systemTestFolderPath string) (files []string, err error) { return files, nil } -// applyContext takes the given system test configuration (data) and replaces any placeholder variables in -// it with values from the given context (ctxt). The context may be populated from various sources but usually the +// applyServiceInfo takes the given system test configuration (data) and replaces any placeholder variables in +// it with values from the given service information. 
The context may be populated from various sources but usually the // most interesting context values will be set by a ServiceDeployer in its SetUp method. -func applyContext(data []byte, ctxt servicedeployer.ServiceContext) ([]byte, error) { +func applyServiceInfo(data []byte, serviceInfo servicedeployer.ServiceInfo) ([]byte, error) { tmpl, err := raymond.Parse(string(data)) if err != nil { return data, fmt.Errorf("parsing template body failed: %w", err) } - tmpl.RegisterHelpers(ctxt.Aliases()) + tmpl.RegisterHelpers(serviceInfo.Aliases()) - result, err := tmpl.Exec(ctxt) + result, err := tmpl.Exec(serviceInfo) if err != nil { return data, fmt.Errorf("could not render data with context: %w", err) } From 5d1c58ff15b6910467f13b3029a5dd2185ea4d4f Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Thu, 15 Feb 2024 20:34:06 +0100 Subject: [PATCH 10/32] Add context to more handlers --- internal/testrunner/runners/asset/runner.go | 6 +-- internal/testrunner/runners/system/runner.go | 39 +++++++++++--------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/internal/testrunner/runners/asset/runner.go b/internal/testrunner/runners/asset/runner.go index 7a204d9c3a..2925c31064 100644 --- a/internal/testrunner/runners/asset/runner.go +++ b/internal/testrunner/runners/asset/runner.go @@ -34,7 +34,7 @@ type runner struct { kibanaClient *kibana.Client // Execution order of following handlers is defined in runner.tearDown() method. 
- removePackageHandler func() error + removePackageHandler func(context.Context) error } // Ensures that runner implements testrunner.TestRunner interface @@ -107,7 +107,7 @@ func (r *runner) run() ([]testrunner.TestResult, error) { return result.WithError(fmt.Errorf("can't install the package: %w", err)) } - r.removePackageHandler = func() error { + r.removePackageHandler = func(context.Context) error { pkgManifest, err := packages.ReadPackageManifestFromPackageRoot(r.packageRootPath) if err != nil { return fmt.Errorf("reading package manifest failed: %w", err) @@ -178,7 +178,7 @@ func (r *runner) run() ([]testrunner.TestResult, error) { func (r *runner) TearDown(ctx context.Context) error { if r.removePackageHandler != nil { - if err := r.removePackageHandler(); err != nil { + if err := r.removePackageHandler(ctx); err != nil { return err } } diff --git a/internal/testrunner/runners/system/runner.go b/internal/testrunner/runners/system/runner.go index 5cae6f7c28..f4c4ef486d 100644 --- a/internal/testrunner/runners/system/runner.go +++ b/internal/testrunner/runners/system/runner.go @@ -110,12 +110,12 @@ type runner struct { serviceStateFilePath string // Execution order of following handlers is defined in runner.TearDown() method. - deleteTestPolicyHandler func() error - deletePackageHandler func() error - resetAgentPolicyHandler func() error - resetAgentLogLevelHandler func() error - shutdownServiceHandler func() error - wipeDataStreamHandler func() error + deleteTestPolicyHandler func(context.Context) error + deletePackageHandler func(context.Context) error + resetAgentPolicyHandler func(context.Context) error + resetAgentLogLevelHandler func(context.Context) error + shutdownServiceHandler func(context.Context) error + wipeDataStreamHandler func(context.Context) error } // Ensures that runner implements testrunner.TestRunner interface @@ -306,43 +306,46 @@ func (r *runner) tearDownTest(ctx context.Context) error { } } + // Avoid cancellations during cleanup. 
+ cleanupCtx := context.WithoutCancel(ctx) + if r.resetAgentPolicyHandler != nil { - if err := r.resetAgentPolicyHandler(); err != nil { + if err := r.resetAgentPolicyHandler(cleanupCtx); err != nil { return err } r.resetAgentPolicyHandler = nil } if r.resetAgentLogLevelHandler != nil { - if err := r.resetAgentLogLevelHandler(); err != nil { + if err := r.resetAgentLogLevelHandler(cleanupCtx); err != nil { return err } r.resetAgentLogLevelHandler = nil } if r.deleteTestPolicyHandler != nil { - if err := r.deleteTestPolicyHandler(); err != nil { + if err := r.deleteTestPolicyHandler(cleanupCtx); err != nil { return err } r.deleteTestPolicyHandler = nil } if r.deletePackageHandler != nil { - if err := r.deletePackageHandler(); err != nil { + if err := r.deletePackageHandler(cleanupCtx); err != nil { return err } r.deletePackageHandler = nil } if r.shutdownServiceHandler != nil { - if err := r.shutdownServiceHandler(); err != nil { + if err := r.shutdownServiceHandler(cleanupCtx); err != nil { return err } r.shutdownServiceHandler = nil } if r.wipeDataStreamHandler != nil { - if err := r.wipeDataStreamHandler(); err != nil { + if err := r.wipeDataStreamHandler(cleanupCtx); err != nil { return err } r.wipeDataStreamHandler = nil @@ -702,7 +705,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic return nil, fmt.Errorf("could not setup service: %w", err) } serviceContext = service.Info() - r.shutdownServiceHandler = func() error { + r.shutdownServiceHandler = func(ctx context.Context) error { logger.Debug("tearing down service...") if err := service.TearDown(ctx); err != nil { return fmt.Errorf("error tearing down service: %w", err) @@ -740,7 +743,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic return nil, fmt.Errorf("failed to install package: %v", err) } } - r.deletePackageHandler = func() error { + r.deletePackageHandler = func(ctx context.Context) error { stackVersion, err := 
semver.NewVersion(serviceOptions.StackVersion) if err != nil { return fmt.Errorf("failed to parse stack version: %w", err) @@ -786,7 +789,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic return nil, fmt.Errorf("could not create test policy: %w", err) } } - r.deleteTestPolicyHandler = func() error { + r.deleteTestPolicyHandler = func(ctx context.Context) error { logger.Debug("deleting test policy...") if err := r.options.KibanaClient.DeletePolicy(*policy); err != nil { return fmt.Errorf("error cleaning up test policy: %w", err) @@ -820,7 +823,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic ds.Inputs[0].Streams[0].DataStream.Dataset, ) - r.wipeDataStreamHandler = func() error { + r.wipeDataStreamHandler = func(ctx context.Context) error { logger.Debugf("deleting data in data stream...") if err := deleteDataStreamDocs(r.options.API, scenario.dataStream); err != nil { return fmt.Errorf("error deleting data in data stream: %w", err) @@ -862,7 +865,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic } } // Assign policy to agent - r.resetAgentPolicyHandler = func() error { + r.resetAgentPolicyHandler = func(ctx context.Context) error { logger.Debug("reassigning original policy back to agent...") if err := r.options.KibanaClient.AssignPolicyToAgent(ctx, agent, origPolicy); err != nil { return fmt.Errorf("error reassigning original policy to agent: %w", err) @@ -885,7 +888,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic return nil, fmt.Errorf("error setting log level debug for agent %s: %w", agent.ID, err) } } - r.resetAgentLogLevelHandler = func() error { + r.resetAgentLogLevelHandler = func(ctx context.Context) error { logger.Debugf("reassigning original log level %q back to agent...", origLogLevel) if err := r.options.KibanaClient.SetAgentLogLevel(agent.ID, origLogLevel); err != nil { From 
dfa337a82f0e02d90952e0e4f827552b6e7ae29b Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Fri, 23 Feb 2024 19:38:49 +0100 Subject: [PATCH 11/32] Handle returned error --- cmd/root.go | 7 ++++--- internal/benchrunner/runners/stream/runner.go | 2 +- main.go | 9 ++++++--- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/cmd/root.go b/cmd/root.go index 8557d2c88d..a2e401cf92 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -42,9 +42,10 @@ var commands = []*cobraext.Command{ // RootCmd creates and returns root cmd for elastic-package func RootCmd() *cobra.Command { rootCmd := &cobra.Command{ - Use: "elastic-package", - Short: "elastic-package - Command line tool for developing Elastic Integrations", - SilenceUsage: true, + Use: "elastic-package", + Short: "elastic-package - Command line tool for developing Elastic Integrations", + SilenceUsage: true, + SilenceErrors: true, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { return cobraext.ComposeCommandActions(cmd, args, processPersistentFlags, diff --git a/internal/benchrunner/runners/stream/runner.go b/internal/benchrunner/runners/stream/runner.go index 282e603b3b..fcee139635 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ -210,7 +210,7 @@ func (r *runner) run(ctx context.Context) (err error) { return err case <-ctx.Done(): close(r.done) - return nil + return ctx.Err() } } diff --git a/main.go b/main.go index 48e158f4b7..ba9b3def40 100644 --- a/main.go +++ b/main.go @@ -13,6 +13,7 @@ import ( "github.com/elastic/elastic-package/cmd" "github.com/elastic/elastic-package/internal/install" + "github.com/elastic/elastic-package/internal/logger" ) func main() { @@ -27,10 +28,12 @@ func main() { defer cancel() err = rootCmd.ExecuteContext(ctx) - if errIsInterruption(err) { - os.Exit(130) - } if err != nil { + if errIsInterruption(err) { + logger.Info("Signal caught!") + os.Exit(130) + } + logger.Error(rootCmd.ErrPrefix(), 
err) os.Exit(1) } } From 5822449833312a5e211a3c38ce1d977c9a6c188f Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Fri, 23 Feb 2024 19:55:45 +0100 Subject: [PATCH 12/32] Reduce sync primitives in streamer --- internal/benchrunner/runners/stream/runner.go | 60 ++++++++++--------- 1 file changed, 33 insertions(+), 27 deletions(-) diff --git a/internal/benchrunner/runners/stream/runner.go b/internal/benchrunner/runners/stream/runner.go index fcee139635..78f3d74a68 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ -44,10 +44,6 @@ type runner struct { runtimeDataStreams map[string]string generators map[string]genlib.Generator backFillGenerators map[string]genlib.Generator - errChanGenerators chan error - - wg sync.WaitGroup - done chan struct{} // Execution order of following handlers is defined in runner.TearDown() method. removePackageHandler func(context.Context) error @@ -68,8 +64,6 @@ func (r *runner) Run(ctx context.Context) (reporters.Reportable, error) { } func (r *runner) TearDown(ctx context.Context) error { - r.wg.Wait() - if !r.options.PerformCleanup { r.removePackageHandler = nil r.wipeDataStreamHandler = nil @@ -104,8 +98,6 @@ func (r *runner) TearDown(ctx context.Context) error { func (r *runner) setUp(ctx context.Context) error { r.generators = make(map[string]genlib.Generator) r.backFillGenerators = make(map[string]genlib.Generator) - r.errChanGenerators = make(chan error) - r.done = make(chan struct{}) r.runtimeDataStreams = make(map[string]string) @@ -202,16 +194,7 @@ func (r *runner) wipeDataStreamsOnSetup() error { } func (r *runner) run(ctx context.Context) (err error) { - r.streamData() - - select { - case err = <-r.errChanGenerators: - close(r.done) - return err - case <-ctx.Done(): - close(r.done) - return ctx.Err() - } + return r.streamData(ctx) } func (r *runner) installPackage() error { @@ -476,17 +459,26 @@ func (r *runner) performBulkRequest(bulkRequest string) error { 
return nil } -func (r *runner) streamData() { +func (r *runner) streamData(ctx context.Context) error { logger.Debug("streaming data...") - r.wg.Add(len(r.backFillGenerators) + len(r.generators)) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + errC := make(chan error) + defer close(errC) + + var wg sync.WaitGroup + wg.Add(len(r.backFillGenerators) + len(r.generators)) + defer wg.Wait() + for scenarioName, generator := range r.generators { go func(scenarioName string, generator genlib.Generator) { - defer r.wg.Done() + defer wg.Done() ticker := time.NewTicker(r.options.PeriodDuration) indexName := r.runtimeDataStreams[scenarioName] for { select { - case <-r.done: + case <-ctx.Done(): return case <-ticker.C: logger.Debugf("bulk request of %d events on %s...", r.options.EventsPerPeriod, indexName) @@ -500,14 +492,14 @@ func (r *runner) streamData() { } if err != nil { - r.errChanGenerators <- fmt.Errorf("error while generating event for streaming: %w", err) + errC <- fmt.Errorf("error while generating event for streaming: %w", err) return } } err := r.performBulkRequest(bulkBodyBuilder.String()) if err != nil { - r.errChanGenerators <- fmt.Errorf("error performing bulk request: %w", err) + errC <- fmt.Errorf("error performing bulk request: %w", err) return } } @@ -517,7 +509,7 @@ func (r *runner) streamData() { for scenarioName, backFillGenerator := range r.backFillGenerators { go func(scenarioName string, generator genlib.Generator) { - defer r.wg.Done() + defer wg.Done() var bulkBodyBuilder strings.Builder indexName := r.runtimeDataStreams[scenarioName] logger.Debugf("bulk request of %s backfill events on %s...", r.options.BackFill.String(), indexName) @@ -530,18 +522,32 @@ func (r *runner) streamData() { } if err != nil { - r.errChanGenerators <- fmt.Errorf("error while generating event for streaming: %w", err) + errC <- fmt.Errorf("error while generating event for streaming: %w", err) return } } err := r.performBulkRequest(bulkBodyBuilder.String()) 
if err != nil { - r.errChanGenerators <- fmt.Errorf("error performing bulk request: %w", err) + errC <- fmt.Errorf("error performing bulk request: %w", err) return } }(scenarioName, backFillGenerator) } + + var err error + select { + case <-ctx.Done(): + err = ctx.Err() + case err = <-errC: + // Ensure no other goroutine is blocked sending errors. + go func() { + for range errC { + } + }() + cancel() + } + return err } type benchMeta struct { From 854149a0c758d3f3683ee9fac8b3139f4da5823c Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Fri, 23 Feb 2024 20:41:17 +0100 Subject: [PATCH 13/32] Refactor data streamer --- internal/benchrunner/runners/stream/runner.go | 143 ++++++++++-------- 1 file changed, 82 insertions(+), 61 deletions(-) diff --git a/internal/benchrunner/runners/stream/runner.go b/internal/benchrunner/runners/stream/runner.go index 78f3d74a68..2191670065 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ -468,71 +468,28 @@ func (r *runner) streamData(ctx context.Context) error { defer close(errC) var wg sync.WaitGroup - wg.Add(len(r.backFillGenerators) + len(r.generators)) defer wg.Wait() - for scenarioName, generator := range r.generators { - go func(scenarioName string, generator genlib.Generator) { + for scenarioName := range r.generators { + wg.Add(1) + go func(scenarioName string) { defer wg.Done() - ticker := time.NewTicker(r.options.PeriodDuration) - indexName := r.runtimeDataStreams[scenarioName] - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - logger.Debugf("bulk request of %d events on %s...", r.options.EventsPerPeriod, indexName) - var bulkBodyBuilder strings.Builder - buf := bytes.NewBufferString("") - for i := uint64(0); i < r.options.EventsPerPeriod; i++ { - var err error - bulkBodyBuilder, err = r.collectBulkRequestBody(indexName, scenarioName, buf, generator, bulkBodyBuilder) - if err == io.EOF { - break - } - - if err != nil { - errC <- 
fmt.Errorf("error while generating event for streaming: %w", err) - return - } - } - - err := r.performBulkRequest(bulkBodyBuilder.String()) - if err != nil { - errC <- fmt.Errorf("error performing bulk request: %w", err) - return - } - } + err := r.runStreamGenerator(ctx, scenarioName) + if err != nil { + errC <- err } - }(scenarioName, generator) + }(scenarioName) } - for scenarioName, backFillGenerator := range r.backFillGenerators { - go func(scenarioName string, generator genlib.Generator) { + for scenarioName := range r.backFillGenerators { + wg.Add(1) + go func(scenarioName string) { defer wg.Done() - var bulkBodyBuilder strings.Builder - indexName := r.runtimeDataStreams[scenarioName] - logger.Debugf("bulk request of %s backfill events on %s...", r.options.BackFill.String(), indexName) - buf := bytes.NewBufferString("") - for { - var err error - bulkBodyBuilder, err = r.collectBulkRequestBody(indexName, scenarioName, buf, generator, bulkBodyBuilder) - if err == io.EOF { - break - } - - if err != nil { - errC <- fmt.Errorf("error while generating event for streaming: %w", err) - return - } - } - - err := r.performBulkRequest(bulkBodyBuilder.String()) + err := r.runBackfillGenerator(ctx, scenarioName) if err != nil { - errC <- fmt.Errorf("error performing bulk request: %w", err) - return + errC <- err } - }(scenarioName, backFillGenerator) + }(scenarioName) } var err error @@ -540,16 +497,80 @@ func (r *runner) streamData(ctx context.Context) error { case <-ctx.Done(): err = ctx.Err() case err = <-errC: - // Ensure no other goroutine is blocked sending errors. - go func() { - for range errC { - } - }() cancel() } + // Ensure no goroutine is blocked sending errors. 
+ go func() { + for range errC { + } + }() return err } +func (r *runner) runStreamGenerator(ctx context.Context, scenarioName string) error { + generator := r.generators[scenarioName] + indexName := r.runtimeDataStreams[scenarioName] + + ticker := time.NewTicker(r.options.PeriodDuration) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + + logger.Debugf("bulk request of %d events on %s...", r.options.EventsPerPeriod, indexName) + var bulkBodyBuilder strings.Builder + buf := bytes.NewBufferString("") + for i := uint64(0); i < r.options.EventsPerPeriod; i++ { + var err error + bulkBodyBuilder, err = r.collectBulkRequestBody(indexName, scenarioName, buf, generator, bulkBodyBuilder) + if errors.Is(err, io.EOF) { + break + } + + if err != nil { + return fmt.Errorf("error while generating event for streaming: %w", err) + } + } + + err := r.performBulkRequest(bulkBodyBuilder.String()) + if err != nil { + return fmt.Errorf("error performing bulk request: %w", err) + } + } + + return nil +} + +func (r *runner) runBackfillGenerator(ctx context.Context, scenarioName string) error { + var bulkBodyBuilder strings.Builder + generator := r.backFillGenerators[scenarioName] + indexName := r.runtimeDataStreams[scenarioName] + logger.Debugf("bulk request of %s backfill events on %s...", r.options.BackFill.String(), indexName) + buf := bytes.NewBufferString("") + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var err error + bulkBodyBuilder, err = r.collectBulkRequestBody(indexName, scenarioName, buf, generator, bulkBodyBuilder) + if errors.Is(err, io.EOF) { + break + } + + if err != nil { + return fmt.Errorf("error while generating event for streaming: %w", err) + } + } + + return r.performBulkRequest(bulkBodyBuilder.String()) +} + type benchMeta struct { Info struct { Benchmark string `json:"benchmark"` From 638cadde74214eb61c9198b151f37eaf6c8505d8 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor 
Date: Fri, 23 Feb 2024 21:24:30 +0100 Subject: [PATCH 14/32] Simplify empty method --- internal/benchrunner/runners/stream/runner.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/internal/benchrunner/runners/stream/runner.go b/internal/benchrunner/runners/stream/runner.go index 2191670065..c5b46a1385 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ -193,10 +193,6 @@ func (r *runner) wipeDataStreamsOnSetup() error { return nil } -func (r *runner) run(ctx context.Context) (err error) { - return r.streamData(ctx) -} - func (r *runner) installPackage() error { return r.installPackageFromPackageRoot() } @@ -459,7 +455,7 @@ func (r *runner) performBulkRequest(bulkRequest string) error { return nil } -func (r *runner) streamData(ctx context.Context) error { +func (r *runner) run(ctx context.Context) error { logger.Debug("streaming data...") ctx, cancel := context.WithCancel(ctx) defer cancel() From 4f746a4bdb147b8f3915772a815a23ac23adf501 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Tue, 5 Mar 2024 20:12:30 +0100 Subject: [PATCH 15/32] Recover previous periods on wait loops --- internal/benchrunner/runners/rally/runner.go | 2 +- internal/benchrunner/runners/stream/runner.go | 2 +- internal/benchrunner/runners/system/runner.go | 6 +++--- internal/testrunner/runners/system/runner.go | 6 +++--- internal/wait/wait.go | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/internal/benchrunner/runners/rally/runner.go b/internal/benchrunner/runners/rally/runner.go index 97fcf17c32..0069864fa4 100644 --- a/internal/benchrunner/runners/rally/runner.go +++ b/internal/benchrunner/runners/rally/runner.go @@ -333,7 +333,7 @@ func (r *runner) setUp(ctx context.Context) error { cleared, err := wait.UntilTrue(ctx, func(context.Context) (bool, error) { hits, err := getTotalHits(r.options.ESAPI, r.runtimeDataStream) return hits == 0, err - }, 2*time.Minute) + }, 
5*time.Second, 2*time.Minute) if err != nil || !cleared { if err == nil { err = errors.New("unable to clear previous data") diff --git a/internal/benchrunner/runners/stream/runner.go b/internal/benchrunner/runners/stream/runner.go index c5b46a1385..6d4aa9fc60 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ -160,7 +160,7 @@ func (r *runner) setUp(ctx context.Context) error { totalHits += hits } return totalHits == 0, nil - }, 2*time.Minute) + }, 5*time.Second, 2*time.Minute) if err != nil || !cleared { if err == nil { err = errors.New("unable to clear previous data") diff --git a/internal/benchrunner/runners/system/runner.go b/internal/benchrunner/runners/system/runner.go index 694ffa3a2c..636df6e075 100644 --- a/internal/benchrunner/runners/system/runner.go +++ b/internal/benchrunner/runners/system/runner.go @@ -218,7 +218,7 @@ func (r *runner) setUp(ctx context.Context) error { cleared, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { hits, err := getTotalHits(r.options.ESAPI, r.runtimeDataStream) return hits == 0, err - }, 2*time.Minute) + }, 5*time.Second, 2*time.Minute) if err != nil || !cleared { if err == nil { err = errors.New("unable to clear previous data") @@ -632,7 +632,7 @@ func (r *runner) checkEnrolledAgents(ctx context.Context) ([]kibana.Agent, error } return true, nil - }, 5*time.Minute) + }, 5*time.Second, 5*time.Minute) if err != nil { return nil, fmt.Errorf("agent enrollment failed: %w", err) } @@ -672,7 +672,7 @@ func (r *runner) waitUntilBenchmarkFinishes(ctx context.Context) (bool, error) { } return ret, err - }, *r.scenario.WaitForDataTimeout) + }, 5*time.Second, *r.scenario.WaitForDataTimeout) } func (r *runner) enrollAgents(ctx context.Context) error { diff --git a/internal/testrunner/runners/system/runner.go b/internal/testrunner/runners/system/runner.go index f4c4ef486d..af5a6c4c16 100644 --- a/internal/testrunner/runners/system/runner.go +++ 
b/internal/testrunner/runners/system/runner.go @@ -955,7 +955,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic } return hits.size() > 0, nil - }, waitForDataTimeout) + }, 1*time.Second, waitForDataTimeout) if config.Service != "" && !config.IgnoreServiceError { exited, code, err := service.ExitCode(ctx, config.Service) @@ -1072,7 +1072,7 @@ func (r *runner) deleteOldDocumentsDataStreamAndWait(ctx context.Context, dataSt return hits.size() == 0, nil } return startHits.size() > hits.size(), nil - }, 2*time.Minute) + }, 1*time.Second, 2*time.Minute) if err != nil || !cleared { if err == nil { err = errors.New("unable to clear previous data") @@ -1195,7 +1195,7 @@ func checkEnrolledAgents(ctx context.Context, client *kibana.Client, serviceCont return false, nil // selected agents are unavailable yet } return true, nil - }, 5*time.Minute) + }, 1*time.Second, 5*time.Minute) if err != nil { return nil, fmt.Errorf("agent enrollment failed: %w", err) } diff --git a/internal/wait/wait.go b/internal/wait/wait.go index c75a36374c..b826573911 100644 --- a/internal/wait/wait.go +++ b/internal/wait/wait.go @@ -10,11 +10,11 @@ import ( ) // UntilTrue waits till the context is cancelled or the given function returns an error or true. 
-func UntilTrue(ctx context.Context, fn func(ctx context.Context) (bool, error), timeout time.Duration) (bool, error) { +func UntilTrue(ctx context.Context, fn func(ctx context.Context) (bool, error), period, timeout time.Duration) (bool, error) { timeoutTimer := time.NewTimer(timeout) defer timeoutTimer.Stop() - retryTicker := time.NewTicker(1 * time.Second) + retryTicker := time.NewTicker(period) defer retryTicker.Stop() for { From ae9ca78a9172f77c9dca64c473079f94219e2b96 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Tue, 5 Mar 2024 21:01:49 +0100 Subject: [PATCH 16/32] Pass contexts to ES requests in system test runner --- internal/testrunner/runners/system/runner.go | 46 +++++++++++--------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/internal/testrunner/runners/system/runner.go b/internal/testrunner/runners/system/runner.go index af5a6c4c16..c8f7e931a1 100644 --- a/internal/testrunner/runners/system/runner.go +++ b/internal/testrunner/runners/system/runner.go @@ -240,7 +240,7 @@ func (r *runner) Run(ctx context.Context, options testrunner.TestOptions) ([]tes if err != nil { return result.WithError(fmt.Errorf("failed to prepare scenario: %w", err)) } - return r.validateTestScenario(result, scenario, testConfig, serviceOptions) + return r.validateTestScenario(ctx, result, scenario, testConfig, serviceOptions) } if r.options.RunTearDown { @@ -506,8 +506,9 @@ func createTestRunID() string { return fmt.Sprintf("%d", rand.Intn(testRunMaxID-testRunMinID)+testRunMinID) } -func (r *runner) isSyntheticsEnabled(dataStream, componentTemplatePackage string) (bool, error) { +func (r *runner) isSyntheticsEnabled(ctx context.Context, dataStream, componentTemplatePackage string) (bool, error) { resp, err := r.options.API.Cluster.GetComponentTemplate( + r.options.API.Cluster.GetComponentTemplate.WithContext(ctx), r.options.API.Cluster.GetComponentTemplate.WithName(componentTemplatePackage), ) if err != nil { @@ -577,8 +578,9 @@ func (h hits) 
size() int { return len(h.Source) } -func (r *runner) getDocs(dataStream string) (*hits, error) { +func (r *runner) getDocs(ctx context.Context, dataStream string) (*hits, error) { resp, err := r.options.API.Search( + r.options.API.Search.WithContext(ctx), r.options.API.Search.WithIndex(dataStream), r.options.API.Search.WithSort("@timestamp:asc"), r.options.API.Search.WithSize(elasticsearchQuerySize), @@ -825,7 +827,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic r.wipeDataStreamHandler = func(ctx context.Context) error { logger.Debugf("deleting data in data stream...") - if err := deleteDataStreamDocs(r.options.API, scenario.dataStream); err != nil { + if err := deleteDataStreamDocs(ctx, r.options.API, scenario.dataStream); err != nil { return fmt.Errorf("error deleting data in data stream: %w", err) } return nil @@ -935,7 +937,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic oldHits := 0 passed, waitErr := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { var err error - hits, err = r.getDocs(scenario.dataStream) + hits, err = r.getDocs(ctx, scenario.dataStream) if err != nil { return false, err } @@ -976,7 +978,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic } logger.Debugf("check whether or not synthetics is enabled (component template %s)...", componentTemplatePackage) - scenario.syntheticEnabled, err = r.isSyntheticsEnabled(scenario.dataStream, componentTemplatePackage) + scenario.syntheticEnabled, err = r.isSyntheticsEnabled(ctx, scenario.dataStream, componentTemplatePackage) if err != nil { return nil, fmt.Errorf("failed to check if synthetic source is enabled: %w", err) } @@ -1055,15 +1057,15 @@ func (r *runner) writeScenarioState(currentPolicy, origPolicy *kibana.Policy, co func (r *runner) deleteOldDocumentsDataStreamAndWait(ctx context.Context, dataStream string, mustBeZero bool) error { logger.Debugf("Delete previous documents in 
data stream %q", dataStream) - if err := deleteDataStreamDocs(r.options.API, dataStream); err != nil { + if err := deleteDataStreamDocs(ctx, r.options.API, dataStream); err != nil { return fmt.Errorf("error deleting old data in data stream: %s: %w", dataStream, err) } - startHits, err := r.getDocs(dataStream) + startHits, err := r.getDocs(ctx, dataStream) if err != nil { return err } - cleared, err := wait.UntilTrue(ctx, func(context.Context) (bool, error) { - hits, err := r.getDocs(dataStream) + cleared, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { + hits, err := r.getDocs(ctx, dataStream) if err != nil { return false, err } @@ -1082,7 +1084,7 @@ func (r *runner) deleteOldDocumentsDataStreamAndWait(ctx context.Context, dataSt return nil } -func (r *runner) validateTestScenario(result *testrunner.ResultComposer, scenario *scenarioTest, config *testConfig, serviceOptions servicedeployer.FactoryOptions) ([]testrunner.TestResult, error) { +func (r *runner) validateTestScenario(ctx context.Context, result *testrunner.ResultComposer, scenario *scenarioTest, config *testConfig, serviceOptions servicedeployer.FactoryOptions) ([]testrunner.TestResult, error) { // Validate fields in docs // when reroute processors are used, expectedDatasets should be set depends on the processor config var expectedDatasets []string @@ -1154,7 +1156,7 @@ func (r *runner) validateTestScenario(result *testrunner.ResultComposer, scenari } // Check transforms if present - if err := r.checkTransforms(config, scenario.pkgManifest, scenario.kibanaDataStream, scenario.dataStream); err != nil { + if err := r.checkTransforms(ctx, config, scenario.pkgManifest, scenario.kibanaDataStream, scenario.dataStream); err != nil { return result.WithError(err) } @@ -1178,7 +1180,7 @@ func (r *runner) runTest(ctx context.Context, config *testConfig, serviceContext return result.WithError(err) } - return r.validateTestScenario(result, scenario, config, serviceOptions) + return 
r.validateTestScenario(ctx, result, scenario, config, serviceOptions) } func checkEnrolledAgents(ctx context.Context, client *kibana.Client, serviceContext servicedeployer.ServiceInfo) ([]kibana.Agent, error) { @@ -1452,7 +1454,7 @@ func selectPolicyTemplateByName(policies []packages.PolicyTemplate, name string) return packages.PolicyTemplate{}, fmt.Errorf("policy template %q not found", name) } -func (r *runner) checkTransforms(config *testConfig, pkgManifest *packages.PackageManifest, ds kibana.PackageDataStream, dataStream string) error { +func (r *runner) checkTransforms(ctx context.Context, config *testConfig, pkgManifest *packages.PackageManifest, ds kibana.PackageDataStream, dataStream string) error { transforms, err := packages.ReadTransformsFromPackageRoot(r.options.PackageRootPath) if err != nil { return fmt.Errorf("loading transforms for package failed (root: %s): %w", r.options.PackageRootPath, err) @@ -1477,7 +1479,7 @@ func (r *runner) checkTransforms(config *testConfig, pkgManifest *packages.Packa transform.Name, transform.Definition.Meta.FleetTransformVersion, ) - transformId, err := r.getTransformId(transformPattern) + transformId, err := r.getTransformId(ctx, transformPattern) if err != nil { return fmt.Errorf("failed to determine transform ID: %w", err) } @@ -1485,7 +1487,7 @@ func (r *runner) checkTransforms(config *testConfig, pkgManifest *packages.Packa // Using the preview instead of checking the actual index because // transforms with retention policies may be deleting the documents based // on old fixtures as soon as they are indexed. 
- transformDocs, err := r.previewTransform(transformId) + transformDocs, err := r.previewTransform(ctx, transformId) if err != nil { return fmt.Errorf("failed to preview transform %q: %w", transformId, err) } @@ -1510,8 +1512,9 @@ func (r *runner) checkTransforms(config *testConfig, pkgManifest *packages.Packa return nil } -func (r *runner) getTransformId(transformPattern string) (string, error) { +func (r *runner) getTransformId(ctx context.Context, transformPattern string) (string, error) { resp, err := r.options.API.TransformGetTransform( + r.options.API.TransformGetTransform.WithContext(ctx), r.options.API.TransformGetTransform.WithTransformID(transformPattern), ) if err != nil { @@ -1545,8 +1548,9 @@ func (r *runner) getTransformId(transformPattern string) (string, error) { return id, nil } -func (r *runner) previewTransform(transformId string) ([]common.MapStr, error) { +func (r *runner) previewTransform(ctx context.Context, transformId string) ([]common.MapStr, error) { resp, err := r.options.API.TransformPreviewTransform( + r.options.API.TransformPreviewTransform.WithContext(ctx), r.options.API.TransformPreviewTransform.WithTransformID(transformId), ) if err != nil { @@ -1569,9 +1573,11 @@ func (r *runner) previewTransform(transformId string) ([]common.MapStr, error) { return preview.Documents, nil } -func deleteDataStreamDocs(api *elasticsearch.API, dataStream string) error { +func deleteDataStreamDocs(ctx context.Context, api *elasticsearch.API, dataStream string) error { body := strings.NewReader(`{ "query": { "match_all": {} } }`) - resp, err := api.DeleteByQuery([]string{dataStream}, body) + resp, err := api.DeleteByQuery([]string{dataStream}, body, + api.DeleteByQuery.WithContext(ctx), + ) if err != nil { return fmt.Errorf("failed to delete data stream docs: %w", err) } From 0f7f60823f1bdf40e8c7a3e40669af9bb279f1be Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Tue, 5 Mar 2024 21:37:46 +0100 Subject: [PATCH 17/32] Add contexts to kibana --- 
cmd/benchmark.go | 4 --- cmd/dump.go | 2 +- cmd/edit.go | 4 +-- cmd/export.go | 9 ++--- cmd/install.go | 4 +-- cmd/uninstall.go | 2 +- internal/benchrunner/runners/rally/runner.go | 24 +++++++------- internal/benchrunner/runners/stream/runner.go | 14 ++++---- internal/benchrunner/runners/system/runner.go | 20 +++++------ internal/dump/agentpolicies.go | 4 +-- internal/export/dashboards.go | 9 ++--- internal/kibana/agents.go | 12 +++---- internal/kibana/client.go | 28 ++++++++-------- internal/kibana/client_test.go | 7 ++-- internal/kibana/dashboards.go | 5 +-- internal/kibana/fleet.go | 17 +++++----- internal/kibana/packages.go | 17 +++++----- internal/kibana/policies.go | 33 ++++++++++--------- internal/kibana/savedobjects.go | 23 ++++++------- internal/kibana/savedobjects_test.go | 9 ++--- internal/kibana/status.go | 9 ++--- internal/packages/installer/factory.go | 7 ++-- internal/packages/installer/installer.go | 11 ++++--- internal/packages/installer/zip_installer.go | 11 ++++--- internal/serverless/project.go | 30 ++++++++--------- internal/stack/serverless.go | 10 +++--- internal/testrunner/runners/asset/runner.go | 8 ++--- internal/testrunner/runners/system/runner.go | 18 +++++----- 28 files changed, 181 insertions(+), 170 deletions(-) diff --git a/cmd/benchmark.go b/cmd/benchmark.go index 93108284e8..09e8dcdee7 100644 --- a/cmd/benchmark.go +++ b/cmd/benchmark.go @@ -210,10 +210,6 @@ func pipelineCommandAction(cmd *cobra.Command, args []string) error { results = append(results, r) } - if err != nil { - return fmt.Errorf("error running package pipeline benchmarks: %w", err) - } - for _, report := range results { if err := reporters.WriteReportable(reporters.Output(reportOutput), report); err != nil { return fmt.Errorf("error writing benchmark report: %w", err) diff --git a/cmd/dump.go b/cmd/dump.go index 75ae705242..4515ef6652 100644 --- a/cmd/dump.go +++ b/cmd/dump.go @@ -96,7 +96,7 @@ func dumpInstalledObjectsCmdAction(cmd *cobra.Command, args []string) 
error { if err != nil { return fmt.Errorf("failed to initialize Kibana client: %w", err) } - installedPackage, err := kibanaClient.GetPackage(packageName) + installedPackage, err := kibanaClient.GetPackage(cmd.Context(), packageName) if err != nil { return fmt.Errorf("failed to get package status: %w", err) } diff --git a/cmd/edit.go b/cmd/edit.go index 066edc6787..38fc7d0168 100644 --- a/cmd/edit.go +++ b/cmd/edit.go @@ -93,7 +93,7 @@ func editDashboardsCmd(cmd *cobra.Command, args []string) error { } if len(dashboardIDs) == 0 { - dashboardIDs, err = promptDashboardIDs(kibanaClient) + dashboardIDs, err = promptDashboardIDs(cmd.Context(), kibanaClient) if err != nil { return fmt.Errorf("prompt for dashboard selection failed: %w", err) } @@ -107,7 +107,7 @@ func editDashboardsCmd(cmd *cobra.Command, args []string) error { updatedDashboardIDs := make([]string, 0, len(dashboardIDs)) failedDashboardUpdates := make(map[string]error, len(dashboardIDs)) for _, dashboardID := range dashboardIDs { - err = kibanaClient.SetManagedSavedObject("dashboard", dashboardID, false) + err = kibanaClient.SetManagedSavedObject(cmd.Context(), "dashboard", dashboardID, false) if err != nil { failedDashboardUpdates[dashboardID] = err } else { diff --git a/cmd/export.go b/cmd/export.go index b3c1bbd58c..da2adb56db 100644 --- a/cmd/export.go +++ b/cmd/export.go @@ -5,6 +5,7 @@ package cmd import ( + "context" "fmt" "github.com/AlecAivazis/survey/v2" @@ -93,7 +94,7 @@ func exportDashboardsCmd(cmd *cobra.Command, args []string) error { } if len(dashboardIDs) == 0 { - dashboardIDs, err = promptDashboardIDs(kibanaClient) + dashboardIDs, err = promptDashboardIDs(cmd.Context(), kibanaClient) if err != nil { return fmt.Errorf("prompt for dashboard selection failed: %w", err) } @@ -104,7 +105,7 @@ func exportDashboardsCmd(cmd *cobra.Command, args []string) error { } } - err = export.Dashboards(kibanaClient, dashboardIDs) + err = export.Dashboards(cmd.Context(), kibanaClient, dashboardIDs) if err != 
nil { return fmt.Errorf("dashboards export failed: %w", err) } @@ -113,8 +114,8 @@ func exportDashboardsCmd(cmd *cobra.Command, args []string) error { return nil } -func promptDashboardIDs(kibanaClient *kibana.Client) ([]string, error) { - savedDashboards, err := kibanaClient.FindDashboards() +func promptDashboardIDs(ctx context.Context, kibanaClient *kibana.Client) ([]string, error) { + savedDashboards, err := kibanaClient.FindDashboards(ctx) if err != nil { return nil, fmt.Errorf("finding dashboards failed: %w", err) } diff --git a/cmd/install.go b/cmd/install.go index 7dc781560d..b8ebcb03a5 100644 --- a/cmd/install.go +++ b/cmd/install.go @@ -90,7 +90,7 @@ func installCommandAction(cmd *cobra.Command, _ []string) error { return fmt.Errorf("can't process check-condition flag: %w", err) } if len(keyValuePairs) > 0 { - manifest, err := installer.Manifest() + manifest, err := installer.Manifest(cmd.Context()) if err != nil { return err } @@ -105,6 +105,6 @@ func installCommandAction(cmd *cobra.Command, _ []string) error { return nil } - _, err = installer.Install() + _, err = installer.Install(cmd.Context()) return err } diff --git a/cmd/uninstall.go b/cmd/uninstall.go index 7e44522fd2..1624d93c30 100644 --- a/cmd/uninstall.go +++ b/cmd/uninstall.go @@ -59,7 +59,7 @@ func uninstallCommandAction(cmd *cobra.Command, args []string) error { // Uninstall the package cmd.Println("Uninstall the package") - err = packageInstaller.Uninstall() + err = packageInstaller.Uninstall(cmd.Context()) if err != nil { return fmt.Errorf("can't uninstall the package: %w", err) } diff --git a/internal/benchrunner/runners/rally/runner.go b/internal/benchrunner/runners/rally/runner.go index 0069864fa4..cd55f30a2f 100644 --- a/internal/benchrunner/runners/rally/runner.go +++ b/internal/benchrunner/runners/rally/runner.go @@ -271,7 +271,7 @@ func (r *runner) setUp(ctx context.Context) error { } r.scenario = scenario - if err = r.installPackage(); err != nil { + if err = r.installPackage(ctx); 
err != nil { return fmt.Errorf("error installing package: %w", err) } @@ -451,28 +451,28 @@ func (r *runner) run(ctx context.Context) (report reporters.Reportable, err erro return createReport(r.options.BenchName, r.corpusFile, r.scenario, msum, rallyStats) } -func (r *runner) installPackage() error { +func (r *runner) installPackage(ctx context.Context) error { if len(r.options.PackageVersion) > 0 { r.scenario.Package = r.options.PackageName r.scenario.Version = r.options.PackageVersion - return r.installPackageFromRegistry(r.options.PackageName, r.options.PackageVersion) + return r.installPackageFromRegistry(ctx, r.options.PackageName, r.options.PackageVersion) } - return r.installPackageFromPackageRoot() + return r.installPackageFromPackageRoot(ctx) } -func (r *runner) installPackageFromRegistry(packageName, packageVersion string) error { +func (r *runner) installPackageFromRegistry(ctx context.Context, packageName, packageVersion string) error { // POST /epm/packages/{pkgName}/{pkgVersion} // Configure package (single data stream) via Ingest Manager APIs. 
logger.Debug("installing package...") - _, err := r.options.KibanaClient.InstallPackage(packageName, packageVersion) + _, err := r.options.KibanaClient.InstallPackage(ctx, packageName, packageVersion) if err != nil { return fmt.Errorf("cannot install package %s@%s: %w", packageName, packageVersion, err) } - r.removePackageHandler = func(context.Context) error { + r.removePackageHandler = func(ctx context.Context) error { logger.Debug("removing benchmark package...") - if _, err := r.options.KibanaClient.RemovePackage(packageName, packageVersion); err != nil { + if _, err := r.options.KibanaClient.RemovePackage(ctx, packageName, packageVersion); err != nil { return fmt.Errorf("error removing benchmark package: %w", err) } return nil @@ -481,7 +481,7 @@ func (r *runner) installPackageFromRegistry(packageName, packageVersion string) return nil } -func (r *runner) installPackageFromPackageRoot() error { +func (r *runner) installPackageFromPackageRoot(ctx context.Context) error { logger.Debug("Installing package...") installer, err := installer.NewForPackage(installer.Options{ Kibana: r.options.KibanaClient, @@ -493,13 +493,13 @@ func (r *runner) installPackageFromPackageRoot() error { return fmt.Errorf("failed to initialize package installer: %w", err) } - _, err = installer.Install() + _, err = installer.Install(ctx) if err != nil { return fmt.Errorf("failed to install package: %w", err) } - r.removePackageHandler = func(context.Context) error { - if err := installer.Uninstall(); err != nil { + r.removePackageHandler = func(ctx context.Context) error { + if err := installer.Uninstall(ctx); err != nil { return fmt.Errorf("error removing benchmark package: %w", err) } diff --git a/internal/benchrunner/runners/stream/runner.go b/internal/benchrunner/runners/stream/runner.go index 6d4aa9fc60..20824771ef 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ -114,7 +114,7 @@ func (r *runner) setUp(ctx 
context.Context) error { } r.scenarios = scenarios - if err = r.installPackage(); err != nil { + if err = r.installPackage(ctx); err != nil { return fmt.Errorf("error installing package: %w", err) } @@ -193,11 +193,11 @@ func (r *runner) wipeDataStreamsOnSetup() error { return nil } -func (r *runner) installPackage() error { - return r.installPackageFromPackageRoot() +func (r *runner) installPackage(ctx context.Context) error { + return r.installPackageFromPackageRoot(ctx) } -func (r *runner) installPackageFromPackageRoot() error { +func (r *runner) installPackageFromPackageRoot(ctx context.Context) error { logger.Debug("Installing package...") installer, err := installer.NewForPackage(installer.Options{ Kibana: r.options.KibanaClient, @@ -209,13 +209,13 @@ func (r *runner) installPackageFromPackageRoot() error { return fmt.Errorf("failed to initialize package installer: %w", err) } - _, err = installer.Install() + _, err = installer.Install(ctx) if err != nil { return fmt.Errorf("failed to install package: %w", err) } - r.removePackageHandler = func(context.Context) error { - if err := installer.Uninstall(); err != nil { + r.removePackageHandler = func(ctx context.Context) error { + if err := installer.Uninstall(ctx); err != nil { return fmt.Errorf("error removing benchmark package: %w", err) } diff --git a/internal/benchrunner/runners/system/runner.go b/internal/benchrunner/runners/system/runner.go index 636df6e075..ef683cdfde 100644 --- a/internal/benchrunner/runners/system/runner.go +++ b/internal/benchrunner/runners/system/runner.go @@ -170,7 +170,7 @@ func (r *runner) setUp(ctx context.Context) error { return fmt.Errorf("reading package manifest failed: %w", err) } - policy, err := r.createBenchmarkPolicy(pkgManifest) + policy, err := r.createBenchmarkPolicy(ctx, pkgManifest) if err != nil { return err } @@ -354,7 +354,7 @@ func (r *runner) deleteDataStreamDocs(dataStream string) error { return nil } -func (r *runner) createBenchmarkPolicy(pkgManifest 
*packages.PackageManifest) (*kibana.Policy, error) { +func (r *runner) createBenchmarkPolicy(ctx context.Context, pkgManifest *packages.PackageManifest) (*kibana.Policy, error) { // Configure package (single data stream) via Ingest Manager APIs. logger.Debug("creating benchmark policy...") benchTime := time.Now().Format("20060102T15:04:05Z") @@ -370,12 +370,12 @@ func (r *runner) createBenchmarkPolicy(pkgManifest *packages.PackageManifest) (* p.DataOutputID = "fleet-logstash-output" } - policy, err := r.options.KibanaClient.CreatePolicy(p) + policy, err := r.options.KibanaClient.CreatePolicy(ctx, p) if err != nil { return nil, err } - packagePolicy, err := r.createPackagePolicy(pkgManifest, policy) + packagePolicy, err := r.createPackagePolicy(ctx, pkgManifest, policy) if err != nil { return nil, err } @@ -384,12 +384,12 @@ func (r *runner) createBenchmarkPolicy(pkgManifest *packages.PackageManifest) (* var merr multierror.Error logger.Debug("deleting benchmark package policy...") - if err := r.options.KibanaClient.DeletePackagePolicy(*packagePolicy); err != nil { + if err := r.options.KibanaClient.DeletePackagePolicy(ctx, *packagePolicy); err != nil { merr = append(merr, fmt.Errorf("error cleaning up benchmark package policy: %w", err)) } logger.Debug("deleting benchmark policy...") - if err := r.options.KibanaClient.DeletePolicy(*policy); err != nil { + if err := r.options.KibanaClient.DeletePolicy(ctx, *policy); err != nil { merr = append(merr, fmt.Errorf("error cleaning up benchmark policy: %w", err)) } @@ -403,7 +403,7 @@ func (r *runner) createBenchmarkPolicy(pkgManifest *packages.PackageManifest) (* return policy, nil } -func (r *runner) createPackagePolicy(pkgManifest *packages.PackageManifest, p *kibana.Policy) (*kibana.PackagePolicy, error) { +func (r *runner) createPackagePolicy(ctx context.Context, pkgManifest *packages.PackageManifest, p *kibana.Policy) (*kibana.PackagePolicy, error) { logger.Debug("creating package policy...") if r.scenario.Version == 
"" { @@ -438,7 +438,7 @@ func (r *runner) createPackagePolicy(pkgManifest *packages.PackageManifest, p *k pp.Package.Name = pkgManifest.Name pp.Package.Version = r.scenario.Version - policy, err := r.options.KibanaClient.CreatePackagePolicy(pp) + policy, err := r.options.KibanaClient.CreatePackagePolicy(ctx, pp) if err != nil { return nil, err } @@ -621,7 +621,7 @@ func (r *runner) runGenerator(destDir string) error { func (r *runner) checkEnrolledAgents(ctx context.Context) ([]kibana.Agent, error) { var agents []kibana.Agent enrolled, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { - allAgents, err := r.options.KibanaClient.ListAgents() + allAgents, err := r.options.KibanaClient.ListAgents(ctx) if err != nil { return false, fmt.Errorf("could not list agents: %w", err) } @@ -697,7 +697,7 @@ func (r *runner) enrollAgents(ctx context.Context) error { return nil } - policyWithDataStream, err := r.options.KibanaClient.GetPolicy(r.benchPolicy.ID) + policyWithDataStream, err := r.options.KibanaClient.GetPolicy(ctx, r.benchPolicy.ID) if err != nil { return fmt.Errorf("could not read the policy with data stream: %w", err) } diff --git a/internal/dump/agentpolicies.go b/internal/dump/agentpolicies.go index b2746c13a6..b56fd46870 100644 --- a/internal/dump/agentpolicies.go +++ b/internal/dump/agentpolicies.go @@ -42,7 +42,7 @@ func NewAgentPoliciesDumper(client *kibana.Client) *AgentPoliciesDumper { } func (d *AgentPoliciesDumper) getAgentPolicy(ctx context.Context, name string) (*AgentPolicy, error) { - policy, err := d.client.GetRawPolicy(name) + policy, err := d.client.GetRawPolicy(ctx, name) if err != nil { return nil, err } @@ -86,7 +86,7 @@ func getPackagesUsingAgentPolicy(packagePolicies []packagePolicy) []string { } func (d *AgentPoliciesDumper) getAgentPoliciesFilteredByPackage(ctx context.Context, packageName string) ([]AgentPolicy, error) { - rawPolicies, err := d.client.ListRawPolicies() + rawPolicies, err := d.client.ListRawPolicies(ctx) if 
err != nil { return nil, err diff --git a/internal/export/dashboards.go b/internal/export/dashboards.go index 9cca9b2164..df57b85ac4 100644 --- a/internal/export/dashboards.go +++ b/internal/export/dashboards.go @@ -5,6 +5,7 @@ package export import ( + "context" "encoding/json" "fmt" "os" @@ -20,7 +21,7 @@ import ( // Dashboards method exports selected dashboards with references objects. All Kibana objects are saved to local files // in appropriate directories. -func Dashboards(kibanaClient *kibana.Client, dashboardsIDs []string) error { +func Dashboards(ctx context.Context, kibanaClient *kibana.Client, dashboardsIDs []string) error { packageRoot, err := packages.MustFindPackageRoot() if err != nil { return fmt.Errorf("locating package root failed: %w", err) @@ -40,16 +41,16 @@ func Dashboards(kibanaClient *kibana.Client, dashboardsIDs []string) error { return fmt.Errorf("cannot import from this Kibana version: %w", err) } - objects, err := kibanaClient.Export(dashboardsIDs) + objects, err := kibanaClient.Export(ctx, dashboardsIDs) if err != nil { return fmt.Errorf("exporting dashboards using Kibana client failed: %w", err) } - ctx := &transformationContext{ + transformContext := &transformationContext{ packageName: m.Name, } - objects, err = applyTransformations(ctx, objects) + objects, err = applyTransformations(transformContext, objects) if err != nil { return fmt.Errorf("can't transform Kibana objects: %w", err) } diff --git a/internal/kibana/agents.go b/internal/kibana/agents.go index 421ece0baa..725a9df20a 100644 --- a/internal/kibana/agents.go +++ b/internal/kibana/agents.go @@ -46,8 +46,8 @@ func (a *Agent) String() string { } // ListAgents returns the list of agents enrolled with Fleet. 
-func (c *Client) ListAgents() ([]Agent, error) { - statusCode, respBody, err := c.get(fmt.Sprintf("%s/agents", FleetAPI)) +func (c *Client) ListAgents(ctx context.Context) ([]Agent, error) { + statusCode, respBody, err := c.get(ctx, fmt.Sprintf("%s/agents", FleetAPI)) if err != nil { return nil, fmt.Errorf("could not list agents: %w", err) } @@ -72,7 +72,7 @@ func (c *Client) AssignPolicyToAgent(ctx context.Context, a Agent, p Policy) err reqBody := `{ "policy_id": "` + p.ID + `" }` path := fmt.Sprintf("%s/agents/%s/reassign", FleetAPI, a.ID) - statusCode, respBody, err := c.put(path, []byte(reqBody)) + statusCode, respBody, err := c.put(ctx, path, []byte(reqBody)) if err != nil { return fmt.Errorf("could not assign policy to agent: %w", err) } @@ -95,7 +95,7 @@ func (c *Client) waitUntilPolicyAssigned(ctx context.Context, a Agent, p Policy) defer ticker.Stop() for { - agent, err := c.getAgent(a.ID) + agent, err := c.getAgent(ctx, a.ID) if err != nil { return fmt.Errorf("can't get the agent: %w", err) } @@ -118,8 +118,8 @@ func (c *Client) waitUntilPolicyAssigned(ctx context.Context, a Agent, p Policy) return nil } -func (c *Client) getAgent(agentID string) (*Agent, error) { - statusCode, respBody, err := c.get(fmt.Sprintf("%s/agents/%s", FleetAPI, agentID)) +func (c *Client) getAgent(ctx context.Context, agentID string) (*Agent, error) { + statusCode, respBody, err := c.get(ctx, fmt.Sprintf("%s/agents/%s", FleetAPI, agentID)) if err != nil { return nil, fmt.Errorf("could not list agents: %w", err) } diff --git a/internal/kibana/client.go b/internal/kibana/client.go index b02ebe707b..6d4db93ce7 100644 --- a/internal/kibana/client.go +++ b/internal/kibana/client.go @@ -6,6 +6,7 @@ package kibana import ( "bytes" + "context" "crypto/tls" "errors" "fmt" @@ -65,7 +66,8 @@ func NewClient(opts ...ClientOption) (*Client, error) { // Allow to initialize version from tests. 
var zeroVersion VersionInfo if c.semver == nil || c.versionInfo == zeroVersion { - v, err := c.requestStatus() + // Passing a background context here because we are on initialization. + v, err := c.requestStatus(context.Background()) if err != nil { return nil, fmt.Errorf("failed to get Kibana version: %w", err) } @@ -134,24 +136,24 @@ func HTTPClientSetup(setup func(*http.Client) *http.Client) ClientOption { } } -func (c *Client) get(resourcePath string) (int, []byte, error) { - return c.SendRequest(http.MethodGet, resourcePath, nil) +func (c *Client) get(ctx context.Context, resourcePath string) (int, []byte, error) { + return c.SendRequest(ctx, http.MethodGet, resourcePath, nil) } -func (c *Client) post(resourcePath string, body []byte) (int, []byte, error) { - return c.SendRequest(http.MethodPost, resourcePath, body) +func (c *Client) post(ctx context.Context, resourcePath string, body []byte) (int, []byte, error) { + return c.SendRequest(ctx, http.MethodPost, resourcePath, body) } -func (c *Client) put(resourcePath string, body []byte) (int, []byte, error) { - return c.SendRequest(http.MethodPut, resourcePath, body) +func (c *Client) put(ctx context.Context, resourcePath string, body []byte) (int, []byte, error) { + return c.SendRequest(ctx, http.MethodPut, resourcePath, body) } -func (c *Client) delete(resourcePath string) (int, []byte, error) { - return c.SendRequest(http.MethodDelete, resourcePath, nil) +func (c *Client) delete(ctx context.Context, resourcePath string) (int, []byte, error) { + return c.SendRequest(ctx, http.MethodDelete, resourcePath, nil) } -func (c *Client) SendRequest(method, resourcePath string, body []byte) (int, []byte, error) { - request, err := c.newRequest(method, resourcePath, bytes.NewReader(body)) +func (c *Client) SendRequest(ctx context.Context, method, resourcePath string, body []byte) (int, []byte, error) { + request, err := c.newRequest(ctx, method, resourcePath, bytes.NewReader(body)) if err != nil { return 0, nil, err } @@ 
-159,7 +161,7 @@ func (c *Client) SendRequest(method, resourcePath string, body []byte) (int, []b return c.doRequest(request) } -func (c *Client) newRequest(method, resourcePath string, reqBody io.Reader) (*http.Request, error) { +func (c *Client) newRequest(ctx context.Context, method, resourcePath string, reqBody io.Reader) (*http.Request, error) { base, err := url.Parse(c.host) if err != nil { return nil, fmt.Errorf("could not create base URL from host: %v: %w", c.host, err) @@ -175,7 +177,7 @@ func (c *Client) newRequest(method, resourcePath string, reqBody io.Reader) (*ht logger.Debugf("%s %s", method, u) - req, err := http.NewRequest(method, u.String(), reqBody) + req, err := http.NewRequestWithContext(ctx, method, u.String(), reqBody) if err != nil { return nil, fmt.Errorf("could not create %v request to Kibana API resource: %s: %w", method, resourcePath, err) } diff --git a/internal/kibana/client_test.go b/internal/kibana/client_test.go index 7a63ea57fd..820289853a 100644 --- a/internal/kibana/client_test.go +++ b/internal/kibana/client_test.go @@ -6,6 +6,7 @@ package kibana import ( "bytes" + "context" "crypto/x509" "encoding/pem" "fmt" @@ -37,7 +38,7 @@ func TestClientWithTLS(t *testing.T) { client, err := NewClient(version, Address(server.URL)) require.NoError(t, err) - _, _, err = client.get("/") + _, _, err = client.get(context.Background(), "/") assert.Error(t, err) }) @@ -45,7 +46,7 @@ func TestClientWithTLS(t *testing.T) { client, err := NewClient(version, Address(server.URL), CertificateAuthority(caCertFile)) require.NoError(t, err) - _, _, err = client.get("/") + _, _, err = client.get(context.Background(), "/") assert.NoError(t, err) }) @@ -53,7 +54,7 @@ func TestClientWithTLS(t *testing.T) { client, err := NewClient(version, Address(server.URL), TLSSkipVerify()) require.NoError(t, err) - _, _, err = client.get("/") + _, _, err = client.get(context.Background(), "/") assert.NoError(t, err) }) } diff --git a/internal/kibana/dashboards.go 
b/internal/kibana/dashboards.go index 38988a94ce..87a7aeebb4 100644 --- a/internal/kibana/dashboards.go +++ b/internal/kibana/dashboards.go @@ -5,6 +5,7 @@ package kibana import ( + "context" "encoding/json" "errors" "fmt" @@ -21,7 +22,7 @@ type exportedType struct { } // Export method exports selected dashboards using the Kibana Export API. -func (c *Client) Export(dashboardIDs []string) ([]common.MapStr, error) { +func (c *Client) Export(ctx context.Context, dashboardIDs []string) ([]common.MapStr, error) { logger.Debug("Export dashboards using the Kibana Export API") var query strings.Builder @@ -33,7 +34,7 @@ func (c *Client) Export(dashboardIDs []string) ([]common.MapStr, error) { } path := fmt.Sprintf("%s/dashboards/export%s", CoreAPI, query.String()) - statusCode, respBody, err := c.get(path) + statusCode, respBody, err := c.get(ctx, path) if err != nil { return nil, fmt.Errorf("could not export dashboards; API status code = %d; response body = %s: %w", statusCode, respBody, err) } diff --git a/internal/kibana/fleet.go b/internal/kibana/fleet.go index 869044bb12..5b81e52dfa 100644 --- a/internal/kibana/fleet.go +++ b/internal/kibana/fleet.go @@ -5,6 +5,7 @@ package kibana import ( + "context" "encoding/json" "errors" "fmt" @@ -27,10 +28,10 @@ type AgentSSL struct { } // DefaultFleetServerURL returns the default Fleet server configured in Kibana -func (c *Client) DefaultFleetServerURL() (string, error) { +func (c *Client) DefaultFleetServerURL(ctx context.Context) (string, error) { path := fmt.Sprintf("%s/fleet_server_hosts", FleetAPI) - statusCode, respBody, err := c.get(path) + statusCode, respBody, err := c.get(ctx, path) if err != nil { return "", fmt.Errorf("could not reach fleet server hosts endpoint: %w", err) } @@ -61,13 +62,13 @@ func (c *Client) DefaultFleetServerURL() (string, error) { // UpdateFleetOutput updates an existing output to fleet // For example, to update ssl certificates etc., -func (c *Client) UpdateFleetOutput(fo FleetOutput, 
outputId string) error { +func (c *Client) UpdateFleetOutput(ctx context.Context, fo FleetOutput, outputId string) error { reqBody, err := json.Marshal(fo) if err != nil { return fmt.Errorf("could not convert fleetOutput (request) to JSON: %w", err) } - statusCode, respBody, err := c.put(fmt.Sprintf("%s/outputs/%s", FleetAPI, outputId), reqBody) + statusCode, respBody, err := c.put(ctx, fmt.Sprintf("%s/outputs/%s", FleetAPI, outputId), reqBody) if err != nil { return fmt.Errorf("could not update fleet output: %w", err) } @@ -80,13 +81,13 @@ func (c *Client) UpdateFleetOutput(fo FleetOutput, outputId string) error { } // AddFleetOutput adds an additional output to fleet eg., logstash -func (c *Client) AddFleetOutput(fo FleetOutput) error { +func (c *Client) AddFleetOutput(ctx context.Context, fo FleetOutput) error { reqBody, err := json.Marshal(fo) if err != nil { return fmt.Errorf("could not convert fleetOutput (request) to JSON: %w", err) } - statusCode, respBody, err := c.post(fmt.Sprintf("%s/outputs", FleetAPI), reqBody) + statusCode, respBody, err := c.post(ctx, fmt.Sprintf("%s/outputs", FleetAPI), reqBody) if err != nil { return fmt.Errorf("could not create fleet output: %w", err) } @@ -98,7 +99,7 @@ func (c *Client) AddFleetOutput(fo FleetOutput) error { return nil } -func (c *Client) SetAgentLogLevel(agentID, level string) error { +func (c *Client) SetAgentLogLevel(ctx context.Context, agentID, level string) error { path := fmt.Sprintf("%s/agents/%s/actions", FleetAPI, agentID) type fleetAction struct { @@ -119,7 +120,7 @@ func (c *Client) SetAgentLogLevel(agentID, level string) error { return fmt.Errorf("could not convert action settingr (request) to JSON: %w", err) } - statusCode, respBody, err := c.post(path, reqBody) + statusCode, respBody, err := c.post(ctx, path, reqBody) if err != nil { return fmt.Errorf("could not update agent settings: %w", err) } diff --git a/internal/kibana/packages.go b/internal/kibana/packages.go index ba6d2d5735..4d900dca54 
100644 --- a/internal/kibana/packages.go +++ b/internal/kibana/packages.go @@ -5,6 +5,7 @@ package kibana import ( + "context" "encoding/json" "fmt" "net/http" @@ -14,11 +15,11 @@ import ( ) // InstallPackage installs the given package in Fleet. -func (c *Client) InstallPackage(name, version string) ([]packages.Asset, error) { +func (c *Client) InstallPackage(ctx context.Context, name, version string) ([]packages.Asset, error) { path := c.epmPackageUrl(name, version) reqBody := []byte(`{"force":true}`) // allows installing older versions of the package being tested - statusCode, respBody, err := c.post(path, reqBody) + statusCode, respBody, err := c.post(ctx, path, reqBody) if err != nil { return nil, fmt.Errorf("could not install package: %w", err) } @@ -27,7 +28,7 @@ func (c *Client) InstallPackage(name, version string) ([]packages.Asset, error) } // InstallZipPackage installs the local zip package in Fleet. -func (c *Client) InstallZipPackage(zipFile string) ([]packages.Asset, error) { +func (c *Client) InstallZipPackage(ctx context.Context, zipFile string) ([]packages.Asset, error) { path := fmt.Sprintf("%s/epm/packages", FleetAPI) body, err := os.Open(zipFile) @@ -36,7 +37,7 @@ func (c *Client) InstallZipPackage(zipFile string) ([]packages.Asset, error) { } defer body.Close() - req, err := c.newRequest(http.MethodPost, path, body) + req, err := c.newRequest(ctx, http.MethodPost, path, body) if err != nil { return nil, err } @@ -52,9 +53,9 @@ func (c *Client) InstallZipPackage(zipFile string) ([]packages.Asset, error) { } // RemovePackage removes the given package from Fleet. 
-func (c *Client) RemovePackage(name, version string) ([]packages.Asset, error) { +func (c *Client) RemovePackage(ctx context.Context, name, version string) ([]packages.Asset, error) { path := c.epmPackageUrl(name, version) - statusCode, respBody, err := c.delete(path) + statusCode, respBody, err := c.delete(ctx, path) if err != nil { return nil, fmt.Errorf("could not delete package: %w", err) } @@ -70,9 +71,9 @@ type FleetPackage struct { } // GetPackage obtains information about a package from Fleet. -func (c *Client) GetPackage(name string) (*FleetPackage, error) { +func (c *Client) GetPackage(ctx context.Context, name string) (*FleetPackage, error) { path := c.epmPackageUrl(name, "") - statusCode, respBody, err := c.get(path) + statusCode, respBody, err := c.get(ctx, path) if err != nil { return nil, fmt.Errorf("could not get package: %w", err) } diff --git a/internal/kibana/policies.go b/internal/kibana/policies.go index 8e48019c02..897c0e2037 100644 --- a/internal/kibana/policies.go +++ b/internal/kibana/policies.go @@ -5,6 +5,7 @@ package kibana import ( + "context" "encoding/json" "fmt" "net/http" @@ -25,13 +26,13 @@ type Policy struct { } // CreatePolicy persists the given Policy in Fleet. -func (c *Client) CreatePolicy(p Policy) (*Policy, error) { +func (c *Client) CreatePolicy(ctx context.Context, p Policy) (*Policy, error) { reqBody, err := json.Marshal(p) if err != nil { return nil, fmt.Errorf("could not convert policy (request) to JSON: %w", err) } - statusCode, respBody, err := c.post(fmt.Sprintf("%s/agent_policies", FleetAPI), reqBody) + statusCode, respBody, err := c.post(ctx, fmt.Sprintf("%s/agent_policies", FleetAPI), reqBody) if err != nil { return nil, fmt.Errorf("could not create policy: %w", err) } @@ -52,8 +53,8 @@ func (c *Client) CreatePolicy(p Policy) (*Policy, error) { } // GetPolicy fetches the given Policy in Fleet. 
-func (c *Client) GetPolicy(policyID string) (*Policy, error) { - statusCode, respBody, err := c.get(fmt.Sprintf("%s/agent_policies/%s", FleetAPI, policyID)) +func (c *Client) GetPolicy(ctx context.Context, policyID string) (*Policy, error) { + statusCode, respBody, err := c.get(ctx, fmt.Sprintf("%s/agent_policies/%s", FleetAPI, policyID)) if err != nil { return nil, fmt.Errorf("could not get policy: %w", err) } @@ -74,8 +75,8 @@ func (c *Client) GetPolicy(policyID string) (*Policy, error) { } // GetRawPolicy fetches the given Policy with all the fields in Fleet. -func (c *Client) GetRawPolicy(policyID string) (json.RawMessage, error) { - statusCode, respBody, err := c.get(fmt.Sprintf("%s/agent_policies/%s", FleetAPI, policyID)) +func (c *Client) GetRawPolicy(ctx context.Context, policyID string) (json.RawMessage, error) { + statusCode, respBody, err := c.get(ctx, fmt.Sprintf("%s/agent_policies/%s", FleetAPI, policyID)) if err != nil { return nil, fmt.Errorf("could not get policy: %w", err) } @@ -96,7 +97,7 @@ func (c *Client) GetRawPolicy(policyID string) (json.RawMessage, error) { } // ListRawPolicies fetches all the Policies in Fleet. -func (c *Client) ListRawPolicies() ([]json.RawMessage, error) { +func (c *Client) ListRawPolicies(ctx context.Context) ([]json.RawMessage, error) { itemsRetrieved := 0 currentPage := 1 var items []json.RawMessage @@ -108,7 +109,7 @@ func (c *Client) ListRawPolicies() ([]json.RawMessage, error) { } for finished := false; !finished; finished = itemsRetrieved == resp.Total { - statusCode, respBody, err := c.get(fmt.Sprintf("%s/agent_policies?full=true&page=%d", FleetAPI, currentPage)) + statusCode, respBody, err := c.get(ctx, fmt.Sprintf("%s/agent_policies?full=true&page=%d", FleetAPI, currentPage)) if err != nil { return nil, fmt.Errorf("could not get policies: %w", err) } @@ -130,10 +131,10 @@ func (c *Client) ListRawPolicies() ([]json.RawMessage, error) { } // DeletePolicy removes the given Policy from Fleet. 
-func (c *Client) DeletePolicy(p Policy) error { +func (c *Client) DeletePolicy(ctx context.Context, p Policy) error { reqBody := `{ "agentPolicyId": "` + p.ID + `" }` - statusCode, respBody, err := c.post(fmt.Sprintf("%s/agent_policies/delete", FleetAPI), []byte(reqBody)) + statusCode, respBody, err := c.post(ctx, fmt.Sprintf("%s/agent_policies/delete", FleetAPI), []byte(reqBody)) if err != nil { return fmt.Errorf("could not delete policy: %w", err) } @@ -199,13 +200,13 @@ type PackageDataStream struct { } // AddPackageDataStreamToPolicy adds a PackageDataStream to a Policy in Fleet. -func (c *Client) AddPackageDataStreamToPolicy(r PackageDataStream) error { +func (c *Client) AddPackageDataStreamToPolicy(ctx context.Context, r PackageDataStream) error { reqBody, err := json.Marshal(r) if err != nil { return fmt.Errorf("could not convert policy-package (request) to JSON: %w", err) } - statusCode, respBody, err := c.post(fmt.Sprintf("%s/package_policies", FleetAPI), reqBody) + statusCode, respBody, err := c.post(ctx, fmt.Sprintf("%s/package_policies", FleetAPI), reqBody) if err != nil { return fmt.Errorf("could not add package to policy: %w", err) } @@ -244,13 +245,13 @@ type PackagePolicyStream struct { } // CreatePackagePolicy persists the given Package Policy in Fleet. 
-func (c *Client) CreatePackagePolicy(p PackagePolicy) (*PackagePolicy, error) { +func (c *Client) CreatePackagePolicy(ctx context.Context, p PackagePolicy) (*PackagePolicy, error) { reqBody, err := json.Marshal(p) if err != nil { return nil, fmt.Errorf("could not convert package policy (request) to JSON: %w", err) } - statusCode, respBody, err := c.post(fmt.Sprintf("%s/package_policies", FleetAPI), reqBody) + statusCode, respBody, err := c.post(ctx, fmt.Sprintf("%s/package_policies", FleetAPI), reqBody) if err != nil { return nil, fmt.Errorf("could not create package policy (req %s): %w", string(reqBody), err) } @@ -277,8 +278,8 @@ func (c *Client) CreatePackagePolicy(p PackagePolicy) (*PackagePolicy, error) { } // DeletePackagePolicy removes the given Package Policy from Fleet. -func (c *Client) DeletePackagePolicy(p PackagePolicy) error { - statusCode, respBody, err := c.delete(fmt.Sprintf("%s/package_policies/%s", FleetAPI, p.ID)) +func (c *Client) DeletePackagePolicy(ctx context.Context, p PackagePolicy) error { + statusCode, respBody, err := c.delete(ctx, fmt.Sprintf("%s/package_policies/%s", FleetAPI, p.ID)) if err != nil { return fmt.Errorf("could not delete package policy: %w", err) } diff --git a/internal/kibana/savedobjects.go b/internal/kibana/savedobjects.go index d43eca33c7..1f243b5389 100644 --- a/internal/kibana/savedobjects.go +++ b/internal/kibana/savedobjects.go @@ -6,6 +6,7 @@ package kibana import ( "bytes" + "context" "encoding/json" "fmt" "mime/multipart" @@ -57,14 +58,14 @@ func (dso *DashboardSavedObject) String() string { } // FindDashboards method returns dashboards available in the Kibana instance. 
-func (c *Client) FindDashboards() (DashboardSavedObjects, error) { +func (c *Client) FindDashboards(ctx context.Context) (DashboardSavedObjects, error) { logger.Debug("Find dashboards using the Saved Objects API") var foundObjects DashboardSavedObjects page := 1 for { - r, err := c.findDashboardsNextPage(page) + r, err := c.findDashboardsNextPage(ctx, page) if err != nil { return nil, fmt.Errorf("can't fetch page with results: %w", err) } @@ -91,9 +92,9 @@ func (c *Client) FindDashboards() (DashboardSavedObjects, error) { return foundObjects, nil } -func (c *Client) findDashboardsNextPage(page int) (*savedObjectsResponse, error) { +func (c *Client) findDashboardsNextPage(ctx context.Context, page int) (*savedObjectsResponse, error) { path := fmt.Sprintf("%s/_find?type=dashboard&fields=title&per_page=%d&page=%d", SavedObjectsAPI, findDashboardsPerPage, page) - statusCode, respBody, err := c.get(path) + statusCode, respBody, err := c.get(ctx, path) if err != nil { return nil, fmt.Errorf("could not find dashboards; API status code = %d; response body = %s: %w", statusCode, string(respBody), err) } @@ -115,7 +116,7 @@ func (c *Client) findDashboardsNextPage(page int) (*savedObjectsResponse, error) // allow to edit them. // Managed property cannot be directly changed, so we modify it by exporting the // saved object and importing it again, overwriting the original one. 
-func (c *Client) SetManagedSavedObject(savedObjectType string, id string, managed bool) error { +func (c *Client) SetManagedSavedObject(ctx context.Context, savedObjectType string, id string, managed bool) error { exportRequest := ExportSavedObjectsRequest{ ExcludeExportDetails: true, IncludeReferencesDeep: false, @@ -126,7 +127,7 @@ func (c *Client) SetManagedSavedObject(savedObjectType string, id string, manage }, }, } - objects, err := c.ExportSavedObjects(exportRequest) + objects, err := c.ExportSavedObjects(ctx, exportRequest) if err != nil { return fmt.Errorf("failed to export %s %s: %w", savedObjectType, id, err) } @@ -139,7 +140,7 @@ func (c *Client) SetManagedSavedObject(savedObjectType string, id string, manage Overwrite: true, Objects: objects, } - _, err = c.ImportSavedObjects(importRequest) + _, err = c.ImportSavedObjects(ctx, importRequest) if err != nil { return fmt.Errorf("failed to import %s %s: %w", savedObjectType, id, err) } @@ -158,14 +159,14 @@ type ExportSavedObjectsRequestObject struct { Type string `json:"type"` } -func (c *Client) ExportSavedObjects(request ExportSavedObjectsRequest) ([]map[string]any, error) { +func (c *Client) ExportSavedObjects(ctx context.Context, request ExportSavedObjectsRequest) ([]map[string]any, error) { body, err := json.Marshal(request) if err != nil { return nil, fmt.Errorf("failed to encode request: %w", err) } path := SavedObjectsAPI + "/_export" - statusCode, respBody, err := c.SendRequest(http.MethodPost, path, body) + statusCode, respBody, err := c.SendRequest(ctx, http.MethodPost, path, body) if err != nil { return nil, fmt.Errorf("could not export saved objects; API status code = %d; response body = %s: %w", statusCode, string(respBody), err) } @@ -208,7 +209,7 @@ type ImportResult struct { Meta map[string]any `json:"meta"` } -func (c *Client) ImportSavedObjects(importRequest ImportSavedObjectsRequest) (*ImportSavedObjectsResponse, error) { +func (c *Client) ImportSavedObjects(ctx context.Context, 
importRequest ImportSavedObjectsRequest) (*ImportSavedObjectsResponse, error) { var body bytes.Buffer multipartWriter := multipart.NewWriter(&body) fileWriter, err := multipartWriter.CreateFormFile("file", "file.ndjson") @@ -229,7 +230,7 @@ func (c *Client) ImportSavedObjects(importRequest ImportSavedObjectsRequest) (*I } path := SavedObjectsAPI + "/_import" - request, err := c.newRequest(http.MethodPost, path, &body) + request, err := c.newRequest(ctx, http.MethodPost, path, &body) if err != nil { return nil, fmt.Errorf("cannot create new request: %w", err) } diff --git a/internal/kibana/savedobjects_test.go b/internal/kibana/savedobjects_test.go index c678a70b8f..9f7cc1de0f 100644 --- a/internal/kibana/savedobjects_test.go +++ b/internal/kibana/savedobjects_test.go @@ -5,6 +5,7 @@ package kibana_test import ( + "context" "net/http" "testing" @@ -25,7 +26,7 @@ func TestSetManagedSavedObject(t *testing.T) { id := preloadDashboard(t, client) require.True(t, getManagedSavedObject(t, client, "dashboard", id)) - err := client.SetManagedSavedObject("dashboard", id, false) + err := client.SetManagedSavedObject(context.Background(), "dashboard", id, false) require.NoError(t, err) assert.False(t, getManagedSavedObject(t, client, "dashboard", id)) } @@ -45,11 +46,11 @@ func preloadDashboard(t *testing.T, client *kibana.Client) string { }, }, } - _, err := client.ImportSavedObjects(importRequest) + _, err := client.ImportSavedObjects(context.Background(), importRequest) require.NoError(t, err) t.Cleanup(func() { - statusCode, _, err := client.SendRequest(http.MethodDelete, kibana.SavedObjectsAPI+"/dashboard/"+id, nil) + statusCode, _, err := client.SendRequest(context.Background(), http.MethodDelete, kibana.SavedObjectsAPI+"/dashboard/"+id, nil) require.NoError(t, err) require.Equal(t, http.StatusOK, statusCode) }) @@ -67,7 +68,7 @@ func getManagedSavedObject(t *testing.T, client *kibana.Client, savedObjectType }, }, } - export, err := 
client.ExportSavedObjects(exportRequest) + export, err := client.ExportSavedObjects(context.Background(), exportRequest) require.NoError(t, err) require.Len(t, export, 1) diff --git a/internal/kibana/status.go b/internal/kibana/status.go index 4edbf428d6..2e16afb9da 100644 --- a/internal/kibana/status.go +++ b/internal/kibana/status.go @@ -5,6 +5,7 @@ package kibana import ( + "context" "encoding/json" "fmt" "net/http" @@ -42,9 +43,9 @@ func (c *Client) Version() (VersionInfo, error) { return c.versionInfo, nil } -func (c *Client) requestStatus() (statusType, error) { +func (c *Client) requestStatus(ctx context.Context) (statusType, error) { var status statusType - statusCode, respBody, err := c.get(StatusAPI) + statusCode, respBody, err := c.get(ctx, StatusAPI) if err != nil { return status, fmt.Errorf("could not reach status endpoint: %w", err) } @@ -63,8 +64,8 @@ func (c *Client) requestStatus() (statusType, error) { } // CheckHealth checks the Kibana health -func (c *Client) CheckHealth() error { - status, err := c.requestStatus() +func (c *Client) CheckHealth(ctx context.Context) error { + status, err := c.requestStatus(ctx) if err != nil { return fmt.Errorf("could not reach status endpoint: %w", err) } diff --git a/internal/packages/installer/factory.go b/internal/packages/installer/factory.go index 5309438d2c..29819f43a0 100644 --- a/internal/packages/installer/factory.go +++ b/internal/packages/installer/factory.go @@ -5,6 +5,7 @@ package installer import ( + "context" "errors" "fmt" @@ -21,10 +22,10 @@ var semver8_7_0 = semver.MustParse("8.7.0") // Installer is responsible for installation/uninstallation of the package. 
type Installer interface { - Install() (*InstalledPackage, error) - Uninstall() error + Install(context.Context) (*InstalledPackage, error) + Uninstall(context.Context) error - Manifest() (*packages.PackageManifest, error) + Manifest(context.Context) (*packages.PackageManifest, error) } // Options are the parameters used to build an installer. diff --git a/internal/packages/installer/installer.go b/internal/packages/installer/installer.go index dcf90ec736..7a54259ee7 100644 --- a/internal/packages/installer/installer.go +++ b/internal/packages/installer/installer.go @@ -5,6 +5,7 @@ package installer import ( + "context" "fmt" "github.com/elastic/elastic-package/internal/kibana" @@ -36,8 +37,8 @@ func CreateForManifest(kibanaClient *kibana.Client, packageRoot string) (*manife } // Install method installs the package using Kibana API. -func (i *manifestInstaller) Install() (*InstalledPackage, error) { - assets, err := i.kibanaClient.InstallPackage(i.manifest.Name, i.manifest.Version) +func (i *manifestInstaller) Install(ctx context.Context) (*InstalledPackage, error) { + assets, err := i.kibanaClient.InstallPackage(ctx, i.manifest.Name, i.manifest.Version) if err != nil { return nil, fmt.Errorf("can't install the package: %w", err) } @@ -50,8 +51,8 @@ func (i *manifestInstaller) Install() (*InstalledPackage, error) { } // Uninstall method uninstalls the package using Kibana API. -func (i *manifestInstaller) Uninstall() error { - _, err := i.kibanaClient.RemovePackage(i.manifest.Name, i.manifest.Version) +func (i *manifestInstaller) Uninstall(ctx context.Context) error { + _, err := i.kibanaClient.RemovePackage(ctx, i.manifest.Name, i.manifest.Version) if err != nil { return fmt.Errorf("can't remove the package: %w", err) } @@ -59,6 +60,6 @@ func (i *manifestInstaller) Uninstall() error { } // Manifest method returns the package manifest. 
-func (i *manifestInstaller) Manifest() (*packages.PackageManifest, error) { +func (i *manifestInstaller) Manifest(context.Context) (*packages.PackageManifest, error) { return i.manifest, nil } diff --git a/internal/packages/installer/zip_installer.go b/internal/packages/installer/zip_installer.go index df062bfcc9..39672c8a1d 100644 --- a/internal/packages/installer/zip_installer.go +++ b/internal/packages/installer/zip_installer.go @@ -5,6 +5,7 @@ package installer import ( + "context" "fmt" "github.com/elastic/elastic-package/internal/kibana" @@ -32,8 +33,8 @@ func CreateForZip(kibanaClient *kibana.Client, zipPath string) (*zipInstaller, e } // Install method installs the package using Kibana API. -func (i *zipInstaller) Install() (*InstalledPackage, error) { - assets, err := i.kibanaClient.InstallZipPackage(i.zipPath) +func (i *zipInstaller) Install(ctx context.Context) (*InstalledPackage, error) { + assets, err := i.kibanaClient.InstallZipPackage(ctx, i.zipPath) if err != nil { return nil, fmt.Errorf("can't install the package: %w", err) } @@ -46,8 +47,8 @@ func (i *zipInstaller) Install() (*InstalledPackage, error) { } // Uninstall method uninstalls the package using Kibana API. -func (i *zipInstaller) Uninstall() error { - _, err := i.kibanaClient.RemovePackage(i.manifest.Name, i.manifest.Version) +func (i *zipInstaller) Uninstall(ctx context.Context) error { + _, err := i.kibanaClient.RemovePackage(ctx, i.manifest.Name, i.manifest.Version) if err != nil { return fmt.Errorf("can't remove the package: %w", err) } @@ -55,6 +56,6 @@ func (i *zipInstaller) Uninstall() error { } // Manifest method returns the package manifest. 
-func (i *zipInstaller) Manifest() (*packages.PackageManifest, error) { +func (i *zipInstaller) Manifest(context.Context) (*packages.PackageManifest, error) { return i.manifest, nil } diff --git a/internal/serverless/project.go b/internal/serverless/project.go index 1c2a92f7bf..d95cd2806c 100644 --- a/internal/serverless/project.go +++ b/internal/serverless/project.go @@ -75,7 +75,7 @@ func (p *Project) Status(ctx context.Context, elasticsearchClient *elasticsearch status = map[string]string{ "elasticsearch": healthStatus(p.getESHealth(ctx, elasticsearchClient)), - "kibana": healthStatus(p.getKibanaHealth(kibanaClient)), + "kibana": healthStatus(p.getKibanaHealth(ctx, kibanaClient)), "fleet": healthStatus(p.getFleetHealth(ctx)), } return status, nil @@ -99,7 +99,7 @@ func (p *Project) ensureElasticsearchHealthy(ctx context.Context, elasticsearchC func (p *Project) ensureKibanaHealthy(ctx context.Context, kibanaClient *kibana.Client) error { for { - err := kibanaClient.CheckHealth() + err := kibanaClient.CheckHealth(ctx) if err == nil { return nil } @@ -129,8 +129,8 @@ func (p *Project) ensureFleetHealthy(ctx context.Context) error { } } -func (p *Project) DefaultFleetServerURL(kibanaClient *kibana.Client) (string, error) { - fleetURL, err := kibanaClient.DefaultFleetServerURL() +func (p *Project) DefaultFleetServerURL(ctx context.Context, kibanaClient *kibana.Client) (string, error) { + fleetURL, err := kibanaClient.DefaultFleetServerURL(ctx) if err != nil { return "", fmt.Errorf("failed to query fleet server hosts: %w", err) } @@ -138,7 +138,7 @@ func (p *Project) DefaultFleetServerURL(kibanaClient *kibana.Client) (string, er return fleetURL, nil } -func (p *Project) AddLogstashFleetOutput(profile *profile.Profile, kibanaClient *kibana.Client) error { +func (p *Project) AddLogstashFleetOutput(ctx context.Context, profile *profile.Profile, kibanaClient *kibana.Client) error { logstashFleetOutput := kibana.FleetOutput{ Name: "logstash-output", ID: 
FleetLogstashOutput, @@ -146,14 +146,14 @@ func (p *Project) AddLogstashFleetOutput(profile *profile.Profile, kibanaClient Hosts: []string{"logstash:5044"}, } - if err := kibanaClient.AddFleetOutput(logstashFleetOutput); err != nil { + if err := kibanaClient.AddFleetOutput(ctx, logstashFleetOutput); err != nil { return fmt.Errorf("failed to add logstash fleet output: %w", err) } return nil } -func (p *Project) UpdateLogstashFleetOutput(profile *profile.Profile, kibanaClient *kibana.Client) error { +func (p *Project) UpdateLogstashFleetOutput(ctx context.Context, profile *profile.Profile, kibanaClient *kibana.Client) error { certsDir := filepath.Join(profile.ProfilePath, "certs", "elastic-agent") caFile, err := os.ReadFile(filepath.Join(certsDir, "ca-cert.pem")) @@ -179,7 +179,7 @@ func (p *Project) UpdateLogstashFleetOutput(profile *profile.Profile, kibanaClie }, } - if err := kibanaClient.UpdateFleetOutput(logstashFleetOutput, FleetLogstashOutput); err != nil { + if err := kibanaClient.UpdateFleetOutput(ctx, logstashFleetOutput, FleetLogstashOutput); err != nil { return fmt.Errorf("failed to update logstash fleet output: %w", err) } @@ -190,8 +190,8 @@ func (p *Project) getESHealth(ctx context.Context, elasticsearchClient *elastics return elasticsearchClient.CheckHealth(ctx) } -func (p *Project) getKibanaHealth(kibanaClient *kibana.Client) error { - return kibanaClient.CheckHealth() +func (p *Project) getKibanaHealth(ctx context.Context, kibanaClient *kibana.Client) error { + return kibanaClient.CheckHealth(ctx) } func (p *Project) getFleetHealth(ctx context.Context) error { @@ -232,7 +232,7 @@ func (p *Project) getFleetHealth(ctx context.Context) error { return nil } -func (p *Project) CreateAgentPolicy(kibanaClient *kibana.Client, stackVersion string, outputId string, selfMonitor bool) error { +func (p *Project) CreateAgentPolicy(ctx context.Context, kibanaClient *kibana.Client, stackVersion string, outputId string, selfMonitor bool) error { policy := 
kibana.Policy{ ID: "elastic-agent-managed-ep", Name: "Elastic-Agent (elastic-package)", @@ -245,13 +245,13 @@ func (p *Project) CreateAgentPolicy(kibanaClient *kibana.Client, stackVersion st policy.MonitoringEnabled = []string{"logs", "metrics"} } - newPolicy, err := kibanaClient.CreatePolicy(policy) + newPolicy, err := kibanaClient.CreatePolicy(ctx, policy) if err != nil { return fmt.Errorf("error while creating agent policy: %w", err) } if selfMonitor { - err := p.createSystemPackagePolicy(kibanaClient, stackVersion, newPolicy.ID, newPolicy.Namespace) + err := p.createSystemPackagePolicy(ctx, kibanaClient, stackVersion, newPolicy.ID, newPolicy.Namespace) if err != nil { return err } @@ -260,7 +260,7 @@ func (p *Project) CreateAgentPolicy(kibanaClient *kibana.Client, stackVersion st return nil } -func (p *Project) createSystemPackagePolicy(kibanaClient *kibana.Client, stackVersion, agentPolicyID, namespace string) error { +func (p *Project) createSystemPackagePolicy(ctx context.Context, kibanaClient *kibana.Client, stackVersion, agentPolicyID, namespace string) error { systemPackages, err := registry.Production.Revisions("system", registry.SearchOptions{ KibanaVersion: strings.TrimSuffix(stackVersion, kibana.SNAPSHOT_SUFFIX), }) @@ -279,7 +279,7 @@ func (p *Project) createSystemPackagePolicy(kibanaClient *kibana.Client, stackVe packagePolicy.Package.Name = "system" packagePolicy.Package.Version = systemPackages[0].Version - _, err = kibanaClient.CreatePackagePolicy(packagePolicy) + _, err = kibanaClient.CreatePackagePolicy(ctx, packagePolicy) if err != nil { return fmt.Errorf("error while creating package policy: %w", err) } diff --git a/internal/stack/serverless.go b/internal/stack/serverless.go index b4996f13b1..6b9ff061be 100644 --- a/internal/stack/serverless.go +++ b/internal/stack/serverless.go @@ -100,7 +100,7 @@ func (sp *serverlessProvider) createProject(ctx context.Context, settings projec return Config{}, err } - 
config.Parameters[paramServerlessFleetURL], err = project.DefaultFleetServerURL(sp.kibanaClient) + config.Parameters[paramServerlessFleetURL], err = project.DefaultFleetServerURL(ctx, sp.kibanaClient) if err != nil { return Config{}, fmt.Errorf("failed to get fleet URL: %w", err) } @@ -120,7 +120,7 @@ func (sp *serverlessProvider) createProject(ctx context.Context, settings projec } if settings.LogstashEnabled { - err = project.AddLogstashFleetOutput(sp.profile, sp.kibanaClient) + err = project.AddLogstashFleetOutput(ctx, sp.profile, sp.kibanaClient) if err != nil { return Config{}, err } @@ -146,7 +146,7 @@ func (sp *serverlessProvider) currentProjectWithClientsAndFleetEndpoint(ctx cont fleetURL, found := config.Parameters[paramServerlessFleetURL] if !found { - fleetURL, err = project.DefaultFleetServerURL(sp.kibanaClient) + fleetURL, err = project.DefaultFleetServerURL(ctx, sp.kibanaClient) if err != nil { return nil, fmt.Errorf("failed to get fleet URL: %w", err) } @@ -277,7 +277,7 @@ func (sp *serverlessProvider) BootUp(ctx context.Context, options Options) error } logger.Infof("Creating agent policy") - err = project.CreateAgentPolicy(sp.kibanaClient, options.StackVersion, outputID, settings.SelfMonitor) + err = project.CreateAgentPolicy(ctx, sp.kibanaClient, options.StackVersion, outputID, settings.SelfMonitor) if err != nil { return fmt.Errorf("failed to create agent policy: %w", err) @@ -300,7 +300,7 @@ func (sp *serverlessProvider) BootUp(ctx context.Context, options Options) error // Updating the output with ssl certificates created in startLocalServices // The certificates are updated only when a new project is created and logstash is enabled if isNewProject && settings.LogstashEnabled { - err = project.UpdateLogstashFleetOutput(sp.profile, sp.kibanaClient) + err = project.UpdateLogstashFleetOutput(ctx, sp.profile, sp.kibanaClient) if err != nil { return err } diff --git a/internal/testrunner/runners/asset/runner.go 
b/internal/testrunner/runners/asset/runner.go index 2925c31064..fdfbea1e99 100644 --- a/internal/testrunner/runners/asset/runner.go +++ b/internal/testrunner/runners/asset/runner.go @@ -68,10 +68,10 @@ func (r *runner) Run(ctx context.Context, options testrunner.TestOptions) ([]tes r.packageRootPath = options.PackageRootPath r.kibanaClient = options.KibanaClient - return r.run() + return r.run(ctx) } -func (r *runner) run() ([]testrunner.TestResult, error) { +func (r *runner) run(ctx context.Context) ([]testrunner.TestResult, error) { result := testrunner.NewResultComposer(testrunner.TestResult{ TestType: TestType, Package: r.testFolder.Package, @@ -102,7 +102,7 @@ func (r *runner) run() ([]testrunner.TestResult, error) { if err != nil { return result.WithError(fmt.Errorf("can't create the package installer: %w", err)) } - installedPackage, err := packageInstaller.Install() + installedPackage, err := packageInstaller.Install(ctx) if err != nil { return result.WithError(fmt.Errorf("can't install the package: %w", err)) } @@ -130,7 +130,7 @@ func (r *runner) run() ([]testrunner.TestResult, error) { } logger.Debug("removing package...") - err = packageInstaller.Uninstall() + err = packageInstaller.Uninstall(ctx) if err != nil { // logging the error as a warning and not returning it since there could be other reasons that could make fail this process // for instance being defined a test agent policy where this package is used for debugging purposes diff --git a/internal/testrunner/runners/system/runner.go b/internal/testrunner/runners/system/runner.go index c8f7e931a1..066d9149c7 100644 --- a/internal/testrunner/runners/system/runner.go +++ b/internal/testrunner/runners/system/runner.go @@ -740,7 +740,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic // Allowed to re-install the package in RunTestsOnly to be able to // test new changes introduced in the package logger.Debug("Installing package...") - _, err = installer.Install() + _, 
err = installer.Install(ctx) if err != nil { return nil, fmt.Errorf("failed to install package: %v", err) } @@ -759,7 +759,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic } logger.Debug("removing package...") - err = installer.Uninstall() + err = installer.Uninstall(ctx) if err != nil { // logging the error as a warning and not returning it since there could be other reasons that could make fail this process // for instance being defined a test agent policy where this package is used for debugging purposes @@ -786,14 +786,14 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic if r.options.Profile.Config("stack.logstash_enabled", "false") == "true" { p.DataOutputID = "fleet-logstash-output" } - policy, err = r.options.KibanaClient.CreatePolicy(p) + policy, err = r.options.KibanaClient.CreatePolicy(ctx, p) if err != nil { return nil, fmt.Errorf("could not create test policy: %w", err) } } r.deleteTestPolicyHandler = func(ctx context.Context) error { logger.Debug("deleting test policy...") - if err := r.options.KibanaClient.DeletePolicy(*policy); err != nil { + if err := r.options.KibanaClient.DeletePolicy(ctx, *policy); err != nil { return fmt.Errorf("error cleaning up test policy: %w", err) } return nil @@ -804,7 +804,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic if r.options.RunTearDown || r.options.RunTestsOnly { logger.Debug("Skip adding data stream config to policy") } else { - if err := r.options.KibanaClient.AddPackageDataStreamToPolicy(ds); err != nil { + if err := r.options.KibanaClient.AddPackageDataStreamToPolicy(ctx, ds); err != nil { return nil, fmt.Errorf("could not add data stream config to policy: %w", err) } } @@ -885,7 +885,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic default: logger.Debug("Set Debug log level to agent") origLogLevel = agent.LocalMetadata.Elastic.Agent.LogLevel - err = 
r.options.KibanaClient.SetAgentLogLevel(agent.ID, "debug") + err = r.options.KibanaClient.SetAgentLogLevel(ctx, agent.ID, "debug") if err != nil { return nil, fmt.Errorf("error setting log level debug for agent %s: %w", agent.ID, err) } @@ -893,7 +893,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic r.resetAgentLogLevelHandler = func(ctx context.Context) error { logger.Debugf("reassigning original log level %q back to agent...", origLogLevel) - if err := r.options.KibanaClient.SetAgentLogLevel(agent.ID, origLogLevel); err != nil { + if err := r.options.KibanaClient.SetAgentLogLevel(ctx, agent.ID, origLogLevel); err != nil { return fmt.Errorf("error reassigning original log level to agent: %w", err) } return nil @@ -903,7 +903,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic case r.options.RunTearDown || r.options.RunTestsOnly: logger.Debug("Skip assiging package data stream to agent") default: - policyWithDataStream, err := r.options.KibanaClient.GetPolicy(policy.ID) + policyWithDataStream, err := r.options.KibanaClient.GetPolicy(ctx, policy.ID) if err != nil { return nil, fmt.Errorf("could not read the policy with data stream: %w", err) } @@ -1186,7 +1186,7 @@ func (r *runner) runTest(ctx context.Context, config *testConfig, serviceContext func checkEnrolledAgents(ctx context.Context, client *kibana.Client, serviceContext servicedeployer.ServiceInfo) ([]kibana.Agent, error) { var agents []kibana.Agent enrolled, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { - allAgents, err := client.ListAgents() + allAgents, err := client.ListAgents(ctx) if err != nil { return false, fmt.Errorf("could not list agents: %w", err) } From f0a56fcf81f824ec71267e9e5575abf788d1c5c1 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Tue, 5 Mar 2024 21:59:43 +0100 Subject: [PATCH 18/32] Recover previous error format --- cmd/root.go | 7 +++---- main.go | 15 ++++++++------- 2 files 
changed, 11 insertions(+), 11 deletions(-) diff --git a/cmd/root.go b/cmd/root.go index a2e401cf92..8557d2c88d 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -42,10 +42,9 @@ var commands = []*cobraext.Command{ // RootCmd creates and returns root cmd for elastic-package func RootCmd() *cobra.Command { rootCmd := &cobra.Command{ - Use: "elastic-package", - Short: "elastic-package - Command line tool for developing Elastic Integrations", - SilenceUsage: true, - SilenceErrors: true, + Use: "elastic-package", + Short: "elastic-package - Command line tool for developing Elastic Integrations", + SilenceUsage: true, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { return cobraext.ComposeCommandActions(cmd, args, processPersistentFlags, diff --git a/main.go b/main.go index ba9b3def40..42e6c241d0 100644 --- a/main.go +++ b/main.go @@ -17,23 +17,24 @@ import ( ) func main() { - rootCmd := cmd.RootCmd() - err := install.EnsureInstalled() if err != nil { log.Fatalf("Validating installation failed: %v", err) } + rootCmd := cmd.RootCmd() + rootCmd.SilenceErrors = true // Silence errors so we handle them here. 
+ ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) defer cancel() err = rootCmd.ExecuteContext(ctx) + if errIsInterruption(err) { + logger.Info("Signal caught!") + os.Exit(130) + } if err != nil { - if errIsInterruption(err) { - logger.Info("Signal caught!") - os.Exit(130) - } - logger.Error(rootCmd.ErrPrefix(), err) + rootCmd.PrintErr(rootCmd.ErrPrefix(), err) os.Exit(1) } } From 3adbddf6da5466e0cc86c69d47c8559ddf7ac89d Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Tue, 5 Mar 2024 22:28:13 +0100 Subject: [PATCH 19/32] Add common function to get count of documents in data stream --- .../runners/common/elasticsearch.go | 53 +++++++++++++++++++ internal/benchrunner/runners/rally/metrics.go | 20 +++---- internal/benchrunner/runners/rally/runner.go | 50 +++-------------- .../benchrunner/runners/rally/scenario.go | 6 +-- internal/benchrunner/runners/stream/runner.go | 42 +-------------- .../benchrunner/runners/system/metrics.go | 16 +++--- internal/benchrunner/runners/system/runner.go | 50 +++-------------- 7 files changed, 89 insertions(+), 148 deletions(-) create mode 100644 internal/benchrunner/runners/common/elasticsearch.go diff --git a/internal/benchrunner/runners/common/elasticsearch.go b/internal/benchrunner/runners/common/elasticsearch.go new file mode 100644 index 0000000000..02e69caf71 --- /dev/null +++ b/internal/benchrunner/runners/common/elasticsearch.go @@ -0,0 +1,53 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package common + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/elastic/elastic-package/internal/elasticsearch" + "github.com/elastic/elastic-package/internal/logger" +) + +func CountDocsInDataStream(ctx context.Context, esapi *elasticsearch.API, dataStream string) (int, error) { + resp, err := esapi.Count( + esapi.Count.WithContext(ctx), + esapi.Count.WithIndex(dataStream), + esapi.Count.WithIgnoreUnavailable(true), + ) + if err != nil { + return 0, fmt.Errorf("could not search data stream: %w", err) + } + defer resp.Body.Close() + + if resp.IsError() { + return 0, fmt.Errorf("failed to get hits count: %s", resp.String()) + } + + var results struct { + Count int + Error *struct { + Type string + Reason string + } + Status int + } + + if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { + return 0, fmt.Errorf("could not decode search results response: %w", err) + } + + numHits := results.Count + if results.Error != nil { + logger.Debugf("found %d hits in %s data stream: %s: %s Status=%d", + numHits, dataStream, results.Error.Type, results.Error.Reason, results.Status) + } else { + logger.Debugf("found %d hits in %s data stream", numHits, dataStream) + } + + return numHits, nil +} diff --git a/internal/benchrunner/runners/rally/metrics.go b/internal/benchrunner/runners/rally/metrics.go index 2911ae50ac..c2cff767ef 100644 --- a/internal/benchrunner/runners/rally/metrics.go +++ b/internal/benchrunner/runners/rally/metrics.go @@ -6,6 +6,7 @@ package rally import ( "bytes" + "context" _ "embed" "encoding/json" "fmt" @@ -14,6 +15,7 @@ import ( "sync/atomic" "time" + "github.com/elastic/elastic-package/internal/benchrunner/runners/common" "github.com/elastic/elastic-package/internal/elasticsearch" "github.com/elastic/elastic-package/internal/elasticsearch/ingest" "github.com/elastic/elastic-package/internal/logger" @@ -87,10 +89,10 @@ func newCollector( } } -func (c *collector) start() { +func (c *collector) start(ctx context.Context) { 
c.createMetricsIndex() - c.collectMetricsBeforeRallyRun() + c.collectMetricsBeforeRallyRun(ctx) c.wg.Add(1) go func() { @@ -98,7 +100,7 @@ func (c *collector) start() { <-c.stopC // last collect before stopping - c.collectMetricsAfterRallyRun() + c.collectMetricsAfterRallyRun(ctx) c.publish(c.createEventsFromMetrics(c.endMetrics)) }() } @@ -111,7 +113,7 @@ func (c *collector) stop() { c.wg.Wait() } -func (c *collector) collectMetricsBeforeRallyRun() { +func (c *collector) collectMetricsBeforeRallyRun(ctx context.Context) { resp, err := c.esAPI.Indices.Refresh(c.esAPI.Indices.Refresh.WithIndex(c.datastream)) if err != nil { logger.Errorf("unable to refresh data stream at the beginning of rally run: %s", err) @@ -123,7 +125,7 @@ func (c *collector) collectMetricsBeforeRallyRun() { return } - c.startTotalHits = c.collectTotalHits() + c.startTotalHits = c.collectTotalHits(ctx) c.startMetrics = c.collect() c.startIngestMetrics = c.collectIngestMetrics() c.publish(c.createEventsFromMetrics(c.startMetrics)) @@ -285,7 +287,7 @@ func (c *collector) collectDiskUsage() map[string]ingest.DiskUsage { return du } -func (c *collector) collectMetricsAfterRallyRun() { +func (c *collector) collectMetricsAfterRallyRun(ctx context.Context) { resp, err := c.esAPI.Indices.Refresh(c.esAPI.Indices.Refresh.WithIndex(c.datastream)) if err != nil { logger.Errorf("unable to refresh data stream at the end of rally run: %s", err) @@ -300,13 +302,13 @@ func (c *collector) collectMetricsAfterRallyRun() { c.diskUsage = c.collectDiskUsage() c.endMetrics = c.collect() c.endIngestMetrics = c.collectIngestMetrics() - c.endTotalHits = c.collectTotalHits() + c.endTotalHits = c.collectTotalHits(ctx) c.publish(c.createEventsFromMetrics(c.endMetrics)) } -func (c *collector) collectTotalHits() int { - totalHits, err := getTotalHits(c.esAPI, c.datastream) +func (c *collector) collectTotalHits(ctx context.Context) int { + totalHits, err := common.CountDocsInDataStream(ctx, c.esAPI, c.datastream) if err != nil 
{ logger.Debugf("could not get total hits: %v", err) } diff --git a/internal/benchrunner/runners/rally/runner.go b/internal/benchrunner/runners/rally/runner.go index 0069864fa4..47d32af425 100644 --- a/internal/benchrunner/runners/rally/runner.go +++ b/internal/benchrunner/runners/rally/runner.go @@ -31,8 +31,8 @@ import ( "github.com/elastic/elastic-package/internal/benchrunner" "github.com/elastic/elastic-package/internal/benchrunner/reporters" + "github.com/elastic/elastic-package/internal/benchrunner/runners/common" "github.com/elastic/elastic-package/internal/configuration/locations" - "github.com/elastic/elastic-package/internal/elasticsearch" "github.com/elastic/elastic-package/internal/logger" "github.com/elastic/elastic-package/internal/multierror" "github.com/elastic/elastic-package/internal/packages" @@ -330,8 +330,8 @@ func (r *runner) setUp(ctx context.Context) error { return fmt.Errorf("error deleting old data in data stream: %s: %w", r.runtimeDataStream, err) } - cleared, err := wait.UntilTrue(ctx, func(context.Context) (bool, error) { - hits, err := getTotalHits(r.options.ESAPI, r.runtimeDataStream) + cleared, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { + hits, err := common.CountDocsInDataStream(ctx, r.options.ESAPI, r.runtimeDataStream) return hits == 0, err }, 5*time.Second, 2*time.Minute) if err != nil || !cleared { @@ -400,7 +400,7 @@ func (r *runner) wipeDataStreamOnSetup() error { } func (r *runner) run(ctx context.Context) (report reporters.Reportable, err error) { - r.startMetricsColletion() + r.startMetricsColletion(ctx) defer r.mcollector.stop() var corpusDocCount uint64 @@ -509,7 +509,7 @@ func (r *runner) installPackageFromPackageRoot() error { return nil } -func (r *runner) startMetricsColletion() { +func (r *runner) startMetricsColletion(ctx context.Context) { // TODO collect agent hosts metrics using system integration r.mcollector = newCollector( r.svcInfo, @@ -521,7 +521,7 @@ func (r *runner) 
startMetricsColletion() { r.runtimeDataStream, r.pipelinePrefix, ) - r.mcollector.start() + r.mcollector.start(ctx) } func (r *runner) collectAndSummarizeMetrics() (*metricsSummary, error) { @@ -1124,44 +1124,6 @@ func (r *runner) enrichEventWithBenchmarkMetadata(e map[string]interface{}) map[ return e } -func getTotalHits(esapi *elasticsearch.API, dataStream string) (int, error) { - resp, err := esapi.Count( - esapi.Count.WithIndex(dataStream), - esapi.Count.WithIgnoreUnavailable(true), - ) - if err != nil { - return 0, fmt.Errorf("could not search data stream: %w", err) - } - defer resp.Body.Close() - - if resp.IsError() { - return 0, fmt.Errorf("failed to get hits count: %s", resp.String()) - } - - var results struct { - Count int - Error *struct { - Type string - Reason string - } - Status int - } - - if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { - return 0, fmt.Errorf("could not decode search results response: %w", err) - } - - numHits := results.Count - if results.Error != nil { - logger.Debugf("found %d hits in %s data stream: %s: %s Status=%d", - numHits, dataStream, results.Error.Type, results.Error.Reason, results.Status) - } else { - logger.Debugf("found %d hits in %s data stream", numHits, dataStream) - } - - return numHits, nil -} - func createRunID() string { return uuid.New().String() } diff --git a/internal/benchrunner/runners/rally/scenario.go b/internal/benchrunner/runners/rally/scenario.go index 27982bf618..6135fd449a 100644 --- a/internal/benchrunner/runners/rally/scenario.go +++ b/internal/benchrunner/runners/rally/scenario.go @@ -59,10 +59,8 @@ func readConfig(path, scenario, packageName, packageVersion string) (*scenario, return nil, fmt.Errorf("can't load benchmark configuration: %s: %w", configPath, err) } - if err == nil { - if err := cfg.Unpack(c); err != nil { - return nil, fmt.Errorf("can't unpack benchmark configuration: %s: %w", configPath, err) - } + if err := cfg.Unpack(c); err != nil { + return nil, 
fmt.Errorf("can't unpack benchmark configuration: %s: %w", configPath, err) } c.Package = packageName diff --git a/internal/benchrunner/runners/stream/runner.go b/internal/benchrunner/runners/stream/runner.go index 6d4aa9fc60..104b04a675 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ -27,7 +27,7 @@ import ( "github.com/elastic/elastic-package/internal/benchrunner" "github.com/elastic/elastic-package/internal/benchrunner/reporters" - "github.com/elastic/elastic-package/internal/elasticsearch" + "github.com/elastic/elastic-package/internal/benchrunner/runners/common" "github.com/elastic/elastic-package/internal/logger" "github.com/elastic/elastic-package/internal/multierror" "github.com/elastic/elastic-package/internal/packages" @@ -153,7 +153,7 @@ func (r *runner) setUp(ctx context.Context) error { cleared, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { totalHits := 0 for _, runtimeDataStream := range r.runtimeDataStreams { - hits, err := getTotalHits(r.options.ESAPI, runtimeDataStream) + hits, err := common.CountDocsInDataStream(ctx, r.options.ESAPI, runtimeDataStream) if err != nil { return false, err } @@ -582,44 +582,6 @@ func (r *runner) enrichEventWithBenchmarkMetadata(e map[string]any) map[string]i return e } -func getTotalHits(esapi *elasticsearch.API, dataStream string) (int, error) { - resp, err := esapi.Count( - esapi.Count.WithIndex(dataStream), - esapi.Count.WithIgnoreUnavailable(true), - ) - if err != nil { - return 0, fmt.Errorf("could not search data stream: %w", err) - } - defer resp.Body.Close() - - if resp.IsError() { - return 0, fmt.Errorf("failed to get hits count: %s", resp.String()) - } - - var results struct { - Count int - Error *struct { - Type string - Reason string - } - Status int - } - - if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { - return 0, fmt.Errorf("could not decode search results response: %w", err) - } - - numHits := 
results.Count - if results.Error != nil { - logger.Debugf("found %d hits in %s data stream: %s: %s Status=%d", - numHits, dataStream, results.Error.Type, results.Error.Reason, results.Status) - } else { - logger.Debugf("found %d hits in %s data stream", numHits, dataStream) - } - - return numHits, nil -} - func createRunID() string { return uuid.New().String() } diff --git a/internal/benchrunner/runners/system/metrics.go b/internal/benchrunner/runners/system/metrics.go index c68ec183d5..cc14698b99 100644 --- a/internal/benchrunner/runners/system/metrics.go +++ b/internal/benchrunner/runners/system/metrics.go @@ -6,6 +6,7 @@ package system import ( "bytes" + "context" _ "embed" "encoding/json" "fmt" @@ -14,6 +15,7 @@ import ( "sync/atomic" "time" + "github.com/elastic/elastic-package/internal/benchrunner/runners/common" "github.com/elastic/elastic-package/internal/elasticsearch" "github.com/elastic/elastic-package/internal/elasticsearch/ingest" "github.com/elastic/elastic-package/internal/logger" @@ -88,7 +90,7 @@ func newCollector( } } -func (c *collector) start() { +func (c *collector) start(ctx context.Context) { c.tick = time.NewTicker(c.interval) c.createMetricsIndex() var once sync.Once @@ -101,14 +103,14 @@ func (c *collector) start() { select { case <-c.stopC: // last collect before stopping - c.collectMetricsPreviousToStop() + c.collectMetricsPreviousToStop(ctx) c.publish(c.createEventsFromMetrics(c.endMetrics)) return case <-c.tick.C: once.Do(func() { c.waitUntilReady() c.startIngestMetrics = c.collectIngestMetrics() - c.startTotalHits = c.collectTotalHits() + c.startTotalHits = c.collectTotalHits(ctx) c.startMetrics = c.collect() c.publish(c.createEventsFromMetrics(c.startMetrics)) }) @@ -311,15 +313,15 @@ func (c *collector) collectDiskUsage() map[string]ingest.DiskUsage { return du } -func (c *collector) collectMetricsPreviousToStop() { +func (c *collector) collectMetricsPreviousToStop(ctx context.Context) { c.endIngestMetrics = c.collectIngestMetrics() 
c.diskUsage = c.collectDiskUsage() - c.endTotalHits = c.collectTotalHits() + c.endTotalHits = c.collectTotalHits(ctx) c.endMetrics = c.collect() } -func (c *collector) collectTotalHits() int { - totalHits, err := getTotalHits(c.esAPI, c.datastream) +func (c *collector) collectTotalHits(ctx context.Context) int { + totalHits, err := common.CountDocsInDataStream(ctx, c.esAPI, c.datastream) if err != nil { logger.Debugf("could not total hits: %w", err) } diff --git a/internal/benchrunner/runners/system/runner.go b/internal/benchrunner/runners/system/runner.go index 636df6e075..3fc97c2313 100644 --- a/internal/benchrunner/runners/system/runner.go +++ b/internal/benchrunner/runners/system/runner.go @@ -26,8 +26,8 @@ import ( "github.com/elastic/elastic-package/internal/benchrunner" "github.com/elastic/elastic-package/internal/benchrunner/reporters" + "github.com/elastic/elastic-package/internal/benchrunner/runners/common" "github.com/elastic/elastic-package/internal/configuration/locations" - "github.com/elastic/elastic-package/internal/elasticsearch" "github.com/elastic/elastic-package/internal/kibana" "github.com/elastic/elastic-package/internal/logger" "github.com/elastic/elastic-package/internal/multierror" @@ -216,7 +216,7 @@ func (r *runner) setUp(ctx context.Context) error { } cleared, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { - hits, err := getTotalHits(r.options.ESAPI, r.runtimeDataStream) + hits, err := common.CountDocsInDataStream(ctx, r.options.ESAPI, r.runtimeDataStream) return hits == 0, err }, 5*time.Second, 2*time.Minute) if err != nil || !cleared { @@ -271,7 +271,7 @@ func (r *runner) run(ctx context.Context) (report reporters.Reportable, err erro } } - r.startMetricsColletion() + r.startMetricsColletion(ctx) defer r.mcollector.stop() // if there is a generator config, generate the data @@ -314,7 +314,7 @@ func (r *runner) run(ctx context.Context) (report reporters.Reportable, err erro return createReport(r.options.BenchName, 
r.corporaFile, r.scenario, msum) } -func (r *runner) startMetricsColletion() { +func (r *runner) startMetricsColletion(ctx context.Context) { // TODO collect agent hosts metrics using system integration r.mcollector = newCollector( r.svcInfo, @@ -326,7 +326,7 @@ func (r *runner) startMetricsColletion() { r.runtimeDataStream, r.pipelinePrefix, ) - r.mcollector.start() + r.mcollector.start(ctx) } func (r *runner) collectAndSummarizeMetrics() (*metricsSummary, error) { @@ -652,7 +652,7 @@ func (r *runner) waitUntilBenchmarkFinishes(ctx context.Context) (bool, error) { oldHits := 0 return wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { var err error - hits, err := getTotalHits(r.options.ESAPI, r.runtimeDataStream) + hits, err := common.CountDocsInDataStream(ctx, r.options.ESAPI, r.runtimeDataStream) if hits == 0 { return false, err } @@ -907,44 +907,6 @@ func (r *runner) enrichEventWithBenchmarkMetadata(e map[string]interface{}) map[ return e } -func getTotalHits(esapi *elasticsearch.API, dataStream string) (int, error) { - resp, err := esapi.Count( - esapi.Count.WithIndex(dataStream), - esapi.Count.WithIgnoreUnavailable(true), - ) - if err != nil { - return 0, fmt.Errorf("could not search data stream: %w", err) - } - defer resp.Body.Close() - - if resp.IsError() { - return 0, fmt.Errorf("failed to get hits count: %s", resp.String()) - } - - var results struct { - Count int - Error *struct { - Type string - Reason string - } - Status int - } - - if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { - return 0, fmt.Errorf("could not decode search results response: %w", err) - } - - numHits := results.Count - if results.Error != nil { - logger.Debugf("found %d hits in %s data stream: %s: %s Status=%d", - numHits, dataStream, results.Error.Type, results.Error.Reason, results.Status) - } else { - logger.Debugf("found %d hits in %s data stream", numHits, dataStream) - } - - return numHits, nil -} - func filterAgents(allAgents []kibana.Agent) 
[]kibana.Agent { var filtered []kibana.Agent for _, agent := range allAgents { From 9898505aded93ca1948e8a3f53a3954fb4f6385a Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 6 Mar 2024 16:36:26 +0100 Subject: [PATCH 20/32] Print new line after errors --- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index 42e6c241d0..20f228e4f8 100644 --- a/main.go +++ b/main.go @@ -34,7 +34,7 @@ func main() { os.Exit(130) } if err != nil { - rootCmd.PrintErr(rootCmd.ErrPrefix(), err) + rootCmd.PrintErrln(rootCmd.ErrPrefix(), err) os.Exit(1) } } From cfa10753b2d20323273395f101712d9b2cad230b Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 6 Mar 2024 17:32:11 +0100 Subject: [PATCH 21/32] Log the message about signal caught when the signal is caught --- main.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/main.go b/main.go index 20f228e4f8..c4fda8e043 100644 --- a/main.go +++ b/main.go @@ -22,15 +22,18 @@ func main() { log.Fatalf("Validating installation failed: %v", err) } - rootCmd := cmd.RootCmd() - rootCmd.SilenceErrors = true // Silence errors so we handle them here. - ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) defer cancel() + stop := context.AfterFunc(ctx, func() { + logger.Info("Signal caught!") + }) + defer stop() + rootCmd := cmd.RootCmd() + rootCmd.SilenceErrors = true // Silence errors so we handle them here. 
err = rootCmd.ExecuteContext(ctx) if errIsInterruption(err) { - logger.Info("Signal caught!") + rootCmd.Println("interrupted") os.Exit(130) } if err != nil { From ce50c2260c35ca7a829154a7b29b57c3847d31d5 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 6 Mar 2024 17:55:14 +0100 Subject: [PATCH 22/32] Ensure that service container logs are always written --- internal/servicedeployer/compose.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/servicedeployer/compose.go b/internal/servicedeployer/compose.go index 7a04001029..3a57c2857c 100644 --- a/internal/servicedeployer/compose.go +++ b/internal/servicedeployer/compose.go @@ -119,7 +119,7 @@ func (d *DockerComposeServiceDeployer) SetUp(ctx context.Context, inCtxt Service err = p.WaitForHealthy(ctx, opts) if err != nil { - processServiceContainerLogs(ctx, p, compose.CommandOptions{ + processServiceContainerLogs(context.WithoutCancel(ctx), p, compose.CommandOptions{ Env: opts.Env, }, outCtxt.Name) return nil, fmt.Errorf("service is unhealthy: %w", err) From e12b4e9305103a2a9dc44b55bd253eb9f2a941c7 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 6 Mar 2024 18:28:58 +0100 Subject: [PATCH 23/32] Add signal handling per command --- cmd/benchmark.go | 4 ++++ cmd/testrunner.go | 3 +++ internal/cobraext/signal.go | 38 +++++++++++++++++++++++++++++++++++++ main.go | 20 +++++++++++-------- 4 files changed, 57 insertions(+), 8 deletions(-) create mode 100644 internal/cobraext/signal.go diff --git a/cmd/benchmark.go b/cmd/benchmark.go index 93108284e8..2acabb1ee4 100644 --- a/cmd/benchmark.go +++ b/cmd/benchmark.go @@ -68,7 +68,11 @@ func setupBenchmarkCommand() *cobraext.Command { Use: "benchmark", Short: "Run benchmarks for the package", Long: benchLongDescription, + Annotations: map[string]string{ + "enable_signal_handling": "", + }, } + cobraext.EnableSignalHandling(cmd) cmd.PersistentFlags().StringP(cobraext.ProfileFlagName, "p", "", 
fmt.Sprintf(cobraext.ProfileFlagDescription, install.ProfileNameEnvVar)) diff --git a/cmd/testrunner.go b/cmd/testrunner.go index 0c9f2078e6..c5cdba1ebe 100644 --- a/cmd/testrunner.go +++ b/cmd/testrunner.go @@ -56,6 +56,9 @@ func setupTestCommand() *cobraext.Command { Use: "test", Short: "Run test suite for the package", Long: testLongDescription, + Annotations: map[string]string{ + "enable_signal_handling": "", + }, RunE: func(cmd *cobra.Command, args []string) error { cmd.Println("Run test suite for the package") diff --git a/internal/cobraext/signal.go b/internal/cobraext/signal.go new file mode 100644 index 0000000000..721be3b389 --- /dev/null +++ b/internal/cobraext/signal.go @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cobraext + +import ( + "os" + + "github.com/spf13/cobra" +) + +const signalHandlingAnnotation = "enable_signal_handling" + +func EnableSignalHandling(cmd *cobra.Command) { + cmd.Annotations[signalHandlingAnnotation] = "" +} + +func IsSignalHandingRequested(cmd *cobra.Command) bool { + _, found := getCommandAnnotation(cmd, signalHandlingAnnotation) + return found +} + +func getCommandAnnotation(cmd *cobra.Command, key string) (string, bool) { + if len(os.Args) == 0 { + return "", false + } + cmd, _, err := cmd.Root().Find(os.Args[1:]) + if err != nil { + return "", false + } + for ; cmd.HasParent(); cmd = cmd.Parent() { + if value, found := cmd.Annotations[key]; found { + return value, true + } + } + return "", false +} diff --git a/main.go b/main.go index c4fda8e043..a19c42a770 100644 --- a/main.go +++ b/main.go @@ -12,6 +12,7 @@ import ( "os/signal" "github.com/elastic/elastic-package/cmd" + "github.com/elastic/elastic-package/internal/cobraext" "github.com/elastic/elastic-package/internal/install" 
"github.com/elastic/elastic-package/internal/logger" ) @@ -22,16 +23,19 @@ func main() { log.Fatalf("Validating installation failed: %v", err) } - ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) - defer cancel() - stop := context.AfterFunc(ctx, func() { - logger.Info("Signal caught!") - }) - defer stop() - rootCmd := cmd.RootCmd() rootCmd.SilenceErrors = true // Silence errors so we handle them here. - err = rootCmd.ExecuteContext(ctx) + if cobraext.IsSignalHandingRequested(rootCmd) { + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) + defer cancel() + stop := context.AfterFunc(ctx, func() { + logger.Info("Signal caught!") + }) + defer stop() + rootCmd.SetContext(ctx) + } + + err = rootCmd.Execute() if errIsInterruption(err) { rootCmd.Println("interrupted") os.Exit(130) From 438fc2ac5219941eddbcbc99848403c867cf1c8a Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 6 Mar 2024 18:33:58 +0100 Subject: [PATCH 24/32] Fix enablement of signal handling --- cmd/benchmark.go | 3 --- cmd/testrunner.go | 4 +--- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/cmd/benchmark.go b/cmd/benchmark.go index 2acabb1ee4..9bc4f9789c 100644 --- a/cmd/benchmark.go +++ b/cmd/benchmark.go @@ -68,9 +68,6 @@ func setupBenchmarkCommand() *cobraext.Command { Use: "benchmark", Short: "Run benchmarks for the package", Long: benchLongDescription, - Annotations: map[string]string{ - "enable_signal_handling": "", - }, } cobraext.EnableSignalHandling(cmd) diff --git a/cmd/testrunner.go b/cmd/testrunner.go index c5cdba1ebe..f2224e72bf 100644 --- a/cmd/testrunner.go +++ b/cmd/testrunner.go @@ -56,9 +56,6 @@ func setupTestCommand() *cobraext.Command { Use: "test", Short: "Run test suite for the package", Long: testLongDescription, - Annotations: map[string]string{ - "enable_signal_handling": "", - }, RunE: func(cmd *cobra.Command, args []string) error { cmd.Println("Run test suite for the package") @@ -69,6 +66,7 @@ func 
setupTestCommand() *cobraext.Command { return cobraext.ComposeCommandActions(cmd, args, testTypeCmdActions...) }, } + cobraext.EnableSignalHandling(cmd) cmd.PersistentFlags().BoolP(cobraext.FailOnMissingFlagName, "m", false, cobraext.FailOnMissingFlagDescription) cmd.PersistentFlags().BoolP(cobraext.GenerateTestResultFlagName, "g", false, cobraext.GenerateTestResultFlagDescription) From 761e6d67703df7d48ea2d8d0e5a51aa7428d6f10 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 6 Mar 2024 18:35:05 +0100 Subject: [PATCH 25/32] Fix panic --- internal/cobraext/signal.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/cobraext/signal.go b/internal/cobraext/signal.go index 721be3b389..26d87c8b26 100644 --- a/internal/cobraext/signal.go +++ b/internal/cobraext/signal.go @@ -13,6 +13,9 @@ import ( const signalHandlingAnnotation = "enable_signal_handling" func EnableSignalHandling(cmd *cobra.Command) { + if cmd.Annotations == nil { + cmd.Annotations = make(map[string]string) + } cmd.Annotations[signalHandlingAnnotation] = "" } From 168d878d781fe674326fbb1cd1470b26b51e6c3e Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 6 Mar 2024 18:52:17 +0100 Subject: [PATCH 26/32] Handle interruption of subcommands --- internal/compose/compose.go | 8 ++++++++ main.go | 12 +++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/internal/compose/compose.go b/internal/compose/compose.go index 518fbc0e73..f0a2ef6633 100644 --- a/internal/compose/compose.go +++ b/internal/compose/compose.go @@ -12,6 +12,7 @@ import ( "io" "os" "os/exec" + "runtime" "strconv" "strings" "time" @@ -479,6 +480,13 @@ func (p *Project) runDockerComposeCmd(ctx context.Context, opts dockerComposeOpt args = append(args, opts.args...) cmd := exec.CommandContext(ctx, name, args...) + cmd.Cancel = func() error { + if runtime.GOOS == "windows" { + // Interrupt is not implemented in Windows. 
+ return cmd.Process.Kill() + } + return cmd.Process.Signal(os.Interrupt) + } cmd.Env = append(os.Environ(), opts.env...) if logger.IsDebugMode() { diff --git a/main.go b/main.go index a19c42a770..62aa473873 100644 --- a/main.go +++ b/main.go @@ -9,6 +9,7 @@ import ( "errors" "log" "os" + "os/exec" "os/signal" "github.com/elastic/elastic-package/cmd" @@ -47,5 +48,14 @@ func main() { } func errIsInterruption(err error) bool { - return errors.Is(err, context.Canceled) + if errors.Is(err, context.Canceled) { + return true + } + + var exitError *exec.ExitError + if errors.As(err, &exitError) && (*exitError).ProcessState.ExitCode() == 130 { // 130 -> subcommand killed by sigint + return true + } + + return false } From 72c7b10a780295c8c98ced051cc1072d421371e4 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 6 Mar 2024 20:37:11 +0100 Subject: [PATCH 27/32] Move signal enablement back to where it was --- cmd/benchmark.go | 34 ++++++++++++++++++++---------- cmd/testrunner.go | 10 ++++++--- internal/cobraext/signal.go | 41 ------------------------------------- internal/signal/sigint.go | 26 +++++++++++++++++++++++ main.go | 13 ------------ 5 files changed, 56 insertions(+), 68 deletions(-) delete mode 100644 internal/cobraext/signal.go create mode 100644 internal/signal/sigint.go diff --git a/cmd/benchmark.go b/cmd/benchmark.go index 9bc4f9789c..cbf708f4e5 100644 --- a/cmd/benchmark.go +++ b/cmd/benchmark.go @@ -15,6 +15,7 @@ import ( "github.com/elastic/elastic-package/internal/elasticsearch" "github.com/elastic/elastic-package/internal/install" "github.com/elastic/elastic-package/internal/logger" + "github.com/elastic/elastic-package/internal/signal" "github.com/elastic/elastic-package/internal/stack" "github.com/spf13/cobra" @@ -69,7 +70,6 @@ func setupBenchmarkCommand() *cobraext.Command { Short: "Run benchmarks for the package", Long: benchLongDescription, } - cobraext.EnableSignalHandling(cmd) cmd.PersistentFlags().StringP(cobraext.ProfileFlagName, "p", 
"", fmt.Sprintf(cobraext.ProfileFlagDescription, install.ProfileNameEnvVar)) @@ -156,6 +156,9 @@ func pipelineCommandAction(cmd *cobra.Command, args []string) error { } } + ctx, stop := signal.Enable(cmd.Context(), logger.Info) + defer stop() + benchFolders, err := pipeline.FindBenchmarkFolders(packageRootPath, dataStreams) if err != nil { return fmt.Errorf("unable to determine benchmark folder paths: %w", err) @@ -185,7 +188,7 @@ func pipelineCommandAction(cmd *cobra.Command, args []string) error { if err != nil { return fmt.Errorf("can't create Elasticsearch client: %w", err) } - err = esClient.CheckHealth(cmd.Context()) + err = esClient.CheckHealth(ctx) if err != nil { return err } @@ -202,7 +205,7 @@ func pipelineCommandAction(cmd *cobra.Command, args []string) error { ) runner := pipeline.NewPipelineBenchmark(opts) - r, err := benchrunner.Run(cmd.Context(), runner) + r, err := benchrunner.Run(ctx, runner) if err != nil { return fmt.Errorf("error running package pipeline benchmarks: %w", err) @@ -307,11 +310,14 @@ func rallyCommandAction(cmd *cobra.Command, args []string) error { return err } + ctx, stop := signal.Enable(cmd.Context(), logger.Info) + defer stop() + esClient, err := stack.NewElasticsearchClientFromProfile(profile) if err != nil { return fmt.Errorf("can't create Elasticsearch client: %w", err) } - err = esClient.CheckHealth(cmd.Context()) + err = esClient.CheckHealth(ctx) if err != nil { return err } @@ -335,7 +341,7 @@ func rallyCommandAction(cmd *cobra.Command, args []string) error { rally.WithRallyCorpusAtPath(corpusAtPath), } - esMetricsClient, err := initializeESMetricsClient(cmd.Context()) + esMetricsClient, err := initializeESMetricsClient(ctx) if err != nil { return fmt.Errorf("can't create Elasticsearch metrics client: %w", err) } @@ -345,7 +351,7 @@ func rallyCommandAction(cmd *cobra.Command, args []string) error { runner := rally.NewRallyBenchmark(rally.NewOptions(withOpts...)) - r, err := benchrunner.Run(cmd.Context(), runner) + r, 
err := benchrunner.Run(ctx, runner) if errors.Is(err, rally.ErrDryRun) { return nil } @@ -475,11 +481,14 @@ func streamCommandAction(cmd *cobra.Command, args []string) error { return err } + ctx, stop := signal.Enable(cmd.Context(), logger.Info) + defer stop() + esClient, err := stack.NewElasticsearchClientFromProfile(profile) if err != nil { return fmt.Errorf("can't create Elasticsearch client: %w", err) } - err = esClient.CheckHealth(cmd.Context()) + err = esClient.CheckHealth(ctx) if err != nil { return err } @@ -505,7 +514,7 @@ func streamCommandAction(cmd *cobra.Command, args []string) error { runner := stream.NewStreamBenchmark(stream.NewOptions(withOpts...)) - _, err = benchrunner.Run(cmd.Context(), runner) + _, err = benchrunner.Run(ctx, runner) if err != nil { return fmt.Errorf("error running package stream benchmarks: %w", err) } @@ -579,11 +588,14 @@ func systemCommandAction(cmd *cobra.Command, args []string) error { return err } + ctx, stop := signal.Enable(cmd.Context(), logger.Info) + defer stop() + esClient, err := stack.NewElasticsearchClientFromProfile(profile) if err != nil { return fmt.Errorf("can't create Elasticsearch client: %w", err) } - err = esClient.CheckHealth(cmd.Context()) + err = esClient.CheckHealth(ctx) if err != nil { return err } @@ -606,7 +618,7 @@ func systemCommandAction(cmd *cobra.Command, args []string) error { system.WithProfile(profile), } - esMetricsClient, err := initializeESMetricsClient(cmd.Context()) + esMetricsClient, err := initializeESMetricsClient(ctx) if err != nil { return fmt.Errorf("can't create Elasticsearch metrics client: %w", err) } @@ -616,7 +628,7 @@ func systemCommandAction(cmd *cobra.Command, args []string) error { runner := system.NewSystemBenchmark(system.NewOptions(withOpts...)) - r, err := benchrunner.Run(cmd.Context(), runner) + r, err := benchrunner.Run(ctx, runner) if err != nil { return fmt.Errorf("error running package system benchmarks: %w", err) } diff --git a/cmd/testrunner.go 
b/cmd/testrunner.go index f2224e72bf..a698a2096f 100644 --- a/cmd/testrunner.go +++ b/cmd/testrunner.go @@ -19,7 +19,9 @@ import ( "github.com/elastic/elastic-package/internal/common" "github.com/elastic/elastic-package/internal/install" "github.com/elastic/elastic-package/internal/kibana" + "github.com/elastic/elastic-package/internal/logger" "github.com/elastic/elastic-package/internal/packages" + "github.com/elastic/elastic-package/internal/signal" "github.com/elastic/elastic-package/internal/stack" "github.com/elastic/elastic-package/internal/testrunner" "github.com/elastic/elastic-package/internal/testrunner/reporters/formats" @@ -66,7 +68,6 @@ func setupTestCommand() *cobraext.Command { return cobraext.ComposeCommandActions(cmd, args, testTypeCmdActions...) }, } - cobraext.EnableSignalHandling(cmd) cmd.PersistentFlags().BoolP(cobraext.FailOnMissingFlagName, "m", false, cobraext.FailOnMissingFlagDescription) cmd.PersistentFlags().BoolP(cobraext.GenerateTestResultFlagName, "g", false, cobraext.GenerateTestResultFlagDescription) @@ -294,11 +295,14 @@ func testTypeCommandActionFactory(runner testrunner.TestRunner) cobraext.Command variantFlag, _ := cmd.Flags().GetString(cobraext.VariantFlagName) + ctx, stop := signal.Enable(cmd.Context(), logger.Info) + defer stop() + esClient, err := stack.NewElasticsearchClientFromProfile(profile) if err != nil { return fmt.Errorf("can't create Elasticsearch client: %w", err) } - err = esClient.CheckHealth(cmd.Context()) + err = esClient.CheckHealth(ctx) if err != nil { return err } @@ -329,7 +333,7 @@ func testTypeCommandActionFactory(runner testrunner.TestRunner) cobraext.Command var results []testrunner.TestResult for _, folder := range testFolders { - r, err := testrunner.Run(cmd.Context(), testType, testrunner.TestOptions{ + r, err := testrunner.Run(ctx, testType, testrunner.TestOptions{ Profile: profile, TestFolder: folder, PackageRootPath: packageRootPath, diff --git a/internal/cobraext/signal.go 
b/internal/cobraext/signal.go deleted file mode 100644 index 26d87c8b26..0000000000 --- a/internal/cobraext/signal.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package cobraext - -import ( - "os" - - "github.com/spf13/cobra" -) - -const signalHandlingAnnotation = "enable_signal_handling" - -func EnableSignalHandling(cmd *cobra.Command) { - if cmd.Annotations == nil { - cmd.Annotations = make(map[string]string) - } - cmd.Annotations[signalHandlingAnnotation] = "" -} - -func IsSignalHandingRequested(cmd *cobra.Command) bool { - _, found := getCommandAnnotation(cmd, signalHandlingAnnotation) - return found -} - -func getCommandAnnotation(cmd *cobra.Command, key string) (string, bool) { - if len(os.Args) == 0 { - return "", false - } - cmd, _, err := cmd.Root().Find(os.Args[1:]) - if err != nil { - return "", false - } - for ; cmd.HasParent(); cmd = cmd.Parent() { - if value, found := cmd.Annotations[key]; found { - return value, true - } - } - return "", false -} diff --git a/internal/signal/sigint.go b/internal/signal/sigint.go new file mode 100644 index 0000000000..f522aa603a --- /dev/null +++ b/internal/signal/sigint.go @@ -0,0 +1,26 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package signal + +import ( + "context" + "os" + "os/signal" +) + +// Enable returns a context configured to be cancelled if an interruption signal +// is received. +// Returned context can be cancelled explicitly with the returned function. 
+func Enable(ctx context.Context, infoLogger func(a ...any)) (notifyCtx context.Context, stop func()) { + notifyCtx, stopNotify := signal.NotifyContext(ctx, os.Interrupt) + stopLogger := context.AfterFunc(notifyCtx, func() { + infoLogger("Signal caught!") + }) + + return notifyCtx, func() { + stopLogger() + stopNotify() + } +} diff --git a/main.go b/main.go index 62aa473873..07a69c83b3 100644 --- a/main.go +++ b/main.go @@ -10,12 +10,9 @@ import ( "log" "os" "os/exec" - "os/signal" "github.com/elastic/elastic-package/cmd" - "github.com/elastic/elastic-package/internal/cobraext" "github.com/elastic/elastic-package/internal/install" - "github.com/elastic/elastic-package/internal/logger" ) func main() { @@ -26,16 +23,6 @@ func main() { rootCmd := cmd.RootCmd() rootCmd.SilenceErrors = true // Silence errors so we handle them here. - if cobraext.IsSignalHandingRequested(rootCmd) { - ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) - defer cancel() - stop := context.AfterFunc(ctx, func() { - logger.Info("Signal caught!") - }) - defer stop() - rootCmd.SetContext(ctx) - } - err = rootCmd.Execute() if errIsInterruption(err) { rootCmd.Println("interrupted") From 362a2634560a98e60b5828fba94edd379b11a2cf Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Wed, 6 Mar 2024 20:47:44 +0100 Subject: [PATCH 28/32] Fix imports --- cmd/benchmark.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/cmd/benchmark.go b/cmd/benchmark.go index cbf708f4e5..7dbf5e4a16 100644 --- a/cmd/benchmark.go +++ b/cmd/benchmark.go @@ -12,12 +12,6 @@ import ( "strings" "time" - "github.com/elastic/elastic-package/internal/elasticsearch" - "github.com/elastic/elastic-package/internal/install" - "github.com/elastic/elastic-package/internal/logger" - "github.com/elastic/elastic-package/internal/signal" - "github.com/elastic/elastic-package/internal/stack" - "github.com/spf13/cobra" "github.com/elastic/elastic-package/internal/benchrunner" @@ -30,7 
+24,12 @@ import ( "github.com/elastic/elastic-package/internal/benchrunner/runners/system" "github.com/elastic/elastic-package/internal/cobraext" "github.com/elastic/elastic-package/internal/common" + "github.com/elastic/elastic-package/internal/elasticsearch" + "github.com/elastic/elastic-package/internal/install" + "github.com/elastic/elastic-package/internal/logger" "github.com/elastic/elastic-package/internal/packages" + "github.com/elastic/elastic-package/internal/signal" + "github.com/elastic/elastic-package/internal/stack" "github.com/elastic/elastic-package/internal/testrunner" ) From f900edb3d58e6582bb58e6f0e8621d38eb2f77f3 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Thu, 7 Mar 2024 08:51:12 +0100 Subject: [PATCH 29/32] Define context parameters so we don't forget later --- internal/benchrunner/runners/rally/runner.go | 14 +++++++------- internal/benchrunner/runners/stream/runner.go | 4 ++-- internal/benchrunner/runners/system/runner.go | 4 ++-- internal/testrunner/runners/asset/runner.go | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/internal/benchrunner/runners/rally/runner.go b/internal/benchrunner/runners/rally/runner.go index 47d32af425..6f81081ac3 100644 --- a/internal/benchrunner/runners/rally/runner.go +++ b/internal/benchrunner/runners/rally/runner.go @@ -388,7 +388,7 @@ func (r *runner) extractSimulatedTemplate(indexTemplate string) (string, error) func (r *runner) wipeDataStreamOnSetup() error { // Delete old data logger.Debug("deleting old data in data stream...") - r.wipeDataStreamHandler = func(context.Context) error { + r.wipeDataStreamHandler = func(ctx context.Context) error { logger.Debugf("deleting data in data stream...") if err := r.deleteDataStreamDocs(r.runtimeDataStream); err != nil { return fmt.Errorf("error deleting data in data stream: %w", err) @@ -470,7 +470,7 @@ func (r *runner) installPackageFromRegistry(packageName, packageVersion string) return fmt.Errorf("cannot install package %s@%s: 
%w", packageName, packageVersion, err) } - r.removePackageHandler = func(context.Context) error { + r.removePackageHandler = func(ctx context.Context) error { logger.Debug("removing benchmark package...") if _, err := r.options.KibanaClient.RemovePackage(packageName, packageVersion); err != nil { return fmt.Errorf("error removing benchmark package: %w", err) @@ -498,7 +498,7 @@ func (r *runner) installPackageFromPackageRoot() error { return fmt.Errorf("failed to install package: %w", err) } - r.removePackageHandler = func(context.Context) error { + r.removePackageHandler = func(ctx context.Context) error { if err := installer.Uninstall(); err != nil { return fmt.Errorf("error removing benchmark package: %w", err) } @@ -714,7 +714,7 @@ func (r *runner) runGenerator(destDir string) (uint64, error) { r.corpusFile = corpusFile.Name() - r.clearCorporaHandler = func(context.Context) error { + r.clearCorporaHandler = func(ctx context.Context) error { return errors.Join( os.Remove(r.corpusFile), ) @@ -781,7 +781,7 @@ func (r *runner) createRallyTrack(corpusDocsCount uint64, destDir string) error r.reportFile = reportFile.Name() if r.options.RallyTrackOutputDir != "" { - r.persistRallyTrackHandler = func(context.Context) error { + r.persistRallyTrackHandler = func(ctx context.Context) error { err := os.MkdirAll(r.options.RallyTrackOutputDir, 0755) if err != nil { return fmt.Errorf("cannot not create rally track output dir: %w", err) @@ -805,7 +805,7 @@ func (r *runner) createRallyTrack(corpusDocsCount uint64, destDir string) error } } - r.clearTrackHandler = func(context.Context) error { + r.clearTrackHandler = func(ctx context.Context) error { return errors.Join( os.Remove(r.trackFile), os.Remove(r.reportFile), @@ -853,7 +853,7 @@ func (r *runner) copyCorpusFile(corpusPath, destDir string) (uint64, error) { r.corpusFile = corpusFile.Name() - r.clearCorporaHandler = func(context.Context) error { + r.clearCorporaHandler = func(ctx context.Context) error { return errors.Join( 
os.Remove(r.corpusFile), ) diff --git a/internal/benchrunner/runners/stream/runner.go b/internal/benchrunner/runners/stream/runner.go index 104b04a675..7988d40847 100644 --- a/internal/benchrunner/runners/stream/runner.go +++ b/internal/benchrunner/runners/stream/runner.go @@ -174,7 +174,7 @@ func (r *runner) setUp(ctx context.Context) error { func (r *runner) wipeDataStreamsOnSetup() error { // Delete old data logger.Debug("deleting old data in data stream...") - r.wipeDataStreamHandler = func(context.Context) error { + r.wipeDataStreamHandler = func(ctx context.Context) error { logger.Debugf("deleting data in data stream...") for _, runtimeDataStream := range r.runtimeDataStreams { if err := r.deleteDataStreamDocs(runtimeDataStream); err != nil { @@ -214,7 +214,7 @@ func (r *runner) installPackageFromPackageRoot() error { return fmt.Errorf("failed to install package: %w", err) } - r.removePackageHandler = func(context.Context) error { + r.removePackageHandler = func(ctx context.Context) error { if err := installer.Uninstall(); err != nil { return fmt.Errorf("error removing benchmark package: %w", err) } diff --git a/internal/benchrunner/runners/system/runner.go b/internal/benchrunner/runners/system/runner.go index 3fc97c2313..daca187daf 100644 --- a/internal/benchrunner/runners/system/runner.go +++ b/internal/benchrunner/runners/system/runner.go @@ -203,7 +203,7 @@ func (r *runner) setUp(ctx context.Context) error { r.scenario.Version, ) - r.wipeDataStreamHandler = func(context.Context) error { + r.wipeDataStreamHandler = func(ctx context.Context) error { logger.Debugf("deleting data in data stream...") if err := r.deleteDataStreamDocs(r.runtimeDataStream); err != nil { return fmt.Errorf("error deleting data in data stream: %w", err) @@ -611,7 +611,7 @@ func (r *runner) runGenerator(destDir string) error { } r.corporaFile = f.Name() - r.clearCorporaHandler = func(context.Context) error { + r.clearCorporaHandler = func(ctx context.Context) error { return 
os.Remove(r.corporaFile) } diff --git a/internal/testrunner/runners/asset/runner.go b/internal/testrunner/runners/asset/runner.go index 2925c31064..247cb4bd4c 100644 --- a/internal/testrunner/runners/asset/runner.go +++ b/internal/testrunner/runners/asset/runner.go @@ -107,7 +107,7 @@ func (r *runner) run() ([]testrunner.TestResult, error) { return result.WithError(fmt.Errorf("can't install the package: %w", err)) } - r.removePackageHandler = func(context.Context) error { + r.removePackageHandler = func(ctx context.Context) error { pkgManifest, err := packages.ReadPackageManifestFromPackageRoot(r.packageRootPath) if err != nil { return fmt.Errorf("reading package manifest failed: %w", err) From 85bc8401928a498a4cecd3d93b2c6855fe1c4c2b Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Thu, 7 Mar 2024 11:32:01 +0100 Subject: [PATCH 30/32] Avoid cancelation of log dumps --- internal/testrunner/runners/system/runner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/testrunner/runners/system/runner.go b/internal/testrunner/runners/system/runner.go index c8f7e931a1..cfab74a285 100644 --- a/internal/testrunner/runners/system/runner.go +++ b/internal/testrunner/runners/system/runner.go @@ -462,7 +462,7 @@ func (r *runner) run(ctx context.Context) (results []testrunner.TestResult, err defer os.RemoveAll(tempDir) dumpOptions := stack.DumpOptions{Output: tempDir, Profile: r.options.Profile} - _, err = stack.Dump(ctx, dumpOptions) + _, err = stack.Dump(context.WithoutCancel(ctx), dumpOptions) if err != nil { return nil, fmt.Errorf("dump failed: %w", err) } From ecd46004ad1936135b18211d4eefe5f6b057ff3f Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Mon, 25 Mar 2024 19:58:44 +0100 Subject: [PATCH 31/32] Remove package if installation interrupted during asset tests --- internal/testrunner/runners/asset/runner.go | 26 ++++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git 
a/internal/testrunner/runners/asset/runner.go b/internal/testrunner/runners/asset/runner.go index 562af03fd7..c87bd76dc6 100644 --- a/internal/testrunner/runners/asset/runner.go +++ b/internal/testrunner/runners/asset/runner.go @@ -102,12 +102,8 @@ func (r *runner) run(ctx context.Context) ([]testrunner.TestResult, error) { if err != nil { return result.WithError(fmt.Errorf("can't create the package installer: %w", err)) } - installedPackage, err := packageInstaller.Install(ctx) - if err != nil { - return result.WithError(fmt.Errorf("can't install the package: %w", err)) - } - r.removePackageHandler = func(ctx context.Context) error { + removePackageHandler := func(ctx context.Context) error { pkgManifest, err := packages.ReadPackageManifestFromPackageRoot(r.packageRootPath) if err != nil { return fmt.Errorf("reading package manifest failed: %w", err) @@ -139,6 +135,20 @@ func (r *runner) run(ctx context.Context) ([]testrunner.TestResult, error) { return nil } + installedPackage, err := packageInstaller.Install(ctx) + if errors.Is(err, context.Canceled) { + // Installation interrupted, at this point the package may have been installed, try to remove it for cleanup. + err := removePackageHandler(context.WithoutCancel(ctx)) + if err != nil { + logger.Debugf("error while removing package after installation interrupted: %s", err) + } + } + if err != nil { + return result.WithError(fmt.Errorf("can't install the package: %w", err)) + } + + r.removePackageHandler = removePackageHandler + // No Elasticsearch asset is created when an Input package is installed through the API. // This would require to create a Agent policy and add that input package to the Agent policy. // As those input packages could have some required fields, it would also require to add @@ -177,10 +187,14 @@ func (r *runner) run(ctx context.Context) ([]testrunner.TestResult, error) { } func (r *runner) TearDown(ctx context.Context) error { + // Avoid cancellations during cleanup. 
+ cleanupCtx := context.WithoutCancel(ctx) + if r.removePackageHandler != nil { - if err := r.removePackageHandler(ctx); err != nil { + if err := r.removePackageHandler(cleanupCtx); err != nil { return err } + r.removePackageHandler = nil } return nil From 0051f330f78b91d086cd8aa179b932c66f37f150 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Mon, 25 Mar 2024 23:18:53 +0100 Subject: [PATCH 32/32] Remove package if installation interrupted during system tests --- internal/testrunner/runners/system/runner.go | 33 +++++++++++++------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/internal/testrunner/runners/system/runner.go b/internal/testrunner/runners/system/runner.go index 2235a79fb6..4938b88ab9 100644 --- a/internal/testrunner/runners/system/runner.go +++ b/internal/testrunner/runners/system/runner.go @@ -734,18 +734,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic return nil, fmt.Errorf("failed to initialize package installer: %v", err) } - if r.options.RunTearDown { - logger.Debug("Skip installing package") - } else { - // Allowed to re-install the package in RunTestsOnly to be able to - // test new changes introduced in the package - logger.Debug("Installing package...") - _, err = installer.Install(ctx) - if err != nil { - return nil, fmt.Errorf("failed to install package: %v", err) - } - } - r.deletePackageHandler = func(ctx context.Context) error { + deletePackageHandler := func(ctx context.Context) error { stackVersion, err := semver.NewVersion(serviceOptions.StackVersion) if err != nil { return fmt.Errorf("failed to parse stack version: %w", err) @@ -768,6 +757,26 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, servic return nil } + if r.options.RunTearDown { + logger.Debug("Skip installing package") + } else { + // Allowed to re-install the package in RunTestsOnly to be able to + // test new changes introduced in the package + logger.Debug("Installing 
package...") + _, err = installer.Install(ctx) + if errors.Is(err, context.Canceled) { + // Installation interrupted, at this point the package may have been installed, try to remove it for cleanup. + err := deletePackageHandler(context.WithoutCancel(ctx)) + if err != nil { + logger.Debugf("error while removing package after installation interrupted: %s", err) + } + } + if err != nil { + return nil, fmt.Errorf("failed to install package: %v", err) + } + } + r.deletePackageHandler = deletePackageHandler + // Configure package (single data stream) via Fleet APIs. var policy *kibana.Policy if r.options.RunTearDown || r.options.RunTestsOnly {