1 change: 1 addition & 0 deletions .ci/Jenkinsfile
@@ -103,6 +103,7 @@ pipeline {
'check-packages-with-kind': generateTestCommandStage(command: 'test-check-packages-with-kind', artifacts: ['build/test-results/*.xml', 'build/kubectl-dump.txt', 'build/elastic-stack-dump/check-*/logs/*.log', 'build/elastic-stack-dump/check-*/logs/fleet-server-internal/*'], junitArtifacts: true, publishCoverage: true),
'check-packages-other': generateTestCommandStage(command: 'test-check-packages-other', artifacts: ['build/test-results/*.xml', 'build/elastic-stack-dump/check-*/logs/*.log', 'build/elastic-stack-dump/check-*/logs/fleet-server-internal/*'], junitArtifacts: true, publishCoverage: true),
'check-packages-with-custom-agent': generateTestCommandStage(command: 'test-check-packages-with-custom-agent', artifacts: ['build/test-results/*.xml', 'build/elastic-stack-dump/check-*/logs/*.log', 'build/elastic-stack-dump/check-*/logs/fleet-server-internal/*'], junitArtifacts: true, publishCoverage: true),
'check-packages-benchmarks': generateTestCommandStage(command: 'test-check-packages-benchmarks', artifacts: ['build/test-results/*.xml', 'build/elastic-stack-dump/check-*/logs/*.log', 'build/elastic-stack-dump/check-*/logs/fleet-server-internal/*'], junitArtifacts: true, publishCoverage: false),
'build-zip': generateTestCommandStage(command: 'test-build-zip', artifacts: ['build/elastic-stack-dump/build-zip/logs/*.log', 'build/packages/*.sig']),
'profiles-command': generateTestCommandStage(command: 'test-profiles-command')
]
5 changes: 4 additions & 1 deletion Makefile
@@ -65,14 +65,17 @@ test-stack-command-8x:

test-stack-command: test-stack-command-default test-stack-command-7x test-stack-command-800 test-stack-command-8x

-test-check-packages: test-check-packages-with-kind test-check-packages-other test-check-packages-parallel test-check-packages-with-custom-agent
+test-check-packages: test-check-packages-with-kind test-check-packages-other test-check-packages-parallel test-check-packages-with-custom-agent test-check-packages-benchmarks

test-check-packages-with-kind:
	PACKAGE_TEST_TYPE=with-kind ./scripts/test-check-packages.sh

test-check-packages-other:
	PACKAGE_TEST_TYPE=other ./scripts/test-check-packages.sh

test-check-packages-benchmarks:
	PACKAGE_TEST_TYPE=benchmarks ./scripts/test-check-packages.sh

test-check-packages-parallel:
	PACKAGE_TEST_TYPE=parallel ./scripts/test-check-packages.sh

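For local verification, the new target mirrors the CI stage; both invocations below come straight from this diff:

```shell
# Run the benchmark package checks through the new Make target...
make test-check-packages-benchmarks

# ...or call the underlying script directly, as the target's recipe does.
PACKAGE_TEST_TYPE=benchmarks ./scripts/test-check-packages.sh
```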
11 changes: 11 additions & 0 deletions README.md
@@ -88,6 +88,17 @@ The command output shell completions information (for `bash`, `zsh`, `fish` and

Run `elastic-package completion` and follow the instructions for your shell.

### `elastic-package benchmark`

_Context: package_

Use this command to run benchmarks on a package. Currently, the following types of benchmarks are available:

#### Pipeline Benchmarks
These benchmarks allow you to benchmark any Ingest Node Pipelines defined by your packages.

For details on how to configure pipeline benchmarks for a package, review the [HOWTO guide](https://github.com/elastic/elastic-package/blob/main/docs/howto/pipeline_benchmarking.md).
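A sketch of typical usage from a package root (the `pipeline` subcommand name and the exact flag spellings follow the runner registration and `cobraext` flag constants added in this PR, so treat them as illustrative):

```shell
# Run pipeline benchmarks for the whole package, printing a human-readable
# report to stdout.
elastic-package benchmark pipeline

# Benchmark only selected data streams, and fail if none define benchmarks.
# (The data stream names here are illustrative.)
elastic-package benchmark pipeline -d access,error --fail-on-missing
```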

### `elastic-package build`

_Context: package_
194 changes: 194 additions & 0 deletions cmd/benchrunner.go
@@ -0,0 +1,194 @@
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.

package cmd

import (
	"fmt"
	"strings"

	"github.com/pkg/errors"
	"github.com/spf13/cobra"

	"github.com/elastic/elastic-package/internal/benchrunner"
	"github.com/elastic/elastic-package/internal/benchrunner/reporters/formats"
	"github.com/elastic/elastic-package/internal/benchrunner/reporters/outputs"
	_ "github.com/elastic/elastic-package/internal/benchrunner/runners" // register all benchmark runners
	"github.com/elastic/elastic-package/internal/cobraext"
	"github.com/elastic/elastic-package/internal/common"
	"github.com/elastic/elastic-package/internal/elasticsearch"
	"github.com/elastic/elastic-package/internal/packages"
	"github.com/elastic/elastic-package/internal/signal"
	"github.com/elastic/elastic-package/internal/testrunner"
)

const benchLongDescription = `Use this command to run benchmarks on a package. Currently, the following types of benchmarks are available:

#### Pipeline Benchmarks
These benchmarks allow you to benchmark any Ingest Node Pipelines defined by your packages.

For details on how to configure pipeline benchmarks for a package, review the [HOWTO guide](https://github.com/elastic/elastic-package/blob/main/docs/howto/pipeline_benchmarking.md).`

func setupBenchmarkCommand() *cobraext.Command {
	var benchTypeCmdActions []cobraext.CommandAction

	cmd := &cobra.Command{
		Use:   "benchmark",
		Short: "Run benchmarks for the package",
		Long:  benchLongDescription,
		RunE: func(cmd *cobra.Command, args []string) error {
			cmd.Println("Run benchmarks for the package")

			if len(args) > 0 {
				return fmt.Errorf("unsupported benchmark type: %s", args[0])
			}

			return cobraext.ComposeCommandActions(cmd, args, benchTypeCmdActions...)
		}}

	cmd.PersistentFlags().BoolP(cobraext.FailOnMissingFlagName, "m", false, cobraext.FailOnMissingFlagDescription)
	cmd.PersistentFlags().StringP(cobraext.ReportFormatFlagName, "", string(formats.ReportFormatHuman), cobraext.ReportFormatFlagDescription)
	cmd.PersistentFlags().StringP(cobraext.ReportOutputFlagName, "", string(outputs.ReportOutputSTDOUT), cobraext.ReportOutputFlagDescription)
	cmd.PersistentFlags().BoolP(cobraext.BenchWithTestSamplesFlagName, "", true, cobraext.BenchWithTestSamplesFlagDescription)

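	// Register one subcommand per benchmark runner pulled in by the blank
	// runners import above, collecting each action so the bare `benchmark`
	// command can compose and run all registered benchmark types.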
	for benchType, runner := range benchrunner.BenchRunners() {
		action := benchTypeCommandActionFactory(runner)
		benchTypeCmdActions = append(benchTypeCmdActions, action)

		benchTypeCmd := &cobra.Command{
			Use:   string(benchType),
			Short: fmt.Sprintf("Run %s benchmarks", runner.String()),
			Long:  fmt.Sprintf("Run %s benchmarks for the package.", runner.String()),
			RunE:  action,
		}

		benchTypeCmd.Flags().StringSliceP(cobraext.DataStreamsFlagName, "d", nil, cobraext.DataStreamsFlagDescription)

		cmd.AddCommand(benchTypeCmd)
	}

	return cobraext.NewCommand(cmd, cobraext.ContextPackage)
}

func benchTypeCommandActionFactory(runner benchrunner.BenchRunner) cobraext.CommandAction {
	benchType := runner.Type()
	return func(cmd *cobra.Command, args []string) error {
		cmd.Printf("Run %s benchmarks for the package\n", benchType)

		failOnMissing, err := cmd.Flags().GetBool(cobraext.FailOnMissingFlagName)
		if err != nil {
			return cobraext.FlagParsingError(err, cobraext.FailOnMissingFlagName)
		}
> **Review thread on lines +79 to +82**
>
> **Contributor:** Is it relevant for benchmark tests?
>
> **Contributor:** I guess it depends. I was wondering if it would be practical to add a config option to use the pipeline test events as samples, to avoid repetition and, basically, to be able to leverage benchmarks for any package with pipeline tests without changes. If added, it would fall back to looking up pipeline test samples, and I guess that in a scenario like this failing could be useful? WDYT?
>
> **Contributor:**
> > If added, it would fall back to looking up pipeline test samples, and I guess that in a scenario like this failing could be useful
>
> Yes, in this case, it makes sense.
>
> Based on your experience, how many cases would be covered by borrowing pipeline test events? Is it the majority or just a few samples? If that feature doesn't look to be popular, I would rather drop it.
>
> **Contributor:** I think it would be nice to aim to have this in some capacity as part of the CI, so I think all packages would end up using it in one way or another. cc @leehinman

		reportFormat, err := cmd.Flags().GetString(cobraext.ReportFormatFlagName)
		if err != nil {
			return cobraext.FlagParsingError(err, cobraext.ReportFormatFlagName)
		}

		reportOutput, err := cmd.Flags().GetString(cobraext.ReportOutputFlagName)
		if err != nil {
			return cobraext.FlagParsingError(err, cobraext.ReportOutputFlagName)
		}

		useTestSamples, err := cmd.Flags().GetBool(cobraext.BenchWithTestSamplesFlagName)
		if err != nil {
			return cobraext.FlagParsingError(err, cobraext.BenchWithTestSamplesFlagName)
		}

		packageRootPath, found, err := packages.FindPackageRoot()
		if !found {
			return errors.New("package root not found")
		}
		if err != nil {
			return errors.Wrap(err, "locating package root failed")
		}

		signal.Enable()

		var (
			benchFolders []testrunner.TestFolder
			dataStreams  []string
		)
		// We check for the existence of the data streams flag before trying to
		// parse it because if the root benchmark command is run instead of one of the
		// subcommands of benchmark, the data streams flag will not be defined.
		if cmd.Flags().Lookup(cobraext.DataStreamsFlagName) != nil {
			dataStreams, err = cmd.Flags().GetStringSlice(cobraext.DataStreamsFlagName)
			if err != nil {
				return cobraext.FlagParsingError(err, cobraext.DataStreamsFlagName)
			}
			common.TrimStringSlice(dataStreams)

			err = validateDataStreamsFlag(packageRootPath, dataStreams)
			if err != nil {
				return cobraext.FlagParsingError(err, cobraext.DataStreamsFlagName)
			}
		}

		benchFolders, err = benchrunner.FindBenchmarkFolders(packageRootPath, dataStreams, benchType)
		if err != nil {
			return errors.Wrap(err, "unable to determine benchmark folder paths")
		}

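		// Optionally reuse pipeline test events as benchmark samples, so packages
		// that already define pipeline tests can be benchmarked without extra setup.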
		if useTestSamples {
			testFolders, err := testrunner.FindTestFolders(packageRootPath, dataStreams, testrunner.TestType(benchType))
			if err != nil {
				return errors.Wrap(err, "unable to determine test folder paths")
			}
			benchFolders = append(benchFolders, testFolders...)
		}

		if failOnMissing && len(benchFolders) == 0 {
			if len(dataStreams) > 0 {
				return fmt.Errorf("no %s benchmarks found for %s data stream(s)", benchType, strings.Join(dataStreams, ","))
			}
			return fmt.Errorf("no %s benchmarks found", benchType)
		}

		esClient, err := elasticsearch.Client()
		if err != nil {
			return errors.Wrap(err, "can't create Elasticsearch client")
		}

		var results []*benchrunner.Result
		for _, folder := range benchFolders {
			r, err := benchrunner.Run(benchType, benchrunner.BenchOptions{
				Folder:          folder,
				PackageRootPath: packageRootPath,
				API:             esClient.API,
			})
			if err != nil {
				return errors.Wrapf(err, "error running package %s benchmarks", benchType)
			}

			results = append(results, r)
		}

> **Review comment** (Contributor, on the `benchrunner.Run` call): It would be great if you could run the benchmark tests as part of our CI pipeline. We could test it in a continuous way.

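		// Format all collected results with the requested reporter and write
		// one report per result (to a file or stdout, per the report output flag).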
		format := benchrunner.BenchReportFormat(reportFormat)
		benchReports, err := benchrunner.FormatReport(format, results)
		if err != nil {
			return errors.Wrap(err, "error formatting benchmark report")
		}

		m, err := packages.ReadPackageManifestFromPackageRoot(packageRootPath)
		if err != nil {
			return errors.Wrapf(err, "reading package manifest failed (path: %s)", packageRootPath)
		}

		for idx, report := range benchReports {
			if err := benchrunner.WriteReport(fmt.Sprintf("%s-%d", m.Name, idx+1), benchrunner.BenchReportOutput(reportOutput), report, format); err != nil {
				return errors.Wrap(err, "error writing benchmark report")
			}
		}

		// Check if there is any error or failure reported
		for _, r := range results {
			if r.ErrorMsg != "" {
				return fmt.Errorf("one or more benchmarks failed: %v", r.ErrorMsg)
			}
		}
		return nil
	}
}
1 change: 1 addition & 0 deletions cmd/root.go
@@ -15,6 +15,7 @@ import (
)

var commands = []*cobraext.Command{
	setupBenchmarkCommand(),
	setupBuildCommand(),
	setupChangelogCommand(),
	setupCheckCommand(),