Add benchmarks to pipeline tests #906
Changes from all commits
d86dd39
9364cff
ded422d
e097ac1
f828106
ea3760c
5c16fe2
8f756bc
1f4c474
26ba4cc
254a262
689bb4b
b836bf9
4d0f3d3
0d81b4f
0456c35
1ba3790
3c50c8e
a029a95
51eafe2
The diff shown adds a new file (hunk `@@ -0,0 +1,194 @@`) implementing the `benchmark` command:

```go
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.

package cmd

import (
    "fmt"
    "strings"

    "github.com/pkg/errors"
    "github.com/spf13/cobra"

    "github.com/elastic/elastic-package/internal/benchrunner"
    "github.com/elastic/elastic-package/internal/benchrunner/reporters/formats"
    "github.com/elastic/elastic-package/internal/benchrunner/reporters/outputs"
    _ "github.com/elastic/elastic-package/internal/benchrunner/runners" // register all benchmark runners
    "github.com/elastic/elastic-package/internal/cobraext"
    "github.com/elastic/elastic-package/internal/common"
    "github.com/elastic/elastic-package/internal/elasticsearch"
    "github.com/elastic/elastic-package/internal/packages"
    "github.com/elastic/elastic-package/internal/signal"
    "github.com/elastic/elastic-package/internal/testrunner"
)

const benchLongDescription = `Use this command to run benchmarks on a package. Currently, the following types of benchmarks are available:

#### Pipeline Benchmarks
These benchmarks allow you to benchmark any Ingest Node Pipelines defined by your packages.

For details on how to configure pipeline benchmarks for a package, review the [HOWTO guide](https://github.com/elastic/elastic-package/blob/main/docs/howto/pipeline_benchmarking.md).`

func setupBenchmarkCommand() *cobraext.Command {
    var benchTypeCmdActions []cobraext.CommandAction

    cmd := &cobra.Command{
        Use:   "benchmark",
        Short: "Run benchmarks for the package",
        Long:  benchLongDescription,
        RunE: func(cmd *cobra.Command, args []string) error {
            cmd.Println("Run benchmarks for the package")

            if len(args) > 0 {
                return fmt.Errorf("unsupported benchmark type: %s", args[0])
            }

            return cobraext.ComposeCommandActions(cmd, args, benchTypeCmdActions...)
        }}

    cmd.PersistentFlags().BoolP(cobraext.FailOnMissingFlagName, "m", false, cobraext.FailOnMissingFlagDescription)
    cmd.PersistentFlags().StringP(cobraext.ReportFormatFlagName, "", string(formats.ReportFormatHuman), cobraext.ReportFormatFlagDescription)
    cmd.PersistentFlags().StringP(cobraext.ReportOutputFlagName, "", string(outputs.ReportOutputSTDOUT), cobraext.ReportOutputFlagDescription)
    cmd.PersistentFlags().BoolP(cobraext.BenchWithTestSamplesFlagName, "", true, cobraext.BenchWithTestSamplesFlagDescription)

    for benchType, runner := range benchrunner.BenchRunners() {
        action := benchTypeCommandActionFactory(runner)
        benchTypeCmdActions = append(benchTypeCmdActions, action)

        benchTypeCmd := &cobra.Command{
            Use:   string(benchType),
            Short: fmt.Sprintf("Run %s benchmarks", runner.String()),
            Long:  fmt.Sprintf("Run %s benchmarks for the package.", runner.String()),
            RunE:  action,
        }

        benchTypeCmd.Flags().StringSliceP(cobraext.DataStreamsFlagName, "d", nil, cobraext.DataStreamsFlagDescription)

        cmd.AddCommand(benchTypeCmd)
    }

    return cobraext.NewCommand(cmd, cobraext.ContextPackage)
}

func benchTypeCommandActionFactory(runner benchrunner.BenchRunner) cobraext.CommandAction {
    benchType := runner.Type()
    return func(cmd *cobra.Command, args []string) error {
        cmd.Printf("Run %s benchmarks for the package\n", benchType)

        failOnMissing, err := cmd.Flags().GetBool(cobraext.FailOnMissingFlagName)
        if err != nil {
            return cobraext.FlagParsingError(err, cobraext.FailOnMissingFlagName)
        }
```
Comment on lines +79 to +82:

Is it relevant for benchmark tests?

I guess it depends. I was wondering if it would be practical to add a config option to use the pipeline test events as samples, to avoid repetition and, basically, to be able to leverage benchmarks for any package with pipeline tests without changes. If added, it would fall back to looking up pipeline test samples, and I guess that in a scenario like this, failing could be useful. WDYT?

Yes, in this case it makes sense. Based on your experience, how many cases would be covered by borrowing pipeline test events? Is it the majority or just a few samples? If that feature doesn't look like it will be popular, I would rather drop it.

I think it would be nice to aim to have this in some capacity as part of the CI, so I think all packages would end up using it in one way or another. cc @leehinman
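The option under discussion ends up in this PR as a boolean flag that defaults to true. As a quick reference for the thread above, here is a condensed sketch of the fallback, lifted from the command action later in the diff; the helper name `collectBenchmarkFolders` and the `benchrunner.BenchType` parameter type are assumptions, while the calls themselves appear verbatim in the code below.

```go
// Condensed sketch of the folder-discovery fallback implemented later in this
// diff. collectBenchmarkFolders is a hypothetical helper name and the
// benchrunner.BenchType parameter type is assumed; the calls are taken from
// the command action below.
package cmd

import (
    "github.com/pkg/errors"

    "github.com/elastic/elastic-package/internal/benchrunner"
    "github.com/elastic/elastic-package/internal/testrunner"
)

func collectBenchmarkFolders(packageRootPath string, dataStreams []string, benchType benchrunner.BenchType, useTestSamples bool) ([]testrunner.TestFolder, error) {
    // Dedicated benchmark folders are always collected.
    folders, err := benchrunner.FindBenchmarkFolders(packageRootPath, dataStreams, benchType)
    if err != nil {
        return nil, errors.Wrap(err, "unable to determine benchmark folder paths")
    }

    // When the test-samples flag (default true in this PR) is enabled, the
    // package's pipeline test folders are appended so their events double as
    // benchmark samples, as discussed in the thread above.
    if useTestSamples {
        testFolders, err := testrunner.FindTestFolders(packageRootPath, dataStreams, testrunner.TestType(benchType))
        if err != nil {
            return nil, errors.Wrap(err, "unable to determine test folder paths")
        }
        folders = append(folders, testFolders...)
    }
    return folders, nil
}
```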
The command action continues:

```go
        reportFormat, err := cmd.Flags().GetString(cobraext.ReportFormatFlagName)
        if err != nil {
            return cobraext.FlagParsingError(err, cobraext.ReportFormatFlagName)
        }

        reportOutput, err := cmd.Flags().GetString(cobraext.ReportOutputFlagName)
        if err != nil {
            return cobraext.FlagParsingError(err, cobraext.ReportOutputFlagName)
        }

        useTestSamples, err := cmd.Flags().GetBool(cobraext.BenchWithTestSamplesFlagName)
        if err != nil {
            return cobraext.FlagParsingError(err, cobraext.BenchWithTestSamplesFlagName)
        }

        packageRootPath, found, err := packages.FindPackageRoot()
        if !found {
            return errors.New("package root not found")
        }
        if err != nil {
            return errors.Wrap(err, "locating package root failed")
        }

        signal.Enable()

        var (
            benchFolders []testrunner.TestFolder
            dataStreams  []string
        )
        // We check for the existence of the data streams flag before trying to
        // parse it because if the root benchmark command is run instead of one of the
        // subcommands of benchmark, the data streams flag will not be defined.
        if cmd.Flags().Lookup(cobraext.DataStreamsFlagName) != nil {
            dataStreams, err = cmd.Flags().GetStringSlice(cobraext.DataStreamsFlagName)
            common.TrimStringSlice(dataStreams)
            if err != nil {
                return cobraext.FlagParsingError(err, cobraext.DataStreamsFlagName)
            }

            err = validateDataStreamsFlag(packageRootPath, dataStreams)
            if err != nil {
                return cobraext.FlagParsingError(err, cobraext.DataStreamsFlagName)
            }
        }

        benchFolders, err = benchrunner.FindBenchmarkFolders(packageRootPath, dataStreams, benchType)
        if err != nil {
            return errors.Wrap(err, "unable to determine benchmark folder paths")
        }

        if useTestSamples {
            testFolders, err := testrunner.FindTestFolders(packageRootPath, dataStreams, testrunner.TestType(benchType))
            if err != nil {
                return errors.Wrap(err, "unable to determine test folder paths")
            }
            benchFolders = append(benchFolders, testFolders...)
        }

        if failOnMissing && len(benchFolders) == 0 {
            if len(dataStreams) > 0 {
                return fmt.Errorf("no %s benchmarks found for %s data stream(s)", benchType, strings.Join(dataStreams, ","))
            }
            return fmt.Errorf("no %s benchmarks found", benchType)
        }

        esClient, err := elasticsearch.Client()
        if err != nil {
            return errors.Wrap(err, "can't create Elasticsearch client")
        }

        var results []*benchrunner.Result
        for _, folder := range benchFolders {
            r, err := benchrunner.Run(benchType, benchrunner.BenchOptions{
```
A review comment is attached to the `benchrunner.Run` call:

It would be great if you could run the benchmark tests as part of our CI pipeline. We could test them in a continuous way.
The remaining fields of the call and the reporting logic complete the file:

```go
                Folder:          folder,
                PackageRootPath: packageRootPath,
                API:             esClient.API,
            })

            if err != nil {
                return errors.Wrapf(err, "error running package %s benchmarks", benchType)
            }

            results = append(results, r)
        }

        format := benchrunner.BenchReportFormat(reportFormat)
        benchReports, err := benchrunner.FormatReport(format, results)
        if err != nil {
            return errors.Wrap(err, "error formatting benchmark report")
        }

        m, err := packages.ReadPackageManifestFromPackageRoot(packageRootPath)
        if err != nil {
            return errors.Wrapf(err, "reading package manifest failed (path: %s)", packageRootPath)
        }

        for idx, report := range benchReports {
            if err := benchrunner.WriteReport(fmt.Sprintf("%s-%d", m.Name, idx+1), benchrunner.BenchReportOutput(reportOutput), report, format); err != nil {
                return errors.Wrap(err, "error writing benchmark report")
            }
        }

        // Check if there is any error or failure reported
        for _, r := range results {
            if r.ErrorMsg != "" {
                return fmt.Errorf("one or more benchmarks failed: %v", r.ErrorMsg)
            }
        }
        return nil
    }
}
```
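For readers unfamiliar with the registration pattern the command relies on: the blank import of `internal/benchrunner/runners` is what populates `benchrunner.BenchRunners()`, presumably via `init` functions in the runners package. That side of the code is not part of this diff, so the sketch below is a hypothetical reconstruction: `BenchType`, `RegisterRunner`, and the map layout are guesses; only the `BenchRunner` methods `Type()` and `String()` and the `BenchRunners()` accessor appear in the PR.

```go
// Hypothetical sketch of how the benchrunner registry might look; only the
// names used by the command above (BenchRunner, BenchRunners, Type, String)
// come from the diff, everything else is an assumption.
package benchrunner

// BenchType identifies a kind of benchmark (e.g. "pipeline").
type BenchType string

// BenchRunner is the subset of the interface visible in the diff; the real
// interface likely has more methods.
type BenchRunner interface {
    Type() BenchType
    String() string
}

// runners is the assumed package-level registry filled at init time.
var runners = map[BenchType]BenchRunner{}

// RegisterRunner is an assumed hook that each runner under
// internal/benchrunner/runners would call from init(), which is why the
// command only needs a blank import to see every benchmark type.
func RegisterRunner(r BenchRunner) {
    runners[r.Type()] = r
}

// BenchRunners returns the registered runners; setupBenchmarkCommand iterates
// this map to create one subcommand per benchmark type.
func BenchRunners() map[BenchType]BenchRunner {
    return runners
}
```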