Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
d86dd39
Add benchmarks to pipeline tests
adriansr Jun 29, 2022
9364cff
gofumpt
adriansr Jul 21, 2022
ded422d
Minor rename
adriansr Jul 22, 2022
e097ac1
Generate benchmark output files per datastream
adriansr Jul 22, 2022
f828106
Filter-out detailed reports in xUnit
adriansr Jul 22, 2022
ea3760c
Cleanup config options
adriansr Jul 22, 2022
5c16fe2
Move benchmark code to its own command
marc-gr Sep 1, 2022
8f756bc
Extract common ingest pipeline code
marc-gr Sep 5, 2022
1f4c474
Remove unused code from benchmark runner
marc-gr Sep 5, 2022
26ba4cc
Benchmark runner reporting
marc-gr Sep 5, 2022
254a262
Make benchmarks have a dedicated _dev config folder
marc-gr Sep 5, 2022
689bb4b
Add doc
marc-gr Sep 5, 2022
b836bf9
Merge remote-tracking branch 'upstream/main' into bench_pipelines
marc-gr Sep 5, 2022
4d0f3d3
Remove unused method after merge
marc-gr Sep 5, 2022
0d81b4f
Re-generate readme
marc-gr Sep 5, 2022
0456c35
Fix benchmark commands in doc
marc-gr Sep 5, 2022
1ba3790
Add fallback to use pipeline test samples
marc-gr Sep 6, 2022
3c50c8e
Add CI testing for benchmarks
marc-gr Sep 6, 2022
a029a95
Add output to doc, undo script change, change test packages PR number
marc-gr Sep 7, 2022
51eafe2
Merge remote-tracking branch 'upstream/main' into bench_pipelines
marc-gr Sep 7, 2022
cf2d739
Make suggested changes:
marc-gr Sep 8, 2022
0124831
readme update
marc-gr Sep 8, 2022
b66b39a
Merge remote-tracking branch 'upstream/main' into bench_pipelines
marc-gr Sep 8, 2022
f5e8b8e
Merge remote-tracking branch 'upstream/main' into bench_pipelines
marc-gr Sep 8, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .ci/Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,7 @@ pipeline {
'check-packages-with-kind': generateTestCommandStage(command: 'test-check-packages-with-kind', artifacts: ['build/test-results/*.xml', 'build/kubectl-dump.txt', 'build/elastic-stack-dump/check-*/logs/*.log', 'build/elastic-stack-dump/check-*/logs/fleet-server-internal/*'], junitArtifacts: true, publishCoverage: true),
'check-packages-other': generateTestCommandStage(command: 'test-check-packages-other', artifacts: ['build/test-results/*.xml', 'build/elastic-stack-dump/check-*/logs/*.log', 'build/elastic-stack-dump/check-*/logs/fleet-server-internal/*'], junitArtifacts: true, publishCoverage: true),
'check-packages-with-custom-agent': generateTestCommandStage(command: 'test-check-packages-with-custom-agent', artifacts: ['build/test-results/*.xml', 'build/elastic-stack-dump/check-*/logs/*.log', 'build/elastic-stack-dump/check-*/logs/fleet-server-internal/*'], junitArtifacts: true, publishCoverage: true),
'check-packages-benchmarks': generateTestCommandStage(command: 'test-check-packages-benchmarks', artifacts: ['build/test-results/*.xml', 'build/elastic-stack-dump/check-*/logs/*.log', 'build/elastic-stack-dump/check-*/logs/fleet-server-internal/*'], junitArtifacts: true, publishCoverage: false),
'build-zip': generateTestCommandStage(command: 'test-build-zip', artifacts: ['build/elastic-stack-dump/build-zip/logs/*.log', 'build/packages/*.sig']),
'profiles-command': generateTestCommandStage(command: 'test-profiles-command')
]
Expand Down
5 changes: 4 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -65,14 +65,17 @@ test-stack-command-8x:

test-stack-command: test-stack-command-default test-stack-command-7x test-stack-command-800 test-stack-command-8x

test-check-packages: test-check-packages-with-kind test-check-packages-other test-check-packages-parallel test-check-packages-with-custom-agent
test-check-packages: test-check-packages-with-kind test-check-packages-other test-check-packages-parallel test-check-packages-with-custom-agent test-check-packages-benchmarks

test-check-packages-with-kind:
PACKAGE_TEST_TYPE=with-kind ./scripts/test-check-packages.sh

test-check-packages-other:
PACKAGE_TEST_TYPE=other ./scripts/test-check-packages.sh

test-check-packages-benchmarks:
PACKAGE_TEST_TYPE=benchmarks ./scripts/test-check-packages.sh

test-check-packages-parallel:
PACKAGE_TEST_TYPE=parallel ./scripts/test-check-packages.sh

Expand Down
12 changes: 12 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,18 @@ The command output shell completions information (for `bash`, `zsh`, `fish` and

Run `elastic-package completion` and follow the instruction for your shell.

### `elastic-package benchmark`

_Context: package_

Use this command to run benchmarks on a package. Currently, the following types of benchmarks are available:

#### Pipeline Benchmarks

These benchmarks allow you to benchmark any Ingest Node Pipelines defined by your packages.

For details on how to configure pipeline benchmarks for a package, review the [HOWTO guide](./docs/howto/pipeline_benchmarking.md).

### `elastic-package build`

_Context: package_
Expand Down
196 changes: 196 additions & 0 deletions cmd/benchmark.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,196 @@
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.

package cmd

import (
"fmt"
"strings"

"github.com/pkg/errors"
"github.com/spf13/cobra"

"github.com/elastic/elastic-package/internal/benchrunner"
"github.com/elastic/elastic-package/internal/benchrunner/reporters/formats"
"github.com/elastic/elastic-package/internal/benchrunner/reporters/outputs"
_ "github.com/elastic/elastic-package/internal/benchrunner/runners" // register all benchmark runners
"github.com/elastic/elastic-package/internal/cobraext"
"github.com/elastic/elastic-package/internal/common"
"github.com/elastic/elastic-package/internal/elasticsearch"
"github.com/elastic/elastic-package/internal/packages"
"github.com/elastic/elastic-package/internal/signal"
"github.com/elastic/elastic-package/internal/testrunner"
)

// benchLongDescription is the long help text for the "benchmark" command.
// It is markdown-formatted because it is also rendered into the generated README.
const benchLongDescription = `Use this command to run benchmarks on a package. Currently, the following types of benchmarks are available:

#### Pipeline Benchmarks

These benchmarks allow you to benchmark any Ingest Node Pipelines defined by your packages.

For details on how to configure pipeline benchmarks for a package, review the [HOWTO guide](./docs/howto/pipeline_benchmarking.md).`

// setupBenchmarkCommand builds the "benchmark" parent command plus one
// subcommand per registered benchmark runner. Invoking the parent command
// without a subcommand runs every registered benchmark type in sequence.
func setupBenchmarkCommand() *cobraext.Command {
	var benchTypeCmdActions []cobraext.CommandAction

	cmd := &cobra.Command{
		Use:   "benchmark",
		Short: "Run benchmarks for the package",
		Long:  benchLongDescription,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate arguments before printing the banner so an invalid
			// benchmark type fails fast without misleading output.
			if len(args) > 0 {
				return fmt.Errorf("unsupported benchmark type: %s", args[0])
			}

			cmd.Println("Run benchmarks for the package")

			// No explicit type given: run all registered benchmark types.
			return cobraext.ComposeCommandActions(cmd, args, benchTypeCmdActions...)
		},
	}

	// Flags shared by every benchmark type.
	cmd.PersistentFlags().BoolP(cobraext.FailOnMissingFlagName, "m", false, cobraext.FailOnMissingFlagDescription)
	cmd.PersistentFlags().StringP(cobraext.ReportFormatFlagName, "", string(formats.ReportFormatHuman), cobraext.ReportFormatFlagDescription)
	cmd.PersistentFlags().StringP(cobraext.ReportOutputFlagName, "", string(outputs.ReportOutputSTDOUT), cobraext.ReportOutputFlagDescription)
	cmd.PersistentFlags().BoolP(cobraext.BenchWithTestSamplesFlagName, "", true, cobraext.BenchWithTestSamplesFlagDescription)
	cmd.PersistentFlags().IntP(cobraext.BenchNumTopProcsFlagName, "", 10, cobraext.BenchNumTopProcsFlagDescription)
	cmd.PersistentFlags().StringSliceP(cobraext.DataStreamsFlagName, "", nil, cobraext.DataStreamsFlagDescription)

	// One subcommand per registered runner (e.g. "pipeline").
	for benchType, runner := range benchrunner.BenchRunners() {
		action := benchTypeCommandActionFactory(runner)
		benchTypeCmdActions = append(benchTypeCmdActions, action)

		benchTypeCmd := &cobra.Command{
			Use:   string(benchType),
			Short: fmt.Sprintf("Run %s benchmarks", runner.String()),
			Long:  fmt.Sprintf("Run %s benchmarks for the package.", runner.String()),
			RunE:  action,
		}

		// Local flag shadows the persistent one to expose the "-d" shorthand.
		benchTypeCmd.Flags().StringSliceP(cobraext.DataStreamsFlagName, "d", nil, cobraext.DataStreamsFlagDescription)

		cmd.AddCommand(benchTypeCmd)
	}

	return cobraext.NewCommand(cmd, cobraext.ContextPackage)
}

// benchTypeCommandActionFactory returns the cobra action that runs all
// benchmarks of the given runner's type for the current package: it resolves
// flags, locates benchmark (and optionally test) folders, runs each one
// against Elasticsearch, and writes one formatted report per result.
func benchTypeCommandActionFactory(runner benchrunner.BenchRunner) cobraext.CommandAction {
	benchType := runner.Type()
	return func(cmd *cobra.Command, args []string) error {
		cmd.Printf("Run %s benchmarks for the package\n", benchType)

		failOnMissing, err := cmd.Flags().GetBool(cobraext.FailOnMissingFlagName)
		if err != nil {
			return cobraext.FlagParsingError(err, cobraext.FailOnMissingFlagName)
		}

		reportFormat, err := cmd.Flags().GetString(cobraext.ReportFormatFlagName)
		if err != nil {
			return cobraext.FlagParsingError(err, cobraext.ReportFormatFlagName)
		}

		reportOutput, err := cmd.Flags().GetString(cobraext.ReportOutputFlagName)
		if err != nil {
			return cobraext.FlagParsingError(err, cobraext.ReportOutputFlagName)
		}

		useTestSamples, err := cmd.Flags().GetBool(cobraext.BenchWithTestSamplesFlagName)
		if err != nil {
			return cobraext.FlagParsingError(err, cobraext.BenchWithTestSamplesFlagName)
		}

		numTopProcs, err := cmd.Flags().GetInt(cobraext.BenchNumTopProcsFlagName)
		if err != nil {
			return cobraext.FlagParsingError(err, cobraext.BenchNumTopProcsFlagName)
		}

		// Check the error before the "found" flag: a genuine lookup failure
		// must not be masked by the generic "package root not found" message.
		packageRootPath, found, err := packages.FindPackageRoot()
		if err != nil {
			return errors.Wrap(err, "locating package root failed")
		}
		if !found {
			return errors.New("package root not found")
		}

		dataStreams, err := cmd.Flags().GetStringSlice(cobraext.DataStreamsFlagName)
		if err != nil {
			return cobraext.FlagParsingError(err, cobraext.DataStreamsFlagName)
		}

		if len(dataStreams) > 0 {
			common.TrimStringSlice(dataStreams)

			if err := validateDataStreamsFlag(packageRootPath, dataStreams); err != nil {
				return cobraext.FlagParsingError(err, cobraext.DataStreamsFlagName)
			}
		}

		signal.Enable()

		benchFolders, err := benchrunner.FindBenchmarkFolders(packageRootPath, dataStreams, benchType)
		if err != nil {
			return errors.Wrap(err, "unable to determine benchmark folder paths")
		}

		// Optionally reuse pipeline test sample folders as benchmark inputs.
		if useTestSamples {
			testFolders, err := testrunner.FindTestFolders(packageRootPath, dataStreams, testrunner.TestType(benchType))
			if err != nil {
				return errors.Wrap(err, "unable to determine test folder paths")
			}
			benchFolders = append(benchFolders, testFolders...)
		}

		if failOnMissing && len(benchFolders) == 0 {
			if len(dataStreams) > 0 {
				return fmt.Errorf("no %s benchmarks found for %s data stream(s)", benchType, strings.Join(dataStreams, ","))
			}
			return fmt.Errorf("no %s benchmarks found", benchType)
		}

		esClient, err := elasticsearch.Client()
		if err != nil {
			return errors.Wrap(err, "can't create Elasticsearch client")
		}

		var results []*benchrunner.Result
		for _, folder := range benchFolders {
			r, err := benchrunner.Run(benchType, benchrunner.BenchOptions{
				Folder:          folder,
				PackageRootPath: packageRootPath,
				API:             esClient.API,
				NumTopProcs:     numTopProcs,
			})
			if err != nil {
				return errors.Wrapf(err, "error running package %s benchmarks", benchType)
			}

			results = append(results, r)
		}

		format := benchrunner.BenchReportFormat(reportFormat)
		benchReports, err := benchrunner.FormatReport(format, results)
		if err != nil {
			return errors.Wrap(err, "error formatting benchmark report")
		}

		m, err := packages.ReadPackageManifestFromPackageRoot(packageRootPath)
		if err != nil {
			return errors.Wrapf(err, "reading package manifest failed (path: %s)", packageRootPath)
		}

		// One report per result; the numeric suffix keeps output names unique.
		for idx, report := range benchReports {
			if err := benchrunner.WriteReport(fmt.Sprintf("%s-%d", m.Name, idx+1), benchrunner.BenchReportOutput(reportOutput), report, format); err != nil {
				return errors.Wrap(err, "error writing benchmark report")
			}
		}

		// Surface per-benchmark failures only after all reports were written,
		// so partial results are still reported.
		for _, r := range results {
			if r.ErrorMsg != "" {
				return fmt.Errorf("one or more benchmarks failed: %v", r.ErrorMsg)
			}
		}
		return nil
	}
}
1 change: 1 addition & 0 deletions cmd/root.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import (
)

var commands = []*cobraext.Command{
setupBenchmarkCommand(),
setupBuildCommand(),
setupChangelogCommand(),
setupCheckCommand(),
Expand Down
Loading