Skip to content

Commit e0f49ac

Browse files
marc-gr and adriansr authored
Add benchmarks to pipeline tests (#958)
* Add benchmarks to pipeline tests
* gofumpt
* Minor rename
* Generate benchmark output files per datastream. xUnit format doesn't allow multiple results in a single file.
* Filter out detailed reports in xUnit
* Cleanup config options
* Move benchmark code to its own command
* Extract common ingest pipeline code
* Remove unused code from benchmark runner
* Benchmark runner reporting
* Make benchmarks have a dedicated _dev config folder
* Add doc
* Remove unused method after merge
* Re-generate readme
* Fix benchmark commands in doc
* Add fallback to use pipeline test samples
* Add CI testing for benchmarks
* Add output to doc, undo script change, change test packages PR number
* Make suggested changes:
  - Rename benchrunner.go -> benchmark.go
  - Add data-streams as a persistent flag
  - Add num-top-procs flag
  - Implement Stringer for BenchmarkValue
  - Rename benchFmtd -> benchFormatted
  - Make constants global
* readme update

Co-authored-by: Adrian Serrano <[email protected]>
1 parent 8d330f0 commit e0f49ac

File tree

46 files changed

+2220
-96
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

46 files changed

+2220
-96
lines changed

.ci/Jenkinsfile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@ pipeline {
103103
'check-packages-with-kind': generateTestCommandStage(command: 'test-check-packages-with-kind', artifacts: ['build/test-results/*.xml', 'build/kubectl-dump.txt', 'build/elastic-stack-dump/check-*/logs/*.log', 'build/elastic-stack-dump/check-*/logs/fleet-server-internal/*'], junitArtifacts: true, publishCoverage: true),
104104
'check-packages-other': generateTestCommandStage(command: 'test-check-packages-other', artifacts: ['build/test-results/*.xml', 'build/elastic-stack-dump/check-*/logs/*.log', 'build/elastic-stack-dump/check-*/logs/fleet-server-internal/*'], junitArtifacts: true, publishCoverage: true),
105105
'check-packages-with-custom-agent': generateTestCommandStage(command: 'test-check-packages-with-custom-agent', artifacts: ['build/test-results/*.xml', 'build/elastic-stack-dump/check-*/logs/*.log', 'build/elastic-stack-dump/check-*/logs/fleet-server-internal/*'], junitArtifacts: true, publishCoverage: true),
106+
'check-packages-benchmarks': generateTestCommandStage(command: 'test-check-packages-benchmarks', artifacts: ['build/test-results/*.xml', 'build/elastic-stack-dump/check-*/logs/*.log', 'build/elastic-stack-dump/check-*/logs/fleet-server-internal/*'], junitArtifacts: true, publishCoverage: false),
106107
'build-zip': generateTestCommandStage(command: 'test-build-zip', artifacts: ['build/elastic-stack-dump/build-zip/logs/*.log', 'build/packages/*.sig']),
107108
'profiles-command': generateTestCommandStage(command: 'test-profiles-command')
108109
]

Makefile

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -65,14 +65,17 @@ test-stack-command-8x:
6565

6666
test-stack-command: test-stack-command-default test-stack-command-7x test-stack-command-800 test-stack-command-8x
6767

68-
test-check-packages: test-check-packages-with-kind test-check-packages-other test-check-packages-parallel test-check-packages-with-custom-agent
68+
test-check-packages: test-check-packages-with-kind test-check-packages-other test-check-packages-parallel test-check-packages-with-custom-agent test-check-packages-benchmarks
6969

7070
test-check-packages-with-kind:
7171
PACKAGE_TEST_TYPE=with-kind ./scripts/test-check-packages.sh
7272

7373
test-check-packages-other:
7474
PACKAGE_TEST_TYPE=other ./scripts/test-check-packages.sh
7575

76+
test-check-packages-benchmarks:
77+
PACKAGE_TEST_TYPE=benchmarks ./scripts/test-check-packages.sh
78+
7679
test-check-packages-parallel:
7780
PACKAGE_TEST_TYPE=parallel ./scripts/test-check-packages.sh
7881

README.md

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,18 @@ The command output shell completions information (for `bash`, `zsh`, `fish` and
8888

8989
Run `elastic-package completion` and follow the instruction for your shell.
9090

91+
### `elastic-package benchmark`
92+
93+
_Context: package_
94+
95+
Use this command to run benchmarks on a package. Currently, the following types of benchmarks are available:
96+
97+
#### Pipeline Benchmarks
98+
99+
These benchmarks allow you to benchmark any Ingest Node Pipelines defined by your packages.
100+
101+
For details on how to configure pipeline benchmarks for a package, review the [HOWTO guide](./docs/howto/pipeline_benchmarking.md).
102+
91103
### `elastic-package build`
92104

93105
_Context: package_

cmd/benchmark.go

Lines changed: 196 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,196 @@
1+
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
2+
// or more contributor license agreements. Licensed under the Elastic License;
3+
// you may not use this file except in compliance with the Elastic License.
4+
5+
package cmd
6+
7+
import (
8+
"fmt"
9+
"strings"
10+
11+
"github.com/pkg/errors"
12+
"github.com/spf13/cobra"
13+
14+
"github.com/elastic/elastic-package/internal/benchrunner"
15+
"github.com/elastic/elastic-package/internal/benchrunner/reporters/formats"
16+
"github.com/elastic/elastic-package/internal/benchrunner/reporters/outputs"
17+
_ "github.com/elastic/elastic-package/internal/benchrunner/runners" // register all benchmark runners
18+
"github.com/elastic/elastic-package/internal/cobraext"
19+
"github.com/elastic/elastic-package/internal/common"
20+
"github.com/elastic/elastic-package/internal/elasticsearch"
21+
"github.com/elastic/elastic-package/internal/packages"
22+
"github.com/elastic/elastic-package/internal/signal"
23+
"github.com/elastic/elastic-package/internal/testrunner"
24+
)
25+
26+
const benchLongDescription = `Use this command to run benchmarks on a package. Currently, the following types of benchmarks are available:
27+
28+
#### Pipeline Benchmarks
29+
30+
These benchmarks allow you to benchmark any Ingest Node Pipelines defined by your packages.
31+
32+
For details on how to configure pipeline benchmarks for a package, review the [HOWTO guide](./docs/howto/pipeline_benchmarking.md).`
33+
34+
func setupBenchmarkCommand() *cobraext.Command {
35+
var benchTypeCmdActions []cobraext.CommandAction
36+
37+
cmd := &cobra.Command{
38+
Use: "benchmark",
39+
Short: "Run benchmarks for the package",
40+
Long: benchLongDescription,
41+
RunE: func(cmd *cobra.Command, args []string) error {
42+
cmd.Println("Run benchmarks for the package")
43+
44+
if len(args) > 0 {
45+
return fmt.Errorf("unsupported benchmark type: %s", args[0])
46+
}
47+
48+
return cobraext.ComposeCommandActions(cmd, args, benchTypeCmdActions...)
49+
}}
50+
51+
cmd.PersistentFlags().BoolP(cobraext.FailOnMissingFlagName, "m", false, cobraext.FailOnMissingFlagDescription)
52+
cmd.PersistentFlags().StringP(cobraext.ReportFormatFlagName, "", string(formats.ReportFormatHuman), cobraext.ReportFormatFlagDescription)
53+
cmd.PersistentFlags().StringP(cobraext.ReportOutputFlagName, "", string(outputs.ReportOutputSTDOUT), cobraext.ReportOutputFlagDescription)
54+
cmd.PersistentFlags().BoolP(cobraext.BenchWithTestSamplesFlagName, "", true, cobraext.BenchWithTestSamplesFlagDescription)
55+
cmd.PersistentFlags().IntP(cobraext.BenchNumTopProcsFlagName, "", 10, cobraext.BenchNumTopProcsFlagDescription)
56+
cmd.PersistentFlags().StringSliceP(cobraext.DataStreamsFlagName, "", nil, cobraext.DataStreamsFlagDescription)
57+
58+
for benchType, runner := range benchrunner.BenchRunners() {
59+
action := benchTypeCommandActionFactory(runner)
60+
benchTypeCmdActions = append(benchTypeCmdActions, action)
61+
62+
benchTypeCmd := &cobra.Command{
63+
Use: string(benchType),
64+
Short: fmt.Sprintf("Run %s benchmarks", runner.String()),
65+
Long: fmt.Sprintf("Run %s benchmarks for the package.", runner.String()),
66+
RunE: action,
67+
}
68+
69+
benchTypeCmd.Flags().StringSliceP(cobraext.DataStreamsFlagName, "d", nil, cobraext.DataStreamsFlagDescription)
70+
71+
cmd.AddCommand(benchTypeCmd)
72+
}
73+
74+
return cobraext.NewCommand(cmd, cobraext.ContextPackage)
75+
}
76+
77+
func benchTypeCommandActionFactory(runner benchrunner.BenchRunner) cobraext.CommandAction {
78+
benchType := runner.Type()
79+
return func(cmd *cobra.Command, args []string) error {
80+
cmd.Printf("Run %s benchmarks for the package\n", benchType)
81+
82+
failOnMissing, err := cmd.Flags().GetBool(cobraext.FailOnMissingFlagName)
83+
if err != nil {
84+
return cobraext.FlagParsingError(err, cobraext.FailOnMissingFlagName)
85+
}
86+
87+
reportFormat, err := cmd.Flags().GetString(cobraext.ReportFormatFlagName)
88+
if err != nil {
89+
return cobraext.FlagParsingError(err, cobraext.ReportFormatFlagName)
90+
}
91+
92+
reportOutput, err := cmd.Flags().GetString(cobraext.ReportOutputFlagName)
93+
if err != nil {
94+
return cobraext.FlagParsingError(err, cobraext.ReportOutputFlagName)
95+
}
96+
97+
useTestSamples, err := cmd.Flags().GetBool(cobraext.BenchWithTestSamplesFlagName)
98+
if err != nil {
99+
return cobraext.FlagParsingError(err, cobraext.BenchWithTestSamplesFlagName)
100+
}
101+
102+
numTopProcs, err := cmd.Flags().GetInt(cobraext.BenchNumTopProcsFlagName)
103+
if err != nil {
104+
return cobraext.FlagParsingError(err, cobraext.BenchNumTopProcsFlagName)
105+
}
106+
107+
packageRootPath, found, err := packages.FindPackageRoot()
108+
if !found {
109+
return errors.New("package root not found")
110+
}
111+
if err != nil {
112+
return errors.Wrap(err, "locating package root failed")
113+
}
114+
115+
dataStreams, err := cmd.Flags().GetStringSlice(cobraext.DataStreamsFlagName)
116+
if err != nil {
117+
return cobraext.FlagParsingError(err, cobraext.DataStreamsFlagName)
118+
}
119+
120+
if len(dataStreams) > 0 {
121+
common.TrimStringSlice(dataStreams)
122+
123+
if err := validateDataStreamsFlag(packageRootPath, dataStreams); err != nil {
124+
return cobraext.FlagParsingError(err, cobraext.DataStreamsFlagName)
125+
}
126+
}
127+
128+
signal.Enable()
129+
130+
benchFolders, err := benchrunner.FindBenchmarkFolders(packageRootPath, dataStreams, benchType)
131+
if err != nil {
132+
return errors.Wrap(err, "unable to determine benchmark folder paths")
133+
}
134+
135+
if useTestSamples {
136+
testFolders, err := testrunner.FindTestFolders(packageRootPath, dataStreams, testrunner.TestType(benchType))
137+
if err != nil {
138+
return errors.Wrap(err, "unable to determine test folder paths")
139+
}
140+
benchFolders = append(benchFolders, testFolders...)
141+
}
142+
143+
if failOnMissing && len(benchFolders) == 0 {
144+
if len(dataStreams) > 0 {
145+
return fmt.Errorf("no %s benchmarks found for %s data stream(s)", benchType, strings.Join(dataStreams, ","))
146+
}
147+
return fmt.Errorf("no %s benchmarks found", benchType)
148+
}
149+
150+
esClient, err := elasticsearch.Client()
151+
if err != nil {
152+
return errors.Wrap(err, "can't create Elasticsearch client")
153+
}
154+
155+
var results []*benchrunner.Result
156+
for _, folder := range benchFolders {
157+
r, err := benchrunner.Run(benchType, benchrunner.BenchOptions{
158+
Folder: folder,
159+
PackageRootPath: packageRootPath,
160+
API: esClient.API,
161+
NumTopProcs: numTopProcs,
162+
})
163+
164+
if err != nil {
165+
return errors.Wrapf(err, "error running package %s benchmarks", benchType)
166+
}
167+
168+
results = append(results, r)
169+
}
170+
171+
format := benchrunner.BenchReportFormat(reportFormat)
172+
benchReports, err := benchrunner.FormatReport(format, results)
173+
if err != nil {
174+
return errors.Wrap(err, "error formatting benchmark report")
175+
}
176+
177+
m, err := packages.ReadPackageManifestFromPackageRoot(packageRootPath)
178+
if err != nil {
179+
return errors.Wrapf(err, "reading package manifest failed (path: %s)", packageRootPath)
180+
}
181+
182+
for idx, report := range benchReports {
183+
if err := benchrunner.WriteReport(fmt.Sprintf("%s-%d", m.Name, idx+1), benchrunner.BenchReportOutput(reportOutput), report, format); err != nil {
184+
return errors.Wrap(err, "error writing benchmark report")
185+
}
186+
}
187+
188+
// Check if there is any error or failure reported
189+
for _, r := range results {
190+
if r.ErrorMsg != "" {
191+
return fmt.Errorf("one or more benchmarks failed: %v", r.ErrorMsg)
192+
}
193+
}
194+
return nil
195+
}
196+
}

cmd/root.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ import (
1515
)
1616

1717
var commands = []*cobraext.Command{
18+
setupBenchmarkCommand(),
1819
setupBuildCommand(),
1920
setupChangelogCommand(),
2021
setupCheckCommand(),

0 commit comments

Comments
 (0)