diff --git a/.github/workflows/benchmark_regression_test.yml b/.github/workflows/benchmark_regression_test.yml
new file mode 100644
index 000000000..e49ead97b
--- /dev/null
+++ b/.github/workflows/benchmark_regression_test.yml
@@ -0,0 +1,104 @@
+name: Benchmark Regression Check
+
+on:
+  pull_request:
+    branches: [ main ]
+    paths:
+      - '**.go'
+      - 'go.*'
+      - 'cmd/go.*'
+      - 'Makefile'
+      - 'Dockerfile'
+      - 'integration/**'
+      - 'scripts/**'
+      - '.github/workflows/**'
+
+jobs:
+  test-twice:
+    runs-on: ubuntu-20.04
+
+    steps:
+      - uses: actions/setup-go@v4
+        with:
+          go-version: '1.18.10'
+      - name: Checkout main
+        uses: actions/checkout@v3
+        with:
+          ref: main
+      - run: make
+      - name: Run benchmark
+        run: make benchmarks-perf-test
+      - name: Upload latest benchmark result
+        uses: actions/upload-artifact@v3
+        with:
+          name: benchmark-result-artifact-main
+          path: ${{ github.workspace }}/benchmark/performanceTest/output/results.json
+      - name: Remove output directory
+        run: sudo rm -rf ${{ github.workspace }}/benchmark/performanceTest/output
+      - name: Stash uncommitted changes
+        run: git stash push --keep-index --include-untracked -m "Stashing changes for tests"
+      - name: Check out PR
+        uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+      - run: make
+      - name: Run benchmark
+        run: make benchmarks-perf-test
+      - name: Upload latest benchmark result
+        uses: actions/upload-artifact@v3
+        with:
+          name: benchmark-result-artifact-pr
+          path: ${{ github.workspace }}/benchmark/performanceTest/output/results.json
+
+  download_and_perform_comparison:
+    runs-on: ubuntu-20.04
+    needs: test-twice
+    steps:
+      - uses: actions/setup-go@v4
+        with:
+          go-version: '1.18.10'
+      - name: Checkout main
+        uses: actions/checkout@v3
+        with:
+          ref: main
+      - run: make
+
+      - name: Create previous directory
+        run: mkdir -v ${{ github.workspace }}/previous
+      - name: Create current directory
+        run: mkdir -v ${{ github.workspace }}/current
+      - name: Download previous benchmark result
+        uses: actions/download-artifact@v3
+        with:
+          name: benchmark-result-artifact-main
+          path: ${{ github.workspace }}/previous
+      - name: Download current benchmark result
+        uses: actions/download-artifact@v3
+        with:
+          name: benchmark-result-artifact-pr
+          path: ${{ github.workspace }}/current
+      - name: Perform comparison and log results
+        id: run-compare
+        run: |
+          sudo chmod +x ${{ github.workspace }}/scripts/check_regression.sh
+          if sudo ${{ github.workspace }}/scripts/check_regression.sh ${{ github.workspace }}/previous/results.json ${{ github.workspace }}/current/results.json; then
+            echo "Comparison successful. All P90 values are within the acceptable range."
+          else
+            echo "Comparison failed. Current P90 values exceed 110% of the corresponding baseline values."
+            echo "regression-detected=true" >> $GITHUB_OUTPUT
+          fi
+      - name: Stop the workflow if regression is detected
+        if: steps.run-compare.outputs.regression-detected == 'true'
+        uses: actions/github-script@v6
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            const comment = `
+            :warning: **Regression Detected** :warning:
+
+            The benchmark comparison indicates a performance regression.
+            Please investigate and address the issue.
+            To investigate, check the logs of the previous job above.
+            `;
+
+            core.setFailed(comment);
\ No newline at end of file
diff --git a/Makefile b/Makefile
index ec32cac7f..e10fb90cd 100644
--- a/Makefile
+++ b/Makefile
@@ -104,7 +104,7 @@ build-benchmarks:
 
 benchmarks-perf-test:
 	@echo "$@"
-	@cd benchmark/performanceTest ; sudo rm -rf output ; GO111MODULE=$(GO111MODULE_VALUE) go build -o ../bin/PerfTests . && sudo ../bin/PerfTests -show-commit
+	@cd benchmark/performanceTest ; sudo rm -rf output ; GO111MODULE=$(GO111MODULE_VALUE) go build -o ../bin/PerfTests . && sudo ../bin/PerfTests -show-commit -count 2
 
 benchmarks-stargz:
 	@echo "$@"
diff --git a/benchmark/framework/framework.go b/benchmark/framework/framework.go
index 28c3a7dd2..c449d23d2 100644
--- a/benchmark/framework/framework.go
+++ b/benchmark/framework/framework.go
@@ -94,6 +94,7 @@ func (frame *BenchmarkFramework) Run(ctx context.Context) {
 		}
 	}
 
+	// TODO: decide whether a timeout is needed around the test runs.
 	json, err := json.MarshalIndent(frame, "", " ")
 	if err != nil {
 		fmt.Printf("JSON Marshalling Error: %v\n", err)
@@ -128,6 +129,8 @@ func (testStats *BenchmarkTestStats) calculateTestStat() {
 		fmt.Printf("Error Calculating Mean: %v\n", err)
 		testStats.Mean = -1
 	}
+
+	fmt.Printf("testStats.BenchmarkTimes: %v\n", testStats.BenchmarkTimes)
 	testStats.Min, err = stats.Min(testStats.BenchmarkTimes)
 	if err != nil {
 		fmt.Printf("Error Calculating Min: %v\n", err)
diff --git a/benchmark/performanceTest/main.go b/benchmark/performanceTest/main.go
index 83afc1011..1b54d444f 100644
--- a/benchmark/performanceTest/main.go
+++ b/benchmark/performanceTest/main.go
@@ -47,7 +47,6 @@ func main() {
 	flag.BoolVar(&showCom, "show-commit", false, "tag the commit hash to the benchmark results")
 	flag.IntVar(&numberOfTests, "count", 5, "Describes the number of runs a benchmarker should run. Default: 5")
 	flag.StringVar(&configCsv, "f", "default", "Path to a csv file describing image details in this order ['Name','Image ref', 'Ready line', 'manifest ref'].")
-
 	flag.Parse()
 
 	if showCom {
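
Note on the comparison step: scripts/check_regression.sh is referenced by the workflow but is not part of this diff, so its exact logic is not shown here. As a rough sketch of the contract the workflow relies on — compare per-test P90 values from two results.json files and exit non-zero when any current value exceeds 110% of its baseline — the Go program below illustrates the idea. The results.json schema (a flat list of objects with "testName" and "pct90" fields) and all identifiers in the sketch are assumptions for illustration; the real file is whatever the framework's json.MarshalIndent call above actually produces.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// threshold mirrors the workflow's log message: a regression is any test
// whose current P90 exceeds 110% of the corresponding baseline P90.
const threshold = 1.10

// benchmarkResult is an ASSUMED shape for one entry in results.json; the
// actual schema is defined by the benchmark framework's JSON output.
type benchmarkResult struct {
	TestName string  `json:"testName"`
	Pct90    float64 `json:"pct90"`
}

// load reads a results file and indexes P90 values by test name.
func load(path string) (map[string]float64, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var entries []benchmarkResult
	if err := json.Unmarshal(data, &entries); err != nil {
		return nil, err
	}
	p90s := make(map[string]float64, len(entries))
	for _, e := range entries {
		p90s[e.TestName] = e.Pct90
	}
	return p90s, nil
}

func main() {
	if len(os.Args) != 3 {
		fmt.Fprintln(os.Stderr, "usage: regression-check <previous.json> <current.json>")
		os.Exit(2)
	}
	previous, err := load(os.Args[1])
	if err != nil {
		fmt.Fprintf(os.Stderr, "loading previous results: %v\n", err)
		os.Exit(2)
	}
	current, err := load(os.Args[2])
	if err != nil {
		fmt.Fprintf(os.Stderr, "loading current results: %v\n", err)
		os.Exit(2)
	}

	regression := false
	for name, prev := range previous {
		cur, ok := current[name]
		if !ok {
			continue // test absent from the PR run; nothing to compare
		}
		if cur > prev*threshold {
			fmt.Printf("REGRESSION %s: P90 %.3f -> %.3f (limit %.3f)\n",
				name, prev, cur, prev*threshold)
			regression = true
		}
	}
	if regression {
		// A non-zero exit sends the workflow's `if` into its else branch,
		// which sets regression-detected=true.
		os.Exit(1)
	}
	fmt.Println("All P90 values are within the acceptable range.")
}

The exit-status convention is the only interface the workflow depends on: success means all P90s are within range, non-zero means the "Stop the workflow if regression is detected" step posts the failure comment via core.setFailed.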