Trigger performance test after successful CI run. #818
CI workflow:

```diff
@@ -1033,12 +1033,12 @@ jobs:
           version:
             - 'VERSION'

-      - name: Trigger soaking
+      - name: Trigger performance test
         if: steps.filter.outputs.version == 'true'
         uses: peter-evans/[email protected]
         with:
           token: "${{ secrets.REPO_WRITE_ACCESS_TOKEN }}"
-          event-type: bump-version
+          event-type: trigger-perf
           client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'

   clean:
```
Performance test workflow:

```diff
@@ -13,12 +13,9 @@
 name: 'Performance test'
 on:
-  schedule:
-    - cron: '0 9 * * 0' # Sunday at 9 am UTC: pst 1 am.
-
-  # we can manually trigger this workflow by using dispatch for debuging
+  # we can manually trigger this workflow by using dispatch for debugging
   repository_dispatch:
-    types: [manual-perf]
+    types: [trigger-perf]
   workflow_dispatch:
     inputs:
       sha:
@@ -27,7 +24,11 @@ on:
 env:
   TF_VAR_aws_access_key_id: ${{ secrets.INTEG_TEST_AWS_KEY_ID }}
   TF_VAR_aws_secret_access_key: ${{ secrets.INTEG_TEST_AWS_KEY_SECRET }}
+  GH_PAGES_BRANCH: gh-pages
+  MAX_BENCHMARKS_TO_KEEP: 100
+  COMMIT_USER: Github Actions
+  COMMIT_EMAIL: [email protected]

 jobs:
   get-testing-version:
```
```diff
@@ -147,28 +148,26 @@ jobs:
         with:
           path: artifacts

-      - name: Produce performance model table
-        run: python e2etest/get-performance-model-table.py
-
-      - name: Create a new branch
-        uses: peterjgrainger/[email protected]
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          branch: "perf-test-${{ github.run_id }}"
-
-      - name: Commit to a branch
-        uses: stefanzweifel/git-auto-commit-action@v4
-        with:
-          commit_message: "Update benchmarking"
-          branch: "perf-test-${{ github.run_id }}"
-          file_pattern: docs/performance_model.md
-
-      - name: pull-request
-        uses: repo-sync/pull-request@v2
-        with:
-          source_branch: "perf-test-${{ github.run_id }}"
-          destination_branch: "main"
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          pr_title: "Update Performance Model"
-          pr_body: "Generated by performance test workflow [#${{ github.run_number }}](https://github.com/aws-observability/aws-otel-collector/actions/runs/${{ github.run_id }}) using https://github.com/aws-observability/aws-otel-collector/commit/${{ needs.get-testing-version.outputs.commit_id }}."
+      - name: Produce performance report
+        run: python e2etest/get-performance-model-table.py ${{ needs.get-testing-version.outputs.testing_version }}
+
+      - name: Store benchmark result
+        uses: jefchien/[email protected]
+        with:
+          tool: "customSmallerIsBetter"
+          output-file-path: performance-data.json
+          benchmark-data-dir-path: benchmark/trend
+          max-items-in-chart: ${{ env.MAX_BENCHMARKS_TO_KEEP }}
+          gh-pages-branch: ${{ env.GH_PAGES_BRANCH }}
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          commit-sha: ${{ github.event.inputs.sha }}
+          auto-push: false
+
+      - name: Commit to gh-pages branch
+        run: |
+          git switch ${{ env.GH_PAGES_BRANCH }}
+          rsync -avv performance-report.md benchmark/report.md
+          rsync -avv performance-data.json benchmark/data/
+          git add benchmark/data/* benchmark/report.md
+          git -c user.name="${{ env.COMMIT_USER }}" -c user.email="${{ env.COMMIT_EMAIL }}" commit --amend --reset-author -m "Update benchmarking for ${{ github.event.inputs.sha }}"
+          git push origin ${{ env.GH_PAGES_BRANCH }}:${{ env.GH_PAGES_BRANCH }}
```

Review thread on the `Store benchmark result` step:

Contributor
Any way for us to know the parameters that are required in your private GitHub action, though?

Member (Author)
It's a fork of the existing https://github.com/benchmark-action/github-action-benchmark. The change was needed to pass the workflow dispatch input SHA into the action so the benchmarks are tagged with the correct commit. I have a PR benchmark-action/github-action-benchmark#93 open for the change, but until that gets merged or another method of inserting the commit is found, we'll need to use the fork.

Contributor
Got it. That makes more sense to me now. Currently reviewing your other PR as well.

Contributor
Please add a comment.
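For reference, `output-file-path: performance-data.json` points the action at a plain JSON list of `{name, value, unit, extra}` entries, which the updated `e2etest/get-performance-model-table.py` (diffed below) writes. A minimal sketch of that file's contents, with invented testcase names and numbers:

```python
import json

# Illustrative only: one average-CPU entry and one average-memory entry
# for a single testcase, mirroring the shape get_benchmark_data() emits.
sample = [
    {
        "name": "otlp_mock",  # invented testcase name
        "value": 7.5,         # invented avgCpu
        "unit": "%",
        "extra": "OTLP (TPS: 100) - Average CPU Usage",
    },
    {
        "name": "otlp_mock",
        "value": 68.2,        # invented avgMem
        "unit": "MB",
        "extra": "OTLP (TPS: 100) - Average Memory Usage",
    },
]

with open("performance-data.json", "w+") as f:
    f.write(json.dumps(sample))
```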
This file was deleted.
e2etest/get-performance-model-table.py:

```diff
@@ -36,7 +36,7 @@ def add_performance_model(model):
     performance_models[data_mode][data_rate].append(model)


-def flatten_performance_models(models):
+def flatten_performance_models():
     """
     Flattens performance model into list of grouped models where each group
     corresponds to a table in the report.
@@ -58,9 +58,38 @@ def flatten_performance_models(models):
                                 x["data_mode"], x["data_rate"]))
     return models_list


+def get_benchmark_data():
+    """
+    Splits models by test
+    """
+    benchmark_data = []
```
Contributor
IMO, instead of using only `[]`, using `list(dict)` would make it clearer what is added into the list.
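A minimal sketch of what that suggestion could look like with a type annotation (illustrative only, not part of this PR; `BenchmarkEntry` is an invented alias):

```python
from typing import Dict, List, Union

# Invented alias: one entry in performance-data.json (name/value/unit/extra).
BenchmarkEntry = Dict[str, Union[str, float]]

def get_benchmark_data() -> List[BenchmarkEntry]:
    """
    Splits models by test; the annotation documents what the list holds.
    """
    benchmark_data: List[BenchmarkEntry] = []
    # ... populate benchmark_data exactly as in the diff below ...
    return benchmark_data
```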
```diff
+    for data_mode, data_rates in performance_models.items():
+        for data_rate, models in data_rates.items():
+            for model in models:
+                benchmark_cpu = {}
+                benchmark_cpu["name"] = model["testcase"]
+                benchmark_cpu["value"] = model["avgCpu"]
+                benchmark_cpu["unit"] = "%"
+                benchmark_cpu["extra"] = f"{data_mode} (TPS: {data_rate}) - Average CPU Usage"
+
+                benchmark_mem = {}
+                benchmark_mem["name"] = model["testcase"]
+                benchmark_mem["value"] = model["avgMem"]
+                benchmark_mem["unit"] = "MB"
+                benchmark_mem["extra"] = f"{data_mode} (TPS: {data_rate}) - Average Memory Usage"
+
+                benchmark_data.append(benchmark_cpu)
+                benchmark_data.append(benchmark_mem)
+
+    return benchmark_data
+

 if __name__ == "__main__":
-    aoc_version = Path('VERSION').read_text()
+    try:
+        aoc_version = sys.argv[1].rstrip()
+    except IndexError:
+        print(f"Usage: {sys.argv[0]} <version>")
+        sys.exit(1)

     from jinja2 import Environment, PackageLoader, select_autoescape
     templateLoader = jinja2.FileSystemLoader(searchpath="e2etest/templates/")
```
```diff
@@ -79,7 +108,7 @@ def flatten_performance_models(models):
         testing_ami = model["testingAmi"]
         add_performance_model(model)

-    models_list = flatten_performance_models(performance_models)
+    models_list = flatten_performance_models()

     # render performance models into markdown
     template = env.get_template('performance_model.tpl')
@@ -92,6 +121,10 @@ def flatten_performance_models(models):
     })
     print(rendered_result)

-    # write rendered result to docs/performance_model.md
-    with open("docs/performance_model.md", "w") as f:
+    # write rendered result to report.md
+    with open("performance-report.md", "w+") as f:
         f.write(rendered_result)

+    # write benchmark-data.json
+    with open("performance-data.json", "w+") as f:
+        f.write(json.dumps(get_benchmark_data()))
```
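Taken together, the script now has two outputs: the rendered Markdown report and the benchmark JSON. A self-contained sketch of the JSON path, condensing the pieces above into runnable form (the model values are invented, and the `setdefault`-based grouping in `add_performance_model` is an assumption, since this diff only shows that function's final `append`):

```python
import json

# Module-level store, as in the script: {data_mode: {data_rate: [model, ...]}}.
performance_models = {}

def add_performance_model(model):
    # Assumed grouping logic; the diff only shows the final append.
    group = performance_models.setdefault(model["data_mode"], {})
    group.setdefault(model["data_rate"], []).append(model)

def get_benchmark_data():
    """Splits models by test: one CPU entry and one memory entry per model."""
    benchmark_data = []
    for data_mode, data_rates in performance_models.items():
        for data_rate, models in data_rates.items():
            for model in models:
                benchmark_data.append({
                    "name": model["testcase"],
                    "value": model["avgCpu"],
                    "unit": "%",
                    "extra": f"{data_mode} (TPS: {data_rate}) - Average CPU Usage",
                })
                benchmark_data.append({
                    "name": model["testcase"],
                    "value": model["avgMem"],
                    "unit": "MB",
                    "extra": f"{data_mode} (TPS: {data_rate}) - Average Memory Usage",
                })
    return benchmark_data

# Invented example model carrying the fields the script reads.
add_performance_model({
    "testcase": "otlp_mock",
    "data_mode": "OTLP",
    "data_rate": 100,
    "avgCpu": 7.5,
    "avgMem": 68.2,
})

with open("performance-data.json", "w+") as f:
    f.write(json.dumps(get_benchmark_data()))
```

Building each entry as a dict literal, as here, is a compact alternative to the key-by-key assignment in the diff and addresses the readability concern raised above.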