
Commit c926da5

Trigger performance test after successful CI run. (#818)
1 parent 76d949a

File tree

4 files changed (+71 −135 lines)


.github/workflows/CI.yml

Lines changed: 2 additions & 2 deletions
```diff
@@ -1048,12 +1048,12 @@ jobs:
             version:
               - 'VERSION'
 
-      - name: Trigger soaking
+      - name: Trigger performance test
         if: steps.filter.outputs.version == 'true'
         uses: peter-evans/[email protected]
         with:
           token: "${{ secrets.REPO_WRITE_ACCESS_TOKEN }}"
-          event-type: bump-version
+          event-type: trigger-perf
           client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'
 
   clean:
```
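Together with perf.yml below, this change chains the two workflows: once CI passes on a commit that touches VERSION, the step above fires a `trigger-perf` repository_dispatch event carrying the ref and sha, and perf.yml consumes it. For debugging, the same event can be fired by hand through the GitHub REST API; here is a minimal sketch, assuming a `GITHUB_TOKEN` environment variable holds a token with write access to the repo (the workflow itself uses `secrets.REPO_WRITE_ACCESS_TOKEN`):

```python
# Minimal sketch: manually fire the same repository_dispatch event that the
# CI step above sends. GITHUB_TOKEN is an assumed env var, not part of the
# workflow; the endpoint and payload shape follow GitHub's REST API.
import os
import requests

def trigger_perf(ref: str, sha: str) -> None:
    resp = requests.post(
        "https://api.github.com/repos/aws-observability/aws-otel-collector/dispatches",
        headers={
            "Authorization": f"token {os.environ['GITHUB_TOKEN']}",
            "Accept": "application/vnd.github+json",
        },
        json={
            # event_type must match the `types` filter in perf.yml
            "event_type": "trigger-perf",
            "client_payload": {"ref": ref, "sha": sha},
        },
        timeout=30,
    )
    resp.raise_for_status()  # GitHub responds 204 No Content on success
```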

.github/workflows/perf.yml

Lines changed: 31 additions & 30 deletions
```diff
@@ -13,12 +13,9 @@
 
 name: 'Performance test'
 on:
-  schedule:
-    - cron: '0 9 * * 0' # Sunday at 9 am UTC: pst 1 am.
-
-  # we can manually trigger this workflow by using dispatch for debuging
+  # we can manually trigger this workflow by using dispatch for debugging
   repository_dispatch:
-    types: [manual-perf]
+    types: [trigger-perf]
   workflow_dispatch:
     inputs:
       sha:
@@ -27,7 +24,11 @@ on:
 
 env:
   TF_VAR_aws_access_key_id: ${{ secrets.INTEG_TEST_AWS_KEY_ID }}
-  TF_VAR_aws_secret_access_key: ${{ secrets.INTEG_TEST_AWS_KEY_SECRET }}
+  TF_VAR_aws_secret_access_key: ${{ secrets.INTEG_TEST_AWS_KEY_SECRET }}
+  GH_PAGES_BRANCH: gh-pages
+  MAX_BENCHMARKS_TO_KEEP: 100
+  COMMIT_USER: Github Actions
+  COMMIT_EMAIL: [email protected]
 
 jobs:
   get-testing-version:
@@ -148,28 +149,28 @@ jobs:
       with:
         path: artifacts
 
-    - name: Produce performance model table
-      run: python e2etest/get-performance-model-table.py
-
-    - name: Create a new branch
-      uses: peterjgrainger/[email protected]
-      env:
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      with:
-        branch: "perf-test-${{ github.run_id }}"
-
-    - name: Commit to a branch
-      uses: stefanzweifel/git-auto-commit-action@v4
-      with:
-        commit_message: "Update benchmarking"
-        branch: "perf-test-${{ github.run_id }}"
-        file_pattern: docs/performance_model.md
-
-    - name: pull-request
-      uses: repo-sync/pull-request@v2
+    - name: Produce performance report
+      run: python e2etest/get-performance-model-table.py -v ${{ needs.get-testing-version.outputs.testing_version }}
+
+    # Uses github-action-benchmark to update historic benchmark data
+    # Temporarily using forked action in order to pass in commit SHA
+    - name: Store benchmark result
+      uses: jefchien/[email protected]
       with:
-        source_branch: "perf-test-${{ github.run_id }}"
-        destination_branch: "main"
-        github_token: ${{ secrets.GITHUB_TOKEN }}
-        pr_title: "Update Performance Model"
-        pr_body: "Generated by performance test workflow [#${{ github.run_number }}](https://github.com/aws-observability/aws-otel-collector/actions/runs/${{ github.run_id }}) using https://github.com/aws-observability/aws-otel-collector/commit/${{ needs.get-testing-version.outputs.commit_id }}."
+        tool: "customSmallerIsBetter"
+        output-file-path: performance-data.json
+        benchmark-data-dir-path: benchmark/trend
+        max-items-in-chart: ${{ env.MAX_BENCHMARKS_TO_KEEP }}
+        gh-pages-branch: ${{ env.GH_PAGES_BRANCH }}
+        github-token: ${{ secrets.GITHUB_TOKEN }}
+        commit-sha: ${{ github.event.inputs.sha }}
+        auto-push: false
+
+    - name: Commit to gh-pages branch
+      run: |
+        git switch ${{ env.GH_PAGES_BRANCH }}
+        rsync -avv performance-report.md benchmark/report.md
+        rsync -avv performance-data.json benchmark/data/
+        git add benchmark/data/* benchmark/report.md
+        git -c user.name="${{ env.COMMIT_USER }}" -c user.email="${{ env.COMMIT_EMAIL }}" commit --amend --reset-author -m "Update benchmarking for ${{ github.event.inputs.sha }}"
+        git push origin ${{ env.GH_PAGES_BRANCH }}:${{ env.GH_PAGES_BRANCH }}
```
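The `Store benchmark result` step replaces the old branch-plus-PR flow: github-action-benchmark's custom tools (here `customSmallerIsBetter`, where lower values chart as better) read a JSON array of data points and append them to the history kept under `benchmark/trend` on the gh-pages branch, and the follow-up step amends that commit with the markdown report and raw data before pushing. A sketch of what `performance-data.json` might contain for this tool; the testcase name and numbers are invented for illustration:

```python
# Illustrative performance-data.json contents for customSmallerIsBetter:
# a JSON array of entries with "name", "value", "unit", and an optional
# free-text "extra" field. Values below are made up, not from a real run.
import json

sample = [
    {
        "name": "otlp_trace_mock",  # hypothetical testcase name
        "value": 0.6,
        "unit": "%",
        "extra": "otlp (TPS: 100) - Average CPU Usage",
    },
    {
        "name": "otlp_trace_mock",
        "value": 72.4,
        "unit": "MB",
        "extra": "otlp (TPS: 100) - Average Memory Usage",
    },
]

with open("performance-data.json", "w") as f:
    json.dump(sample, f, indent=4)
```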

docs/performance_model.md

Lines changed: 0 additions & 92 deletions
This file was deleted.

e2etest/get-performance-model-table.py

Lines changed: 38 additions & 11 deletions
```diff
@@ -1,9 +1,9 @@
-import os
-import json
-import sys
-from pathlib import Path
+import argparse
 import jinja2
+import json
+import os
 import string
+import sys
 
 # Schema: performance_models[data_mode][tps] = [model]
 performance_models = {}
@@ -36,7 +36,7 @@ def add_performance_model(model):
     performance_models[data_mode][data_rate].append(model)
 
 
-def flatten_performance_models(models):
+def flatten_performance_models():
     """
     Flattens performance model into list of grouped models where each group
     corresponds to a table in the report.
@@ -58,13 +58,36 @@ def flatten_performance_models(models):
         x["data_mode"], x["data_rate"]))
     return models_list
 
+def get_benchmark_entry(model, data_mode, data_rate, value_field, unit, subgroup):
+    benchmark_entry = {}
+    benchmark_entry["name"] = model["testcase"]
+    benchmark_entry["value"] = model[value_field]
+    benchmark_entry["unit"] = unit
+    benchmark_entry["extra"] = f"{data_mode} (TPS: {data_rate}) - {subgroup}"
+    return benchmark_entry
+
+def get_benchmark_data():
+    """
+    Splits models by testcase and groups by data mode, data rate, and field type.
+    """
+    benchmark_data = []
+
+    for data_mode, data_rates in performance_models.items():
+        for data_rate, models in data_rates.items():
+            for model in models:
+                benchmark_data.append(get_benchmark_entry(model, data_mode, data_rate, "avgCpu", "%", "Average CPU Usage"))
+                benchmark_data.append(get_benchmark_entry(model, data_mode, data_rate, "avgMem", "MB", "Average Memory Usage"))
+
+    return benchmark_data
 
 if __name__ == "__main__":
-    aoc_version = Path('VERSION').read_text()
+    parser = argparse.ArgumentParser("Generate performance-report.md and performance-data.json from artifacts")
+    parser.add_argument('-v', '--version', help="version to tag the report with", required=True)
+    args = parser.parse_args()
+    aoc_version = args.version
 
-    from jinja2 import Environment, PackageLoader, select_autoescape
     templateLoader = jinja2.FileSystemLoader(searchpath="e2etest/templates/")
-    env = Environment(autoescape=select_autoescape(['html', 'xml', 'tpl', 'yaml', 'yml']), loader=templateLoader)
+    env = jinja2.Environment(autoescape=jinja2.select_autoescape(['html', 'xml', 'tpl', 'yaml', 'yml']), loader=templateLoader)
 
     # get performance models from artifacts
     artifacts_path = "artifacts/"
@@ -79,7 +102,7 @@ def flatten_performance_models(models):
         testing_ami = model["testingAmi"]
         add_performance_model(model)
 
-    models_list = flatten_performance_models(performance_models)
+    models_list = flatten_performance_models()
 
     # render performance models into markdown
     template = env.get_template('performance_model.tpl')
@@ -92,6 +115,10 @@ def flatten_performance_models(models):
     })
     print(rendered_result)
 
-    # write rendered result to docs/performance_model.md
-    with open("docs/performance_model.md", "w") as f:
+    # write rendered result to report.md
+    with open("performance-report.md", "w+") as f:
         f.write(rendered_result)
+
+    # write benchmark-data.json
+    with open("performance-data.json", "w+") as f:
+        json.dump(get_benchmark_data(), f, indent=4)
```
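The script now takes the version from a required `-v/--version` flag instead of reading the VERSION file, so the workflow can tag the report with the exact version it tested, and it emits both the markdown report and the benchmark JSON. A quick usage sketch of the new `get_benchmark_entry` helper, copied from the diff above and fed a hand-built model dict with invented values:

```python
# get_benchmark_entry as added in this commit; the model values are
# invented for illustration, but the field names ("testcase", "avgCpu",
# "avgMem") match those the script reads from the artifact JSON.
def get_benchmark_entry(model, data_mode, data_rate, value_field, unit, subgroup):
    benchmark_entry = {}
    benchmark_entry["name"] = model["testcase"]
    benchmark_entry["value"] = model[value_field]
    benchmark_entry["unit"] = unit
    benchmark_entry["extra"] = f"{data_mode} (TPS: {data_rate}) - {subgroup}"
    return benchmark_entry

model = {"testcase": "otlp_trace_mock", "avgCpu": 0.55, "avgMem": 68.2}
print(get_benchmark_entry(model, "otlp", "100", "avgCpu", "%", "Average CPU Usage"))
# {'name': 'otlp_trace_mock', 'value': 0.55, 'unit': '%',
#  'extra': 'otlp (TPS: 100) - Average CPU Usage'}
```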
