- # Longitudinal Benchmarking
+ # Longitudinal Benchmarks
  #
  # This workflow will run the benchmarks defined in the environment variable BENCHMARKS.
  # It will collect and aggregate the benchmark output, format it, and feed it to github-action-benchmark.
  #
  # The benchmark charts are live at https://input-output-hk.github.io/plutus/dev/bench
  # The benchmark data is available at https://input-output-hk.github.io/plutus/dev/bench/data.js

- name: Longitudinal Benchmarking
+ name: Longitudinal Benchmarks

  on:
    push:
@@ -20,9 +20,9 @@ permissions:
    contents: write

  jobs:
-   new-benchmark:
+   longitudinal-benchmarks:
      name: Performance regression check
-     runs-on: ubuntu-latest
+     runs-on: [self-hosted, plutus-benchmark]
      steps:
@@ -33,33 +33,13 @@ jobs:
            for bench in $BENCHMARKS; do
              2>&1 cabal run "$bench" | tee "$bench-output.txt"
            done
+           python ./scripts/format-benchmark-output.py

-           read -r -d '' PYTHON_SCRIPT <<- END_SCRIPT
-           import json
-           result = []
-           for benchmark in "$BENCHMARKS".split():
-               with open(f"{benchmark}-output.txt", "r") as file:
-                   name = ""
-                   for line in file.readlines():
-                       if line.startswith("benchmarking"):
-                           name = line.split()[1]
-                       elif line.startswith("mean"):
-                           parts = line.split()
-                           mean = parts[1]
-                           unit = parts[2]
-                           result.append({
-                               "name": f"{benchmark}-{name}",
-                               "unit": unit,
-                               "value": float(mean)
-                           })
-           with open("output.json", "w") as file:
-               json.dump(result, file)
-           END_SCRIPT
5838
5939 - name : Store benchmark result
6040 uses :
benchmark-action/[email protected] 6141 with :
62- name : My Project Go Benchmark
42+ name : Plutus Benchmarks
6343 tool : ' customSmallerIsBetter'
6444 output-file-path : output.json
6545 github-token : ${{ secrets.GITHUB_TOKEN }}
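
The standalone script introduced in this change replaces the deleted heredoc, so the removed lines are a good guide to what it contains. What follows is a minimal sketch, not the actual file: it assumes scripts/format-benchmark-output.py keeps the heredoc's parsing logic and reads BENCHMARKS from the environment (the heredoc received it through shell interpolation instead).

    import json
    import os

    # Collect one entry per criterion benchmark case, across all benchmark
    # executables listed in the BENCHMARKS environment variable.
    # (Hypothetical reconstruction of scripts/format-benchmark-output.py.)
    result = []
    for benchmark in os.environ["BENCHMARKS"].split():
        with open(f"{benchmark}-output.txt", "r") as file:
            name = ""
            for line in file.readlines():
                # criterion prints "benchmarking <name>" before each case ...
                if line.startswith("benchmarking"):
                    name = line.split()[1]
                # ... and a "mean <value> <unit> ..." line in its summary.
                elif line.startswith("mean"):
                    parts = line.split()
                    result.append({
                        "name": f"{benchmark}-{name}",
                        "unit": parts[2],
                        "value": float(parts[1]),
                    })

    # github-action-benchmark's customSmallerIsBetter tool consumes this file.
    with open("output.json", "w") as file:
        json.dump(result, file)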
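For example, if BENCHMARKS held just a hypothetical executable "validation" whose criterion output contained "benchmarking fib/10" followed later by "mean 1.512 ms ...", the script would write [{"name": "validation-fib/10", "unit": "ms", "value": 1.512}] to output.json. That list-of-objects shape is what the customSmallerIsBetter tool expects, and it treats smaller values as improvements, which is the right polarity for run times.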