diff --git a/devops/actions/run-tests/benchmark/action.yml b/devops/actions/run-tests/benchmark/action.yml
index c10a163261c13..f90808f730787 100644
--- a/devops/actions/run-tests/benchmark/action.yml
+++ b/devops/actions/run-tests/benchmark/action.yml
@@ -27,16 +27,25 @@ runs:
     shell: bash
     env:
       TARGET_DEVICE: ${{ inputs.target_devices }}
+      RUNNER_NAME: ${{ runner.name }}
     run: |
       case "$RUNNER_TAG" in
-        '["Linux", "gen12"]' | '["Linux", "pvc"]') ;;
+        '["PVC_PERF"]' ) ;;
         *)
           echo "#"
-          echo "# WARNING: Only gen12/pvc on Linux is fully supported."
+          echo "# WARNING: Only specific, tuned runners are fully supported."
           echo "# This workflow is not guaranteed to work with other runners."
           echo "#" ;;
       esac

+      # Ensure the runner name contains nothing unexpected (i.e. nothing injected).
+      # TODO: is this check overkill from a security standpoint?
+      if [ -z "$(printf '%s' "$RUNNER_NAME" | grep -oE '^[a-zA-Z0-9_-]+$')" ]; then
+        echo "Bad runner name: please ensure the runner name only uses characters in [a-zA-Z0-9_-]."
+        exit 1
+      fi
+      echo "RUNNER_NAME=$RUNNER_NAME" >> $GITHUB_ENV
+
       # inputs.target_devices is not used directly, as that would allow code injection.
       case "$TARGET_DEVICE" in
         level_zero:*) ;;
@@ -46,11 +55,11 @@ runs:
           echo "# This workflow is not guaranteed to work with other backends."
           echo "#" ;;
       esac
+      echo "ONEAPI_DEVICE_SELECTOR=$TARGET_DEVICE" >> $GITHUB_ENV
+
   - name: Compute CPU core range to run benchmarks on
     shell: bash
     run: |
-      # Taken from ur-benchmark-reusable.yml:
-
       # Compute the core range for the first NUMA node; second node is used by
       # UMF. Skip the first 4 cores as the kernel is likely to schedule more
       # work on these.
@@ -67,65 +76,62 @@ runs:
       ZE_AFFINITY_MASK=0
       echo "ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK" >> $GITHUB_ENV

+  - name: Checkout results repo
+    shell: bash
+    run: |
+      git clone -b unify-ci https://github.com/intel/llvm-ci-perf-results
   - name: Run compute-benchmarks
     shell: bash
     run: |
-      cat << EOF
-      #
-      # NOTE TO DEVELOPERS:
-      #
-
-      Check latter steps of the workflow: This job produces an artifact with:
-      - benchmark results from passing/failing tests
-      - log containing all failing (too slow) benchmarks
-      - log containing all erroring benchmarks
-
-      While this step in the workflow provides debugging output describing this
-      information, it might be easier to inspect the logs from the artifact
-      instead.
-
-      EOF
-      export ONEAPI_DEVICE_SELECTOR="${{ inputs.target_devices }}"
+      # TODO: generate a summary and display a helpful message here.
       export CMPLR_ROOT=./toolchain
       echo "-----"
       sycl-ls
       echo "-----"
       pip install --user --break-system-packages -r ./devops/scripts/benchmarks/requirements.txt
-      taskset -c "$CORES" ./devops/scripts/benchmarks/main.py "$(realpath ./llvm_test_workdir)" --sycl "$(realpath ./toolchain)" --save baseline --preset Minimal
+      echo "-----"
+      mkdir -p "./llvm-ci-perf-results/$RUNNER_NAME"
+      taskset -c "$CORES" ./devops/scripts/benchmarks/main.py \
+        "$(realpath ./llvm_test_workdir)" \
+        --sycl "$(realpath ./toolchain)" \
+        --save baseline \
+        --output-html remote \
+        --results-dir "./llvm-ci-perf-results/$RUNNER_NAME" \
+        --output-dir "./llvm-ci-perf-results/$RUNNER_NAME" \
+        --preset Minimal
       echo "-----"
       ls
-#  - name: Push compute-benchmarks results
-#    if: always()
-#    shell: bash
-#    run: |
-#      # TODO -- waiting on security clearance
-#      # Load configuration values
-#      $(python ./devops/scripts/benchmarking/load_config.py ./devops constants)
-#
-#      cd "./llvm-ci-perf-results"
-#      git config user.name "SYCL Benchmarking Bot"
-#      git config user.email "sys_sycl_benchmarks@intel.com"
-#      git pull
-#      git add .
-#      # Make sure changes have been made
-#      if git diff --quiet && git diff --cached --quiet; then
-#        echo "No new results added, skipping push."
-#      else
-#        git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}"
-#        git push "https://$GITHUB_TOKEN@github.com/$SANITIZED_PERF_RES_GIT_REPO.git" "$SANITIZED_PERF_RES_GIT_BRANCH"
-#      fi
-  - name: Find benchmark result artifact here
+  - name: Push compute-benchmarks results
     if: always()
     shell: bash
     run: |
-      cat << EOF
-      #
-      # Artifact link for benchmark results here:
-      #
-      EOF
-  - name: Archive compute-benchmark results
-    if: always()
-    uses: actions/upload-artifact@v4
-    with:
-      name: Compute-benchmark run ${{ github.run_id }} (${{ runner.name }})
-      path: ./artifact
+      # TODO: rework configuration loading:
+      # $(python ./devops/scripts/benchmarking/load_config.py ./devops constants)
+
+      cd "./llvm-ci-perf-results"
+      git config user.name "SYCL Benchmarking Bot"
+      git config user.email "sys_sycl_benchmarks@intel.com"
+      git pull
+      git add .
+      # Make sure changes have been made
+      if git diff --quiet && git diff --cached --quiet; then
+        echo "No new results added, skipping push."
+      else
+        git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}"
+        git push "https://$GITHUB_TOKEN@github.com/intel/llvm-ci-perf-results.git" unify-ci
+      fi
+#  - name: Find benchmark result artifact here
+#    if: always()
+#    shell: bash
+#    run: |
+#      cat << EOF
+#      #
+#      # Artifact link for benchmark results here:
+#      #
+#      EOF
+#  - name: Archive compute-benchmark results
+#    if: always()
+#    uses: actions/upload-artifact@v4
+#    with:
+#      name: Compute-benchmark run ${{ github.run_id }} (${{ runner.name }})
+#      path: ./artifact
diff --git a/devops/scripts/benchmarks/main.py b/devops/scripts/benchmarks/main.py
index 91f84917f8698..1a15e5407daf3 100755
--- a/devops/scripts/benchmarks/main.py
+++ b/devops/scripts/benchmarks/main.py
@@ -265,11 +265,15 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
             this_name, chart_data, failures, options.output_markdown
         )

-        with open("benchmark_results.md", "w") as file:
+        md_path = options.output_directory
+        if options.output_directory is None:
+            md_path = os.getcwd()
+
+        with open(os.path.join(md_path, "benchmark_results.md"), "w") as file:
             file.write(markdown_content)

         print(
-            f"Markdown with benchmark results has been written to {os.getcwd()}/benchmark_results.md"
+            f"Markdown with benchmark results has been written to {md_path}/benchmark_results.md"
         )

     saved_name = save_name if save_name is not None else this_name
@@ -283,7 +287,10 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
             compare_names.append(saved_name)

     if options.output_html:
-        generate_html(history.runs, compare_names)
+        html_path = options.output_directory
+        if options.output_directory is None:
+            html_path = os.path.join(os.path.dirname(__file__), "html")
+        generate_html(history.runs, compare_names, html_path)


 def validate_and_parse_env_args(env_args):
@@ -398,6 +405,12 @@ def validate_and_parse_env_args(env_args):
         const=options.output_html,
         choices=["local", "remote"],
     )
+    parser.add_argument(
+        "--output-dir",
+        type=str,
+        help="Location for output files, if --output-html or --output-markdown is specified.",
+        default=None,
+    )
     parser.add_argument(
         "--dry-run",
         help="Do not run any actual benchmarks",
@@ -486,6 +499,10 @@ def validate_and_parse_env_args(env_args):
     if args.compute_runtime is not None:
         options.build_compute_runtime = True
         options.compute_runtime_tag = args.compute_runtime
+    if args.output_dir is not None:
+        if not os.path.isdir(args.output_dir):
+            parser.error("Specified --output-dir is not a valid path")
+        options.output_directory = os.path.abspath(args.output_dir)

     benchmark_filter = re.compile(args.filter) if args.filter else None

diff --git a/devops/scripts/benchmarks/options.py b/devops/scripts/benchmarks/options.py
index 7600942acd1e5..332d1615bc78d 100644
--- a/devops/scripts/benchmarks/options.py
+++ b/devops/scripts/benchmarks/options.py
@@ -31,6 +31,7 @@ class Options:
     compare_max: int = 10  # average/median over how many results
     output_markdown: MarkdownSize = MarkdownSize.SHORT
     output_html: str = "local"
+    output_directory: str = None
     dry_run: bool = False
     stddev_threshold: float = 0.02
     iterations_stddev: int = 5
diff --git a/devops/scripts/benchmarks/output_html.py b/devops/scripts/benchmarks/output_html.py
index 53dd4b1e8f968..49b4d1d84a214 100644
--- a/devops/scripts/benchmarks/output_html.py
+++ b/devops/scripts/benchmarks/output_html.py
@@ -8,9 +8,7 @@
 from options import options


-def generate_html(benchmark_runs: list, compare_names: list[str]):
-    # create path to data.js in html folder
-    html_path = os.path.join(os.path.dirname(__file__), "html")
+def generate_html(benchmark_runs: list, compare_names: list[str], html_path: str):
     benchmark_runs.sort(key=lambda run: run.date, reverse=True)

     if options.output_html == "local":
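
A note on the runner-name check added to action.yml: the step allow-lists [a-zA-Z0-9_-] before RUNNER_NAME is written to $GITHUB_ENV and reused as a results subdirectory, so a crafted runner name cannot smuggle shell syntax or path components into later steps. The sketch below restates that allow-list idea in Python purely for illustration; the helper name is hypothetical, and the actual check is the grep shown in the diff.

    import re

    # Same allow-list as the action's grep pattern: letters, digits, underscore, hyphen.
    _RUNNER_NAME_RE = re.compile(r"^[a-zA-Z0-9_-]+$")


    def is_safe_runner_name(name: str) -> bool:
        """Return True only if the name cannot smuggle shell syntax or path components."""
        return _RUNNER_NAME_RE.fullmatch(name) is not None


    if __name__ == "__main__":
        assert is_safe_runner_name("PVC_PERF-01")
        assert not is_safe_runner_name("bad;name")   # shell metacharacter
        assert not is_safe_runner_name("../escape")  # path traversal
        assert not is_safe_runner_name("")           # empty names are rejected too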
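
On the Python side, --output-dir only changes where benchmark_results.md and the HTML data are written; when the flag is absent, main.py keeps the old defaults (the current working directory for markdown, the html/ folder next to the script for HTML). A condensed sketch of that fallback logic, using an illustrative helper name (main.py inlines this rather than defining such a function):

    import os
    from typing import Optional, Tuple


    def resolve_output_dirs(output_directory: Optional[str], script_dir: str) -> Tuple[str, str]:
        """Mirror the fallback rules in main.py: markdown defaults to the current
        working directory, HTML defaults to <script_dir>/html."""
        md_dir = output_directory if output_directory is not None else os.getcwd()
        html_dir = (
            output_directory
            if output_directory is not None
            else os.path.join(script_dir, "html")
        )
        return md_dir, html_dir


    if __name__ == "__main__":
        # With --output-dir set (as the CI action now does), both outputs land together.
        print(resolve_output_dirs("./llvm-ci-perf-results/my-runner", "/path/to/benchmarks"))
        # Without it, the previous behaviour is preserved.
        print(resolve_output_dirs(None, "/path/to/benchmarks"))

This is also why generate_html() now takes html_path as an explicit parameter instead of deriving it from its own file location.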
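
The new flag is validated after parsing rather than through argparse itself, in the same style as the existing --compute-runtime handling: a non-existent directory is rejected with parser.error() and the accepted path is normalised with os.path.abspath(). Below is a self-contained sketch of that pattern; the surrounding parser is illustrative, and only the flag, its default, and the error message are taken from the patch.

    import argparse
    import os

    parser = argparse.ArgumentParser(
        description="Illustrative parser demonstrating the --output-dir validation pattern."
    )
    parser.add_argument(
        "--output-dir",
        type=str,
        help="Location for output files, if HTML or markdown output is enabled.",
        default=None,
    )
    args = parser.parse_args()

    output_directory = None
    if args.output_dir is not None:
        # The directory must already exist; the CI action creates it with mkdir -p beforehand.
        if not os.path.isdir(args.output_dir):
            parser.error("Specified --output-dir is not a valid path")
        output_directory = os.path.abspath(args.output_dir)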