125 changes: 0 additions & 125 deletions .github/workflows/_linux-benchmark-abtest-h100.yml

This file was deleted.

84 changes: 60 additions & 24 deletions .github/workflows/_linux-benchmark-h100.yml
@@ -7,26 +7,38 @@ on:
description: |
Tritonbench Scribe Graph Access Token
inputs:
benchmark_name:
test_type:
required: True
type: string
description: |
Benchmark name
conda_env:
Type of the test (single or abtest)
benchmark_name:
required: True
type: string
description: |
Conda environment to activate when testing Triton
Benchmark name
side_a_triton:
required: False
type: string
required: False
default: "triton-lang/triton"
description: |
Triton repo name
Triton repository to test on side A, e.g., "triton-lang/triton"
side_a_commit:
type: string
required: False
description: |
Triton commit or tag to test on side A, e.g., "main"
side_b_triton:
type: string
required: False
default: "triton-lang/triton"
description: |
Triton repo commit
Triton repository to test on side B, e.g., "triton-lang/triton"
side_b_commit:
type: string
required: False
description: |
Triton commit or tag to test on side B, e.g., "main"

jobs:
linux-benchmark-h100:
@@ -39,9 +51,9 @@ jobs:
contents: read
env:
SETUP_SCRIPT: "/workspace/setup_instance.sh"
CONDA_ENV: ${{ inputs.conda_env }}
RUNNER_TYPE: "gcp-h100-runner"
JOB_NAME: tritonbench-h100-${{ inputs.conda_env }}-${{ inputs.benchmark_name }}
JOB_NAME: tritonbench-h100-benchmark-${{ inputs.test_type }}-${{ inputs.benchmark_name }}
TRITONBENCH_SIDE_A_ENV: "triton-main"
TRITONBENCH_SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.TRITONBENCH_SCRIBE_GRAPHQL_ACCESS_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -62,18 +74,29 @@ jobs:
# The max duration enforced by the server side
role-duration-seconds: 18000
aws-region: us-east-1
- name: Compile Triton (On Demand)
- name: Compile Triton on Demand (Side A)
if: ${{ inputs.side_a_triton && inputs.side_a_commit }}
run: |
bash ./.ci/triton/compile.sh --repo ${{ inputs.side_a_triton }} --commit ${{ inputs.side_a_commit }} --side a
- name: Benchmarking
bash ./.ci/triton/install.sh --repo ${{ inputs.side_a_triton }} --commit ${{ inputs.side_a_commit }} --side a
echo "TRITONBENCH_SIDE_A_ENV=triton-side-a" >> "$GITHUB_ENV"
- name: Benchmark Triton (Side A)
run: |
if [ -n "${{ inputs.side_a_triton }}" ] && [ -n "${{ inputs.side_a_commit }}" ]; then
bash .ci/tritonbench/run-benchmark.sh ${{ inputs.benchmark_name }} --conda-env triton-side-a
else
bash .ci/tritonbench/run-benchmark.sh ${{ inputs.benchmark_name }}
fi
cp -r ".benchmarks/${{ inputs.benchmark_name }}" benchmark-output
bash ./.ci/tritonbench/run-benchmark.sh ${{ inputs.benchmark_name }} --conda-env ${TRITONBENCH_SIDE_A_ENV}
mkdir -p benchmark-output
cp -r .benchmarks/${{ inputs.benchmark_name }} benchmark-output/${TRITONBENCH_SIDE_A_ENV}
rm -rf .benchmarks || true
- name: Compile Triton on Demand (Side B)
if: ${{ inputs.test_type == 'abtest' && inputs.side_b_triton && inputs.side_b_commit }}
run: |
bash ./.ci/triton/install.sh --repo ${{ inputs.side_b_triton }} --commit ${{ inputs.side_b_commit }} --side b
echo "TRITONBENCH_SIDE_B_ENV=triton-side-b" >> "$GITHUB_ENV"
- name: Benchmark Triton (Side B)
if: ${{ inputs.test_type == 'abtest' && inputs.side_b_triton && inputs.side_b_commit }}
run: |
bash ./.ci/tritonbench/run-benchmark.sh ${{ inputs.benchmark_name }} --conda-env ${TRITONBENCH_SIDE_B_ENV}
mkdir -p benchmark-output
cp -r ".benchmarks/${{ inputs.benchmark_name }}" benchmark-output/${TRITONBENCH_SIDE_B_ENV}
rm -rf .benchmarks || true
- name: Upload result to GH Actions Artifact
uses: actions/upload-artifact@v4
with:
@@ -82,21 +105,34 @@ jobs:
- name: Upload result to Scribe
run: |
. "${SETUP_SCRIPT}"
latest_result_json=$(find ./benchmark-output -name "result.json" | sort -r | head -n 1)
python ./.ci/upload/scribe.py --json ${latest_result_json}
if [[ -n "${TRITONBENCH_SIDE_A_ENV}" ]]; then
triton_side_a_json=$(find ./benchmark-output/${TRITONBENCH_SIDE_A_ENV} -name "result.json" | sort -r | head -n 1)
python ./.ci/upload/scribe.py --json ${triton_side_a_json}
fi
if [[ -n "${TRITONBENCH_SIDE_B_ENV}" ]]; then
triton_side_b_json=$(find ./benchmark-output/${TRITONBENCH_SIDE_B_ENV} -name "result.json" | sort -r | head -n 1)
python ./.ci/upload/scribe.py --json ${triton_side_b_json}
fi
- name: Rewrite Tritonbench json to ClickHouse style
run: |
. "${SETUP_SCRIPT}"
latest_result_json=$(find ./benchmark-output -name "result.json" | sort -r | head -n 1)
python ./.ci/test_infra/oss_ci_benchmark_v3.py --json ${latest_result_json} \
--output benchmark-output/results/result.json
if [[ -n "${TRITONBENCH_SIDE_A_ENV}"" ]]; then
triton_side_a_json=$(find ./benchmark-output/${TRITONBENCH_SIDE_A_ENV} -name "result.json" | sort -r | head -n 1)
python ./.ci/test_infra/oss_ci_benchmark_v3.py --json "${triton_side_a_json}" \
--output "benchmark-output/clickhouse-results/result-${TRITONBENCH_SIDE_A_ENV}.json"
fi
if [[ -n "${TRITONBENCH_SIDE_B_ENV}"" ]]; then
triton_side_a_json=$(find ./benchmark-output/${TRITONBENCH_SIDE_B_ENV} -name "result.json" | sort -r | head -n 1)
python ./.ci/test_infra/oss_ci_benchmark_v3.py --json "${triton_side_a_json}" \
--output "benchmark-output/clickhouse-results/result-${TRITONBENCH_SIDE_B_ENV}.json"
fi
- name: Setup uploader dependencies
run: |
sudo apt-get install -y python3-pip
- name: Upload result to ClickHouse
uses: pytorch/test-infra/.github/actions/upload-benchmark-results@main
with:
benchmark-results-dir: benchmark-output/results
benchmark-results-dir: benchmark-output/clickhouse-results
dry-run: false
schema-version: v3
github-token: ${{ secrets.GITHUB_TOKEN }}
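
Note on the $GITHUB_ENV lines fixed above: a plain 'export' only lives for the duration of the current step's shell, so the on-demand install steps must append a KEY=value line to the file that $GITHUB_ENV points at for the value to survive into the later benchmark and upload steps. A minimal sketch of the pattern (step names are illustrative, not from this PR):

steps:
  - name: Set a variable for later steps
    # Appending to the $GITHUB_ENV file persists the variable across steps;
    # a plain `export` disappears when this step's shell exits.
    run: echo "TRITONBENCH_SIDE_A_ENV=triton-side-a" >> "$GITHUB_ENV"
  - name: Read it back in a later step
    run: echo "Side A conda env: ${TRITONBENCH_SIDE_A_ENV}"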
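
For context, this file is a reusable workflow invoked via workflow_call, so the new test_type, side_a_*, and side_b_* inputs are supplied by a caller. A hypothetical caller sketch (workflow name, schedule, benchmark name, and commit values are illustrative, not taken from this PR):

name: nightly-h100-abtest
on:
  schedule:
    - cron: "0 6 * * *"

jobs:
  abtest:
    # Compare Triton main (side A) against a hypothetical tag (side B)
    uses: ./.github/workflows/_linux-benchmark-h100.yml
    with:
      test_type: abtest
      benchmark_name: nightly
      side_a_triton: triton-lang/triton
      side_a_commit: main
      side_b_triton: triton-lang/triton
      side_b_commit: v3.1.0
    secrets: inherit  # forwards TRITONBENCH_SCRIBE_GRAPHQL_ACCESS_TOKEN, AWS keys, etc.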