        description: |
          Tritonbench Scribe Graph Access Token
    inputs:
-      benchmark_name:
+      test_type:
         required: True
         type: string
-        description: |
-          Benchmark name
-      conda_env:
+        description: |
+          Type of the test (single or abtest)
+      benchmark_name:
         required: True
         type: string
         description: |
-          Conda environment to activate when testing Triton
+          Benchmark name
      side_a_triton:
-        required: False
         type: string
+        required: False
+        default: "triton-lang/triton"
         description: |
-          Triton repo name
+          Triton repository to test on side A, e.g., "triton-lang/triton"
      side_a_commit:
+        type: string
+        required: False
+        description: |
+          Triton commit or tag to test on side A, e.g., "main"
+      side_b_triton:
+        type: string
         required: False
+        default: "triton-lang/triton"
+        description: |
+          Triton repository to test on side B, e.g., "triton-lang/triton"
+      side_b_commit:
         type: string
+        required: False
         description: |
-          Triton repo commit
+          Triton commit or tag to test on side B, e.g., "main"

jobs:
  linux-benchmark-h100:
      contents: read
    env:
      SETUP_SCRIPT: "/workspace/setup_instance.sh"
-      CONDA_ENV: ${{ inputs.conda_env }}
      RUNNER_TYPE: "gcp-h100-runner"
-      JOB_NAME: tritonbench-h100-${{ inputs.conda_env }}-${{ inputs.benchmark_name }}
+      JOB_NAME: tritonbench-h100-benchmark-${{ inputs.test_type }}-${{ inputs.benchmark_name }}
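+      # Side A defaults to the prebuilt "triton-main" conda env; the on-demand
+      # compile steps below override it through $GITHUB_ENV when a custom
+      # repo/commit is requested.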
+      TRITONBENCH_SIDE_A_ENV: "triton-main"
      TRITONBENCH_SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.TRITONBENCH_SCRIBE_GRAPHQL_ACCESS_TOKEN }}
      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -62,18 +74,29 @@ jobs:
        # The max duration enforced by the server side
        role-duration-seconds: 18000
        aws-region: us-east-1
-    - name: Compile Triton (On Demand)
+    - name: Compile Triton on Demand (Side A)
      if: ${{ inputs.side_a_triton && inputs.side_a_commit }}
      run: |
-        bash ./.ci/triton/compile.sh --repo ${{ inputs.side_a_triton }} --commit ${{ inputs.side_a_commit }} --side a
-    - name: Benchmarking
+        bash ./.ci/triton/install.sh --repo ${{ inputs.side_a_triton }} --commit ${{ inputs.side_a_commit }} --side a
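+        # `export VAR=... >> $GITHUB_ENV` persists nothing (export writes no
+        # stdout); append a KEY=value line to $GITHUB_ENV so later steps see it.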
+        echo "TRITONBENCH_SIDE_A_ENV=triton-side-a" >> "${GITHUB_ENV}"
+    - name: Benchmark Triton (Side A)
      run: |
-        if [ -n "${{ inputs.side_a_triton }}" ] && [ -n "${{ inputs.side_a_commit }}" ]; then
-          bash .ci/tritonbench/run-benchmark.sh ${{ inputs.benchmark_name }} --conda-env triton-side-a
-        else
-          bash .ci/tritonbench/run-benchmark.sh ${{ inputs.benchmark_name }}
-        fi
-        cp -r ".benchmarks/${{ inputs.benchmark_name }}" benchmark-output
+        bash ./.ci/tritonbench/run-benchmark.sh ${{ inputs.benchmark_name }} --conda-env "${TRITONBENCH_SIDE_A_ENV}"
+        mkdir -p benchmark-output
+        cp -r ".benchmarks/${{ inputs.benchmark_name }}" "benchmark-output/${TRITONBENCH_SIDE_A_ENV}"
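+        # Clear .benchmarks so a subsequent side-B run starts from a clean slate.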
+        rm -rf .benchmarks || true
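+    # The side-B steps run only for A/B tests; a "single" test exercises side A alone.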
+    - name: Compile Triton on Demand (Side B)
+      if: ${{ inputs.test_type == 'abtest' && inputs.side_b_triton && inputs.side_b_commit }}
+      run: |
+        bash ./.ci/triton/install.sh --repo ${{ inputs.side_b_triton }} --commit ${{ inputs.side_b_commit }} --side b
+        echo "TRITONBENCH_SIDE_B_ENV=triton-side-b" >> "${GITHUB_ENV}"
+    - name: Benchmark Triton (Side B)
+      if: ${{ inputs.test_type == 'abtest' && inputs.side_b_triton && inputs.side_b_commit }}
+      run: |
+        bash ./.ci/tritonbench/run-benchmark.sh ${{ inputs.benchmark_name }} --conda-env "${TRITONBENCH_SIDE_B_ENV}"
+        mkdir -p benchmark-output
+        cp -r ".benchmarks/${{ inputs.benchmark_name }}" "benchmark-output/${TRITONBENCH_SIDE_B_ENV}"
+        rm -rf .benchmarks || true
    - name: Upload result to GH Actions Artifact
      uses: actions/upload-artifact@v4
      with:
@@ -82,21 +105,34 @@ jobs:
    - name: Upload result to Scribe
      run: |
        . "${SETUP_SCRIPT}"
-        latest_result_json=$(find ./benchmark-output -name "result.json" | sort -r | head -n 1)
-        python ./.ci/upload/scribe.py --json ${latest_result_json}
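+        # Side A is always populated (default "triton-main"); side B is set
+        # only for abtest runs, so each upload is guarded separately.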
+        if [ -n "${TRITONBENCH_SIDE_A_ENV}" ]; then
+          triton_side_a_json=$(find ./benchmark-output/${TRITONBENCH_SIDE_A_ENV} -name "result.json" | sort -r | head -n 1)
+          python ./.ci/upload/scribe.py --json "${triton_side_a_json}"
+        fi
+        if [ -n "${TRITONBENCH_SIDE_B_ENV}" ]; then
+          triton_side_b_json=$(find ./benchmark-output/${TRITONBENCH_SIDE_B_ENV} -name "result.json" | sort -r | head -n 1)
+          python ./.ci/upload/scribe.py --json "${triton_side_b_json}"
+        fi
    - name: Rewrite Tritonbench json to ClickHouse style
      run: |
        . "${SETUP_SCRIPT}"
-        latest_result_json=$(find ./benchmark-output -name "result.json" | sort -r | head -n 1)
-        python ./.ci/test_infra/oss_ci_benchmark_v3.py --json ${latest_result_json} \
-          --output benchmark-output/results/result.json
+        if [ -n "${TRITONBENCH_SIDE_A_ENV}" ]; then
+          triton_side_a_json=$(find ./benchmark-output/${TRITONBENCH_SIDE_A_ENV} -name "result.json" | sort -r | head -n 1)
+          python ./.ci/test_infra/oss_ci_benchmark_v3.py --json "${triton_side_a_json}" \
+            --output "benchmark-output/clickhouse-results/result-${TRITONBENCH_SIDE_A_ENV}.json"
+        fi
+        if [ -n "${TRITONBENCH_SIDE_B_ENV}" ]; then
+          triton_side_b_json=$(find ./benchmark-output/${TRITONBENCH_SIDE_B_ENV} -name "result.json" | sort -r | head -n 1)
+          python ./.ci/test_infra/oss_ci_benchmark_v3.py --json "${triton_side_b_json}" \
+            --output "benchmark-output/clickhouse-results/result-${TRITONBENCH_SIDE_B_ENV}.json"
+        fi
    - name: Setup uploader dependencies
      run: |
        sudo apt-get install -y python3-pip
    - name: Upload result to ClickHouse
      uses: pytorch/test-infra/.github/actions/upload-benchmark-results@main
      with:
-        benchmark-results-dir: benchmark-output/results
+        benchmark-results-dir: benchmark-output/clickhouse-results
        dry-run: false
        schema-version: v3
        github-token: ${{ secrets.GITHUB_TOKEN }}