@@ -224,6 +224,101 @@ jobs:
224
224
extra-cache-key : ${{ env.PYTORCH_VERSION }}
225
225
workspace : /c/gh${{ github.run_id }}
226
226
227
# Check out the pytorch benchmark suite (only needed when torchbench runs).
- name: Clone pytorch benchmark
  if: inputs.suite == 'all' || inputs.suite == 'torchbench'
  uses: actions/checkout@v4
  with:
    # Repository and pinned commit come from workflow-level env vars.
    repository: ${{ env.BENCHMARK_REPO }}
    ref: ${{ env.BENCHMARK_COMMIT_ID }}
    submodules: recursive
    # Checked out into ./benchmark so the install step can `cd benchmark`.
    path: benchmark
# Install torchbench models (only needed when torchbench runs).
# The input is passed via `env` instead of being interpolated directly into
# the single-quoted shell script: a model name containing a quote would
# otherwise break the quoting (and is a script-injection vector).
- name: Install pytorch benchmark
  if: inputs.suite == 'all' || inputs.suite == 'torchbench'
  env:
    ONLY_ONE_MODEL: ${{ inputs.only_one_model }}
  run: |
    .venv\Scripts\activate.ps1
    Invoke-BatchFile "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
    bash -c '
      cd benchmark
      if [[ -n "$ONLY_ONE_MODEL" ]]; then
        python install.py "$ONLY_ONE_MODEL"
      else
        # install all models
        python install.py
      fi
      pip install -e .
    '
# Run the inductor e2e benchmark matrix: suite x mode x dtype.
# Shell robustness fixes vs. the original: array expansions and path
# variables are quoted (word-splitting/glob safety), and the model list is
# read with `IFS= read -r` so backslashes and surrounding whitespace in
# model names are preserved verbatim.
- name: Run e2e tests
  env:
    HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
  run: |
    .venv\Scripts\activate.ps1
    Invoke-BatchFile "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"

    bash -c '
      cd /c/pytorch

      export WORKSPACE="/c/gh${{ github.run_id }}"

      if [[ "${{ inputs.TORCH_COMPILE_DEBUG }}" = "1" ]]; then
        export TORCH_COMPILE_DEBUG="1"
        # torch will save debug logs to $TORCH_COMPILE_DEBUG_DIR/torch_compile_debug
        export TORCH_COMPILE_DEBUG_DIR="$WORKSPACE"
      fi

      if [[ "${{ inputs.suite }}" = "all" ]]; then
        suites=("huggingface" "timm_models" "torchbench")
      else
        suites=("${{ inputs.suite }}")
      fi

      if [[ "${{ inputs.mode }}" = "all" ]]; then
        modes=("inference" "inference-with-freezing" "training")
      else
        modes=("${{ inputs.mode }}")
      fi

      if [[ "${{ inputs.dtype }}" = "all" ]]; then
        dtypes=("amp_bf16" "amp_fp16" "bfloat16" "float16" "float32")
      else
        dtypes=("${{ inputs.dtype }}")
      fi

      # if "only_one_model" is set, then test this model
      # if "models" == "subset", then test the models from .github/models/{accuracy,performance}/{suite}.txt
      # otherwise test all models.

      for suite in "${suites[@]}"; do
        for mode in "${modes[@]}"; do
          for dtype in "${dtypes[@]}"; do

            if [[ -n "${{ inputs.only_one_model }}" ]]; then
              bash -e "$WORKSPACE/scripts/inductor_xpu_test.sh" "$suite" "$dtype" "$mode" "${{ inputs.test_mode }}" xpu 0 static 1 0 "${{ inputs.only_one_model }}"
            elif [[ "${{ inputs.models }}" == "subset" ]]; then
              models_subset_file="$WORKSPACE/.github/models/${{ inputs.test_mode }}/$suite.txt"
              while IFS= read -r model; do
                bash -e "$WORKSPACE/scripts/inductor_xpu_test.sh" "$suite" "$dtype" "$mode" "${{ inputs.test_mode }}" xpu 0 static 1 0 "$model"
              done < "$models_subset_file"
              if [[ "${{ inputs.check_all_subset_models }}" == "true" ]]; then
                # Verify every model from the subset file produced a result row.
                python "$WORKSPACE/scripts/check_inductor_report.py" --models-file="$models_subset_file" \
                  --suite="$suite" \
                  --dtype="$dtype" \
                  --mode="$mode" \
                  --test_mode="${{ inputs.test_mode }}" \
                  --device=xpu \
                  --inductor-log-dir="$WORKSPACE/inductor_log"
              fi
            else
              # No model filter: the test script runs the whole suite.
              bash -e "$WORKSPACE/scripts/inductor_xpu_test.sh" "$suite" "$dtype" "$mode" "${{ inputs.test_mode }}" xpu 0 static 1 0
            fi

          done
        done
      done
    '
227
322
- name : Identify GPU
228
323
run : |
229
324
# Initializing oneAPI to enable sycl-ls, which is used in capture-hw-details.sh on Windows.
0 commit comments