Skip to content

Commit a29f867

Browse files
authored
Workflow to run E2E tests on Windows (#4518)
The same parameters as E2E on Linux, but without parallel jobs (yet). Fixes #3591. --------- Signed-off-by: Pavel Chekin <[email protected]>
1 parent 93ac1c1 commit a29f867

File tree

2 files changed

+98
-3
lines changed

2 files changed

+98
-3
lines changed

.github/workflows/e2e-reusable.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -215,16 +215,16 @@ jobs:
215215
env:
216216
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
217217
run: |
218+
cd pytorch
219+
218220
export WORKSPACE=$GITHUB_WORKSPACE
219221
220-
if [[ "${{ inputs.TORCH_COMPILE_DEBUG }}" == "1" ]] ; then
222+
if [[ "${{ inputs.TORCH_COMPILE_DEBUG }}" = "1" ]] ; then
221223
export TORCH_COMPILE_DEBUG="1"
222224
# torch will save debug logs to $TORCH_COMPILE_DEBUG_DIR/torch_compile_debug
223225
export TORCH_COMPILE_DEBUG_DIR=$GITHUB_WORKSPACE
224226
fi
225227
226-
cd pytorch
227-
228228
# if "only_one_model" is set, then test this model
229229
# if "models" == "subset", then test the models from .github/models/{accuracy,performance}/{suite}.txt
230230
# otherwise test all models.

.github/workflows/e2e-windows.yml

Lines changed: 95 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -224,6 +224,101 @@ jobs:
224224
extra-cache-key: ${{ env.PYTORCH_VERSION }}
225225
workspace: /c/gh${{ github.run_id }}
226226

227+
- name: Clone pytorch benchmark
228+
if: inputs.suite == 'all' || inputs.suite == 'torchbench'
229+
uses: actions/checkout@v4
230+
with:
231+
repository: ${{ env.BENCHMARK_REPO }}
232+
ref: ${{ env.BENCHMARK_COMMIT_ID }}
233+
submodules: recursive
234+
path: benchmark
235+
236+
- name: Install pytorch benchmark
237+
if: inputs.suite == 'all' || inputs.suite == 'torchbench'
238+
run: |
239+
.venv\Scripts\activate.ps1
240+
Invoke-BatchFile "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
241+
bash -c '
242+
cd benchmark
243+
if [[ "${{ inputs.only_one_model }}" ]]; then
244+
python install.py "${{ inputs.only_one_model }}"
245+
else
246+
# install all models
247+
python install.py
248+
fi
249+
pip install -e .
250+
'
251+
252+
- name: Run e2e tests
253+
env:
254+
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
255+
run: |
256+
.venv\Scripts\activate.ps1
257+
Invoke-BatchFile "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
258+
259+
bash -c '
260+
cd /c/pytorch
261+
262+
export WORKSPACE="/c/gh${{ github.run_id }}"
263+
264+
if [[ "${{ inputs.TORCH_COMPILE_DEBUG }}" = "1" ]] ; then
265+
export TORCH_COMPILE_DEBUG="1"
266+
# torch will save debug logs to $TORCH_COMPILE_DEBUG_DIR/torch_compile_debug
267+
export TORCH_COMPILE_DEBUG_DIR=$WORKSPACE
268+
fi
269+
270+
if [[ "${{ inputs.suite }}" = "all" ]]; then
271+
suites=("huggingface" "timm_models" "torchbench")
272+
else
273+
suites=("${{ inputs.suite }}")
274+
fi
275+
276+
if [[ "${{ inputs.mode }}" = "all" ]]; then
277+
modes=("inference" "inference-with-freezing" "training")
278+
else
279+
modes=("${{ inputs.mode }}")
280+
fi
281+
282+
if [[ "${{ inputs.dtype }}" = "all" ]]; then
283+
dtypes=("amp_bf16" "amp_fp16" "bfloat16" "float16" "float32")
284+
else
285+
dtypes=("${{ inputs.dtype }}")
286+
fi
287+
288+
# if "only_one_model" is set, then test this model
289+
# if "models" == "subset", then test the models from .github/models/{accuracy,performance}/{suite}.txt
290+
# otherwise test all models.
291+
292+
for suite in ${suites[@]}; do
293+
for mode in ${modes[@]}; do
294+
for dtype in ${dtypes[@]}; do
295+
296+
if [[ "${{ inputs.only_one_model }}" ]]; then
297+
bash -e $WORKSPACE/scripts/inductor_xpu_test.sh $suite $dtype $mode ${{ inputs.test_mode }} xpu 0 static 1 0 ${{ inputs.only_one_model }}
298+
elif [[ "${{ inputs.models }}" == "subset" ]]; then
299+
models_subset_file="$WORKSPACE/.github/models/${{ inputs.test_mode }}/$suite.txt"
300+
while read model; do
301+
bash -e $WORKSPACE/scripts/inductor_xpu_test.sh $suite $dtype $mode ${{ inputs.test_mode }} xpu 0 static 1 0 $model
302+
done < $models_subset_file
303+
if [[ "${{ inputs.check_all_subset_models }}" == true ]]; then
304+
python $WORKSPACE/scripts/check_inductor_report.py --models-file="$models_subset_file" \
305+
--suite=$suite \
306+
--dtype=$dtype \
307+
--mode=$mode \
308+
--test_mode=${{ inputs.test_mode }} \
309+
--device=xpu \
310+
--inductor-log-dir="$WORKSPACE/inductor_log"
311+
fi
312+
else
313+
bash -e $WORKSPACE/scripts/inductor_xpu_test.sh $suite $dtype $mode ${{ inputs.test_mode }} xpu 0 static 1 0
314+
fi
315+
316+
done
317+
done
318+
done
319+
'
320+
321+
227322
- name: Identify GPU
228323
run: |
229324
# Initializing oneAPI to enable sycl-ls, which is used in capture-hw-details.sh on Windows.

0 commit comments

Comments (0)