@@ -78,29 +78,14 @@ jobs:
7878 - name : Checkout repository
7979 uses : actions/checkout@v4
8080
81- - name : Get pinned PyTorch ref
82- shell : bash --noprofile --norc -eo pipefail {0}
83- if : inputs.env_manager != 'base' && inputs.ref == ''
84- run : |
85- PYTORCH_REF="$(<.github/pins/pytorch-upstream.txt)"
86- echo "PYTORCH_REF=$PYTORCH_REF" | tee -a "$GITHUB_ENV"
87-
88- - name : Clone PyTorch repository
89- if : inputs.env_manager != 'base'
90- uses : actions/checkout@v4
91- with :
92- repository : pytorch/pytorch
93- ref : ${{ inputs.pytorch_ref || env.PYTORCH_REF }}
94- path : pytorch
95-
9681 - name : Load conda cache
9782 id : conda-cache
9883 uses : ./.github/actions/load
9984 env :
10085 CACHE_NUMBER : 6
10186 with :
10287 path : $HOME/miniforge3/envs/triton
103- key : conda-${{ inputs.env_manager }}-py${{ matrix.python }}-${{ hashFiles('scripts/triton.yml', 'python/pyproject.toml', 'python/setup.py', 'pytorch/.ci/docker/ci_commit_pins/huggingface.txt' ) }}-${{ env.CACHE_NUMBER }}
88+ key : conda-${{ inputs.env_manager }}-py${{ matrix.python }}-${{ hashFiles('scripts/triton.yml', 'python/pyproject.toml', 'python/setup.py') }}-${{ env.CACHE_NUMBER }}
10489
10590 - name : Install Manager Environment
10691 shell : bash --noprofile --norc -eo pipefail {0}
@@ -143,10 +128,6 @@ jobs:
143128 run : |
144129 pip install torch --index-url https://download.pytorch.org/whl/nightly/xpu
145130
146- - name : Install pass_rate dependencies
147- run : |
148- pip install defusedxml
149-
150131 - name : Setup Triton
151132 if : inputs.env_manager == 'base'
152133 uses : ./.github/actions/setup-triton
@@ -161,6 +142,12 @@ jobs:
161142 pip install pybind11
162143 pip install --no-build-isolation -e '.[build,tests,tutorials]'
163144
145+ - name : Run E2E test
146+ run : |
147+ #FIXME move back test when ready
148+ scripts/inductor_xpu_test.sh huggingface float32 inference accuracy xpu 0 static 1 0 AlbertForMaskedLM
149+ exit 1
150+
164151 - name : Create test-triton command line
165152 run : |
166153 if [[ -n "${{ inputs.skip_list }}" ]]; then
@@ -217,33 +204,6 @@ jobs:
217204 ref : ${{ env.TRANSFORMERS_VERSION }}
218205 try-tag-prefix : v
219206
220- - name : Install transformers dependencies
221- if : inputs.env_manager == 'base'
222- run : |
223- pip install pyyaml pandas scipy numpy psutil pyre_extensions torchrec
224-
225- - name : Install transformers
226- if : (inputs.env_manager != 'base') && (steps.conda-cache.outputs.status == 'miss')
227- run : |
228- git clone --recursive https://github.com/huggingface/transformers
229- cd transformers
230- git checkout ${{ env.TRANSFORMERS_VERSION }}
231- python setup.py bdist_wheel
232- pip install dist/*.whl
233- python -c "import transformers; print(transformers.__version__)"
234- pip install pyyaml pandas scipy numpy psutil pyre_extensions torchrec
235-
236- - name : Run E2E test
237- run : |
238- # Set WORKSPACE for inductor_xpu_test.sh to make sure it creates "inductor_log" outside of pytorch cloned directory
239- export WORKSPACE=$GITHUB_WORKSPACE
240- cd pytorch
241- # TODO: Find the fastest Hugging Face model
242- $GITHUB_WORKSPACE/scripts/inductor_xpu_test.sh huggingface float32 inference accuracy xpu 0 static 1 0 AlbertForMaskedLM
243- # The script above always returns 0, so we need an additional check to see if the accuracy test passed
244- cat $WORKSPACE/inductor_log/*/*/*.csv
245- grep AlbertForMaskedLM $WORKSPACE/inductor_log/*/*/*.csv | grep -q ,pass,
246-
247207 - name : Save pip cache
248208 if : (inputs.env_manager == 'base') && (steps.pip-cache.outputs.status == 'miss')
249209 uses : ./.github/actions/save
@@ -260,6 +220,7 @@ jobs:
260220
261221 - name : Pass rate
262222 run : |
223+ pip install defusedxml
263224 source ./scripts/capture-hw-details.sh
264225 python3 scripts/pass_rate.py --reports reports ${{ env.SKIPLIST }}
265226 python3 scripts/pass_rate.py --reports reports --json ${{ env.SKIPLIST }} > pass_rate.json