@@ -1016,34 +1016,34 @@ jobs:
# # Test llama2
# PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llama.sh -model stories110M -build_tool cmake -dtype "${DTYPE}" -mode "${MODE}"

- # # # TODO(jackzhxng): Runner consistently runs out of memory before test finishes. Try to find a more powerful runner.
- # # test-llava-runner-macos:
- # # name: test-llava-runner-macos
- # # uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
- # # strategy:
- # # fail-fast: false
- # # with:
- # # runner: macos-14-xlarge
- # # python-version: '3.11'
- # # submodules: 'recursive'
- # # ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- # # timeout: 900
- # # script: |
- # # BUILD_TOOL=cmake
-
- # # bash .ci/scripts/setup-conda.sh
- # # # Setup MacOS dependencies as there is no Docker support on MacOS atm
- # # GITHUB_RUNNER=1 PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}"
-
- # # # install Llava requirements
- # # ${CONDA_RUN} bash examples/models/llama/install_requirements.sh
- # # ${CONDA_RUN} bash examples/models/llava/install_requirements.sh
-
- # # # run python unittest
- # # ${CONDA_RUN} python -m unittest examples.models.llava.test.test_llava
-
- # # # run e2e (export, tokenizer and runner)
- # # PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llava.sh
+ # # TODO(jackzhxng): Runner consistently runs out of memory before test finishes. Try to find a more powerful runner.
+ # test-llava-runner-macos:
+ # name: test-llava-runner-macos
+ # uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
+ # strategy:
+ # fail-fast: false
+ # with:
+ # runner: macos-14-xlarge
+ # python-version: '3.11'
+ # submodules: 'recursive'
+ # ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ # timeout: 900
+ # script: |
+ # BUILD_TOOL=cmake
+
+ # bash .ci/scripts/setup-conda.sh
+ # # Setup MacOS dependencies as there is no Docker support on MacOS atm
+ # GITHUB_RUNNER=1 PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}"
+
+ # # install Llava requirements
+ # ${CONDA_RUN} bash examples/models/llama/install_requirements.sh
+ # ${CONDA_RUN} bash examples/models/llava/install_requirements.sh
+
+ # # run python unittest
+ # ${CONDA_RUN} python -m unittest examples.models.llava.test.test_llava
+
+ # # run e2e (export, tokenizer and runner)
+ # PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llava.sh

# test-qnn-model:
# name: test-qnn-model