@@ -585,34 +585,34 @@ jobs:
  # # Test llama2
  # PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llama.sh -model stories110M -build_tool cmake -dtype "${DTYPE}" -mode "${MODE}"

- # # # TODO(jackzhxng): Runner consistently runs out of memory before test finishes. Try to find a more powerful runner.
- # # test-llava-runner-macos:
- # #   name: test-llava-runner-macos
- # #   uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
- # #   strategy:
- # #     fail-fast: false
- # #   with:
- # #     runner: macos-14-xlarge
- # #     python-version: '3.11'
- # #     submodules: 'recursive'
- # #     ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- # #     timeout: 900
- # #     script: |
- # #       BUILD_TOOL=cmake
-
- # #       bash .ci/scripts/setup-conda.sh
- # #       # Setup MacOS dependencies as there is no Docker support on MacOS atm
- # #       GITHUB_RUNNER=1 PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}"
-
- # #       # install Llava requirements
- # #       ${CONDA_RUN} bash examples/models/llama/install_requirements.sh
- # #       ${CONDA_RUN} bash examples/models/llava/install_requirements.sh
-
- # #       # run python unittest
- # #       ${CONDA_RUN} python -m unittest examples.models.llava.test.test_llava
-
- # #       # run e2e (export, tokenizer and runner)
- # #       PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llava.sh
+ # # TODO(jackzhxng): Runner consistently runs out of memory before test finishes. Try to find a more powerful runner.
+ # test-llava-runner-macos:
+ #   name: test-llava-runner-macos
+ #   uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
+ #   strategy:
+ #     fail-fast: false
+ #   with:
+ #     runner: macos-14-xlarge
+ #     python-version: '3.11'
+ #     submodules: 'recursive'
+ #     ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ #     timeout: 900
+ #     script: |
+ #       BUILD_TOOL=cmake
+
+ #       bash .ci/scripts/setup-conda.sh
+ #       # Setup MacOS dependencies as there is no Docker support on MacOS atm
+ #       GITHUB_RUNNER=1 PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}"
+
+ #       # install Llava requirements
+ #       ${CONDA_RUN} bash examples/models/llama/install_requirements.sh
+ #       ${CONDA_RUN} bash examples/models/llava/install_requirements.sh
+
+ #       # run python unittest
+ #       ${CONDA_RUN} python -m unittest examples.models.llava.test.test_llava
+
+ #       # run e2e (export, tokenizer and runner)
+ #       PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llava.sh

  # test-qnn-model:
  #   name: test-qnn-model