@@ -632,32 +632,33 @@ jobs:
         # run eval_llama wikitext task
         PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_wikitext.sh
 
-  test-eval_llama-mmlu-linux:
-    name: test-eval_llama-mmlu-linux
-    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
-    permissions:
-      id-token: write
-      contents: read
-    strategy:
-      fail-fast: false
-    with:
-      runner: linux.24xlarge
-      docker-image: ci-image:executorch-ubuntu-22.04-clang12
-      submodules: 'recursive'
-      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-      timeout: 90
-      script: |
-        # The generic Linux job chooses to use base env, not the one setup by the image
-        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
-        conda activate "${CONDA_ENV}"
-
-        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "cmake"
-
-        # install llama requirements
-        bash examples/models/llama/install_requirements.sh
-
-        # run eval_llama mmlu task
-        PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_mmlu.sh
+  # TODO(larryliu0820): Fix this issue before reenabling it: https://gist.github.com/larryliu0820/7377ecd0d79dbc06076cec8d9f2b85d2
+  # test-eval_llama-mmlu-linux:
+  #   name: test-eval_llama-mmlu-linux
+  #   uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
+  #   permissions:
+  #     id-token: write
+  #     contents: read
+  #   strategy:
+  #     fail-fast: false
+  #   with:
+  #     runner: linux.24xlarge
+  #     docker-image: ci-image:executorch-ubuntu-22.04-clang12
+  #     submodules: 'recursive'
+  #     ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+  #     timeout: 90
+  #     script: |
+  #       # The generic Linux job chooses to use base env, not the one setup by the image
+  #       CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+  #       conda activate "${CONDA_ENV}"
+
+  #       PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "cmake"
+
+  #       # install llama requirements
+  #       bash examples/models/llama/install_requirements.sh
+
+  #       # run eval_llama mmlu task
+  #       PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_mmlu.sh
 
   test-llama_runner_eager-linux:
     name: test-llama_runner_eager-linux
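Note: while the gist issue is being investigated, the disabled job's steps can still be exercised by hand; a minimal sketch, assuming an ExecuTorch checkout that has already been set up (i.e. `.ci/scripts/setup-linux.sh` has been run):

    # install llama requirements, then run the eval_llama mmlu task directly
    bash examples/models/llama/install_requirements.sh
    PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_mmlu.sh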