2 files changed: +36 −36 lines changed
@@ -591,42 +591,6 @@ jobs:
         # Test static llama weight sharing and accuracy
         PYTHON_EXECUTABLE=python bash .ci/scripts/test_qnn_static_llama.sh

-  test-static-llama-qnn-eval-linux:
-    name: test-static-llama-qnn-eval-linux
-    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
-    permissions:
-      id-token: write
-      contents: read
-    strategy:
-      fail-fast: false
-      matrix:
-        config:
-          - name: "baseline"
-            flags: ""
-            threshold: 62.0
-    with:
-      runner: linux.2xlarge
-      docker-image: ci-image:executorch-ubuntu-22.04-qnn-sdk
-      submodules: 'recursive'
-      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-      timeout: 180
-      script: |
-        # The generic Linux job chooses to use base env, not the one setup by the image
-        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
-        conda activate "${CONDA_ENV}"
-        BUILD_TOOL="cmake"
-        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
-        PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
-        # Setup executorch
-        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}"
-        # Setup install_requirements for llama
-        PYTHON_EXECUTABLE=python bash examples/models/llama/install_requirements.sh
-
-        echo ">>> Running config: ${{ matrix.config.name }}"
-        PYTHON_EXECUTABLE=python bash .ci/scripts/test_qnn_static_llama_eval.sh \
-          --flags "${{ matrix.config.flags }}" \
-          --threshold "${{ matrix.config.threshold }}"
-
   test-qnn-models-linux:
     name: test-qnn-models-linux
     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
@@ -973,6 +973,42 @@ jobs:
         # Test llama2
         PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh -model stories110M -build_tool "${BUILD_TOOL}" -mode "${MODE}" -dtype "${DTYPE}" -pt2e_quantize "${PT2E_QUANTIZE}"

+  test-static-llama-qnn-eval-linux:
+    name: test-static-llama-qnn-eval-linux
+    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
+    permissions:
+      id-token: write
+      contents: read
+    strategy:
+      fail-fast: false
+      matrix:
+        config:
+          - name: "baseline"
+            flags: ""
+            threshold: 62.0
+    with:
+      runner: linux.2xlarge
+      docker-image: ci-image:executorch-ubuntu-22.04-qnn-sdk
+      submodules: 'recursive'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 180
+      script: |
+        # The generic Linux job chooses to use base env, not the one setup by the image
+        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+        conda activate "${CONDA_ENV}"
+        BUILD_TOOL="cmake"
+        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
+        PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
+        # Setup executorch
+        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}"
+        # Setup install_requirements for llama
+        PYTHON_EXECUTABLE=python bash examples/models/llama/install_requirements.sh
+
+        echo ">>> Running config: ${{ matrix.config.name }}"
+        PYTHON_EXECUTABLE=python bash .ci/scripts/test_qnn_static_llama_eval.sh \
+          --flags "${{ matrix.config.flags }}" \
+          --threshold "${{ matrix.config.threshold }}"
+
   unittest-release:
     uses: ./.github/workflows/_unittest.yml
     permissions:
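
For reference, a minimal sketch of what the relocated job executes for its single matrix entry, with the "baseline" values inlined. This assumes a machine where the QNN SDK, ExecuTorch, and the llama requirements have already been set up by the preceding workflow steps, and that .ci/scripts/test_qnn_static_llama_eval.sh accepts --flags and --threshold exactly as invoked above; it is not an additional command introduced by this change.

# Hypothetical local reproduction of the CI eval step, assuming the
# setup-qnn-deps.sh / build-qnn-sdk.sh / setup-linux.sh steps above
# have already been run in the current environment.
BUILD_TOOL="cmake"
PYTHON_EXECUTABLE=python bash examples/models/llama/install_requirements.sh

# Matrix values for the "baseline" config: no extra flags, accuracy
# threshold of 62.0.
PYTHON_EXECUTABLE=python bash .ci/scripts/test_qnn_static_llama_eval.sh \
  --flags "" \
  --threshold "62.0"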