      # run e2e (export, tokenizer and runner)
      PYTHON_EXECUTABLE=python bash .ci/scripts/test_phi_3_mini.sh

451+ test-eval_llama-wikitext-linux :
452+ name : test-eval_llama-wikitext-linux
453+ uses : pytorch/test-infra/.github/workflows/linux_job.yml@main
454+ strategy :
455+ fail-fast : false
456+ with :
457+ runner : linux.24xlarge
458+ docker-image : executorch-ubuntu-22.04-clang12
459+ submodules : ' true'
460+ ref : ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
461+ timeout : 90
462+ script : |
463+ # The generic Linux job chooses to use base env, not the one setup by the image
464+ CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
465+ conda activate "${CONDA_ENV}"
466+
467+ PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"
468+
469+ # install pybind
470+ bash install_requirements.sh --pybind xnnpack
471+
472+ # install llama requirements
473+ bash examples/models/llama/install_requirements.sh
474+
475+ # run eval_llama wikitext task
476+ PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_wikitext.sh
477+
478+ test-eval_llama-mmlu-linux :
479+ name : test-eval_llama-mmlu-linux
480+ uses : pytorch/test-infra/.github/workflows/linux_job.yml@main
481+ strategy :
482+ fail-fast : false
483+ with :
484+ runner : linux.24xlarge
485+ docker-image : executorch-ubuntu-22.04-clang12
486+ submodules : ' true'
487+ ref : ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
488+ timeout : 90
489+ script : |
490+ # The generic Linux job chooses to use base env, not the one setup by the image
491+ CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
492+ conda activate "${CONDA_ENV}"
493+
494+ PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"
495+
496+ # install pybind
497+ bash install_requirements.sh --pybind xnnpack
498+
499+ # install llama requirements
500+ bash examples/models/llama/install_requirements.sh
501+
502+ # run eval_llama mmlu task
503+ PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_mmlu.sh
504+
505+ test-llama_runner_eager-linux :
506+ name : test-llama_runner_eager-linux
507+ uses : pytorch/test-infra/.github/workflows/linux_job.yml@main
508+ strategy :
509+ fail-fast : false
510+ with :
511+ runner : linux.24xlarge
512+ docker-image : executorch-ubuntu-22.04-clang12
513+ submodules : ' true'
514+ ref : ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
515+ timeout : 90
516+ script : |
517+ # The generic Linux job chooses to use base env, not the one setup by the image
518+ CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
519+ conda activate "${CONDA_ENV}"
520+
521+ PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"
522+
523+ # install pybind
524+ bash install_requirements.sh --pybind xnnpack
525+
526+ # install llama requirements
527+ bash examples/models/llama/install_requirements.sh
528+
529+ # run llama runner in eager mode
530+ PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama_runner_eager.sh