diff --git a/.github/workflows/trunk.yml b/.github/workflows/trunk.yml
index 84b7208dc9b..3e3d1aed1b1 100644
--- a/.github/workflows/trunk.yml
+++ b/.github/workflows/trunk.yml
@@ -800,11 +800,26 @@ jobs:
         echo "Recipe: $RECIPE"
         echo "Quantize: $QUANTIZE"

-        echo "::group::Set up ExecuTorch"
         # The generic Linux job chooses to use base env, not the one setup by the image
         CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
         conda activate "${CONDA_ENV}"
-        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool cmake
+
+        echo "::group::Setup ExecuTorch"
+        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "cmake"
+        echo "::endgroup::"
+
+        echo "::group::Setup Huggingface"
+        pip install -U "huggingface_hub[cli]" accelerate
+        huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
+        OPTIMUM_ET_VERSION=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
+        pip install git+https://github.com/huggingface/optimum-executorch.git@${OPTIMUM_ET_VERSION}
+        echo "::endgroup::"
+
+        echo "::group::Test MODEL: $MODEL RECIPE: $RECIPE QUANTIZE: $QUANTIZE"
+        export OUTPUT_DIR="$(pwd)/${MODEL}_${RECIPE}_${QUANTIZE}"
+        python .ci/scripts/test_huggingface_optimum_model.py --model "$MODEL" --recipe "$RECIPE" $QUANTIZE --model_dir "$OUTPUT_DIR"
+        echo "::endgroup::"
+
         # Build executor_runner with ETdump enabled
         PYTHON_EXECUTABLE=python cmake -DPYTHON_EXECUTABLE=python \
           -DCMAKE_INSTALL_PREFIX=cmake-out \
@@ -822,25 +837,6 @@ jobs:
           -DEXECUTORCH_ENABLE_EVENT_TRACER=ON \
           -Bcmake-out .
         cmake --build cmake-out -j16 --target install --config Release
-        echo "::endgroup::"
-
-        echo "::group::Set up Hugging Face"
-        pip install -U "huggingface_hub[cli]"
-        huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
-        OPTIMUM_ET_COMMIT=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
-        git clone https://github.com/huggingface/optimum-executorch
-        pushd optimum-executorch
-        # There is no release yet, for CI stability, always test from the same commit on main
-        git checkout $OPTIMUM_ET_COMMIT
-        python install_dev.py --skip_override_torch
-        popd
-        pip list
-        echo "::endgroup::"
-
-        echo "::group::Run tests"
-        export OUTPUT_DIR="$(pwd)/${MODEL}_${RECIPE}_${QUANTIZE}"
-        python .ci/scripts/test_huggingface_optimum_model.py --model ${MODEL} --recipe ${RECIPE} ${QUANTIZE} --model_dir ${OUTPUT_DIR}
-        echo "::endgroup::"

         echo "::group::Generate artifacts for performance profiling"
         ./cmake-out/executor_runner \
@@ -907,16 +903,11 @@ jobs:
         ${CONDA_RUN} python install_executorch.py
         echo "::endgroup::"

-        echo "::group::Set up Hugging Face"
-        pip install -U "huggingface_hub[cli]"
-        huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
-        OPTIMUM_ET_COMMIT=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
-        git clone https://github.com/huggingface/optimum-executorch
-        pushd optimum-executorch
-        # There is no release yet, for CI stability, always test from the same commit on main
-        git checkout $OPTIMUM_ET_COMMIT
-        ${CONDA_RUN} python install_dev.py --skip_override_torch
-        popd
+        echo "::group::Set up Huggingface"
+        ${CONDA_RUN} pip install -U "huggingface_hub[cli]" accelerate
+        ${CONDA_RUN} huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
+        OPTIMUM_ET_VERSION=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
+        ${CONDA_RUN} pip install git+https://github.com/huggingface/optimum-executorch.git@${OPTIMUM_ET_VERSION}
         ${CONDA_RUN} pip list
         echo "::endgroup::"
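For reference, a minimal local sketch of the new pinned install that the diff switches to (it assumes you run it from the root of an executorch checkout with an active Python environment; not the exact CI invocation, which also passes ${CONDA_RUN} on the non-Linux job):

#!/usr/bin/env bash
set -euo pipefail
# Read the same commit pin the workflow reads.
OPTIMUM_ET_VERSION=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
# pip installs straight from a git URL at that commit, replacing the old
# clone + checkout + install_dev.py flow while keeping the pin for CI stability.
pip install "git+https://github.com/huggingface/optimum-executorch.git@${OPTIMUM_ET_VERSION}"
# Optional: confirm what was installed.
pip list | grep -i optimum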