diff --git a/.ci/scripts/test_wheel_package_qnn.sh b/.ci/scripts/test_wheel_package_qnn.sh
index b4f789d1b90..7d431542378 100644
--- a/.ci/scripts/test_wheel_package_qnn.sh
+++ b/.ci/scripts/test_wheel_package_qnn.sh
@@ -139,7 +139,7 @@ run_core_tests () {
   echo "=== [$LABEL] Installing wheel & deps ==="
   "$PIPBIN" install --upgrade pip
   "$PIPBIN" install "$WHEEL_FILE"
-  "$PIPBIN" install torch=="2.9.0"
+  "$PIPBIN" install torch=="2.9.1"
   "$PIPBIN" install --pre torchao --index-url "https://download.pytorch.org/whl/nightly/cpu"
 
   echo "=== [$LABEL] Import smoke tests ==="
diff --git a/.github/workflows/trunk.yml b/.github/workflows/trunk.yml
index 72a32f38076..8051a4d6773 100644
--- a/.github/workflows/trunk.yml
+++ b/.github/workflows/trunk.yml
@@ -612,7 +612,7 @@ jobs:
         conda activate "${CONDA_ENV}"
 
         PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool cmake
-        pip install -U "huggingface_hub[cli]"
+        pip install -U "huggingface_hub[cli]<1.0"
 
         bash .ci/scripts/test_torchao_huggingface_checkpoints.sh ${{ matrix.model }} ${{ matrix.test_with_runner && '--test_with_runner' || '' }}
 
@@ -809,7 +809,7 @@
         echo "::endgroup::"
 
         echo "::group::Setup Huggingface"
-        pip install -U "huggingface_hub[cli]" accelerate
+        pip install -U "huggingface_hub[cli]<1.0" accelerate
         huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
         OPTIMUM_ET_VERSION=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
         pip install git+https://github.com/huggingface/optimum-executorch.git@${OPTIMUM_ET_VERSION}
@@ -905,7 +905,7 @@
         echo "::endgroup::"
 
         echo "::group::Set up Huggingface"
-        ${CONDA_RUN} pip install -U "huggingface_hub[cli]" accelerate
+        ${CONDA_RUN} pip install -U "huggingface_hub[cli]<1.0" accelerate
         ${CONDA_RUN} huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
         OPTIMUM_ET_VERSION=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
         ${CONDA_RUN} pip install git+https://github.com/huggingface/optimum-executorch.git@${OPTIMUM_ET_VERSION}
diff --git a/examples/models/llama/install_requirements.sh b/examples/models/llama/install_requirements.sh
index 580a152a322..0c6b68785c0 100755
--- a/examples/models/llama/install_requirements.sh
+++ b/examples/models/llama/install_requirements.sh
@@ -10,7 +10,7 @@
 # Install tokenizers for hf .json tokenizer.
 # Install snakeviz for cProfile flamegraph
 # Install lm-eval for Model Evaluation with lm-evalution-harness.
-pip install hydra-core huggingface_hub tiktoken torchtune sentencepiece tokenizers snakeviz lm_eval==0.4.5 blobfile
+pip install hydra-core "huggingface_hub<1.0" tiktoken torchtune sentencepiece tokenizers snakeviz lm_eval==0.4.5 blobfile
 
 # Call the install helper for further setup
 python examples/models/llama/install_requirement_helper.py
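
Note (not part of the diff): the change keeps CI on the 0.x series of huggingface_hub and bumps the wheel test's torch pin to 2.9.1. A minimal local sanity check, assuming a clean virtualenv, could confirm the constraints resolve as intended and that the `huggingface-cli` entry point the workflows still invoke is available; the exact commands below are illustrative, not taken from this change.

# Illustrative check in a fresh virtualenv (assumed), mirroring the pinned installs above.
pip install -U "huggingface_hub[cli]<1.0" "torch==2.9.1"
# Print the resolved versions; huggingface_hub should report a 0.x release.
python -c "import huggingface_hub, torch; print(huggingface_hub.__version__, torch.__version__)"
# The trunk.yml jobs above still call `huggingface-cli login`, so the entry point must exist.
huggingface-cli --help >/dev/null && echo "huggingface-cli is available"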