Commit 057e71d

Manually cherry-pick "Pin huggingface_hub<1.0" (#15667)
### Summary
This is a manual cherry-pick of #15399 and #15403 to resolve CI failures caused by an incompatibility between the newly released huggingface_hub package and our pinned version of transformers. Because of differences between main and release/1.0, the automated cherry-pick failed, so I fell back to a manual pick plus some small hand edits to examples/models/llama/install_requirements.sh.

### Test Plan
This PR is running trunk CI.
1 parent a9ef570 commit 057e71d
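
For context, a manual cherry-pick of this kind roughly follows the sketch below. This is illustrative only; the SHAs are placeholders standing in for the commits behind #15399 and #15403, not values taken from this PR.

    # Sketch of the manual workflow (placeholder SHAs, resolve conflicts by hand):
    git fetch origin
    git checkout -b manual-pin-huggingface-hub origin/release/1.0
    git cherry-pick -x <sha-of-15399> <sha-of-15403>   # conflicts expected: main and release/1.0 have diverged
    # resolve conflicts, hand-edit examples/models/llama/install_requirements.sh as needed
    git add -A
    git cherry-pick --continue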

3 files changed: +5 additions, −5 deletions

.ci/scripts/test_wheel_package_qnn.sh

Lines changed: 1 addition & 1 deletion
@@ -139,7 +139,7 @@ run_core_tests () {
   echo "=== [$LABEL] Installing wheel & deps ==="
   "$PIPBIN" install --upgrade pip
   "$PIPBIN" install "$WHEEL_FILE"
-  "$PIPBIN" install torch=="2.9.0"
+  "$PIPBIN" install torch=="2.9.1"
   "$PIPBIN" install --pre torchao --index-url "https://download.pytorch.org/whl/nightly/cpu"

   echo "=== [$LABEL] Import smoke tests ==="

.github/workflows/trunk.yml

Lines changed: 3 additions & 3 deletions
@@ -612,7 +612,7 @@ jobs:
   conda activate "${CONDA_ENV}"

   PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool cmake
-  pip install -U "huggingface_hub[cli]"
+  pip install -U "huggingface_hub[cli]<1.0"

   bash .ci/scripts/test_torchao_huggingface_checkpoints.sh ${{ matrix.model }} ${{ matrix.test_with_runner && '--test_with_runner' || '' }}

@@ -809,7 +809,7 @@ jobs:
   echo "::endgroup::"

   echo "::group::Setup Huggingface"
-  pip install -U "huggingface_hub[cli]" accelerate
+  pip install -U "huggingface_hub[cli]<1.0" accelerate
   huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
   OPTIMUM_ET_VERSION=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
   pip install git+https://github.com/huggingface/optimum-executorch.git@${OPTIMUM_ET_VERSION}
@@ -905,7 +905,7 @@ jobs:
   echo "::endgroup::"

   echo "::group::Set up Huggingface"
-  ${CONDA_RUN} pip install -U "huggingface_hub[cli]" accelerate
+  ${CONDA_RUN} pip install -U "huggingface_hub[cli]<1.0" accelerate
   ${CONDA_RUN} huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
   OPTIMUM_ET_VERSION=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
   ${CONDA_RUN} pip install git+https://github.com/huggingface/optimum-executorch.git@${OPTIMUM_ET_VERSION}
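
One detail worth calling out (an observation, not something stated in the commit): the version constraint has to stay inside the quotes, because these workflow steps run through a shell that would otherwise parse `<` as input redirection. A minimal illustration using the same package:

    # Unquoted, the shell treats '<1.0' as a redirect from a file named "1.0",
    # so pip never sees the upper bound (and the command typically fails because
    # no such file exists):
    #   pip install -U huggingface_hub[cli]<1.0
    # Quoted, pip receives the full requirement and resolves a pre-1.0 release:
    pip install -U "huggingface_hub[cli]<1.0"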

examples/models/llama/install_requirements.sh

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@
 # Install tokenizers for hf .json tokenizer.
 # Install snakeviz for cProfile flamegraph
 # Install lm-eval for Model Evaluation with lm-evalution-harness.
-pip install hydra-core huggingface_hub tiktoken torchtune sentencepiece tokenizers snakeviz lm_eval==0.4.5 blobfile
+pip install hydra-core "huggingface_hub<1.0" tiktoken torchtune sentencepiece tokenizers snakeviz lm_eval==0.4.5 blobfile

 # Call the install helper for further setup
 python examples/models/llama/install_requirement_helper.py
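
A quick post-install check (a sketch, not part of the change) to confirm the pin is honored after running this script:

    # Sketch: the installed huggingface_hub should report a pre-1.0 version.
    python -c "import huggingface_hub; print(huggingface_hub.__version__)"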
