From b5f714bf2da2b43723be1e091849cd3d22aa5297 Mon Sep 17 00:00:00 2001 From: Alvaro Bartolome <36760800+alvarobartt@users.noreply.github.com> Date: Tue, 8 Apr 2025 13:19:51 +0200 Subject: [PATCH 1/2] Update `docs/source/en/custom_container.md` --- docs/source/en/custom_container.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/source/en/custom_container.md b/docs/source/en/custom_container.md index ab2913c4..c670026c 100644 --- a/docs/source/en/custom_container.md +++ b/docs/source/en/custom_container.md @@ -37,6 +37,9 @@ Once you have determined the compute capability is determined, set it as the `ru the container as shown in the example below: ```shell +# Get submodule dependencies +git submodule update --init + runtime_compute_cap=80 docker build . -f Dockerfile-cuda --build-arg CUDA_COMPUTE_CAP=$runtime_compute_cap From e179517410da0df64878ed1ebabed55ffc5e741f Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Tue, 8 Apr 2025 13:46:02 +0200 Subject: [PATCH 2/2] Fixup. --- backends/python/server/requirements-intel.txt | 2 +- docs/source/en/intel_container.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/backends/python/server/requirements-intel.txt b/backends/python/server/requirements-intel.txt index 5accea69..8ff67fe4 100644 --- a/backends/python/server/requirements-intel.txt +++ b/backends/python/server/requirements-intel.txt @@ -42,4 +42,4 @@ win32-setctime==1.1.0 ; python_version >= "3.9" and python_version < "3.13" and wrapt==1.15.0 ; python_version >= "3.9" and python_version < "3.13" transformers==4.40.0 ; python_version >= "3.9" and python_version < "3.13" pyrsistent==0.20.0 ; python_version >= "3.9" and python_version < "3.13" -einops==0.8.0 ; python_version >= "3.9" and python_version < "3.13" \ No newline at end of file +einops==0.8.0 ; python_version >= "3.9" and python_version < "3.13" diff --git a/docs/source/en/intel_container.md b/docs/source/en/intel_container.md index f260fb4e..f0fae218 100644 --- 
a/docs/source/en/intel_container.md +++ b/docs/source/en/intel_container.md @@ -107,4 +107,4 @@ docker pull ghcr.io/huggingface/text-embeddings-inference:xpu-ipex-latest To use the prebuilt image optimized for Intel® HPUs (Gaudi), run: ```shell docker pull ghcr.io/huggingface/text-embeddings-inference:hpu-latest -``` \ No newline at end of file +```