
Commit afcb6ad

Add pytorch/inference/{gpu,cpu}/2.3.1/transformers/4.46.0/py311 (#117)
* Add `pytorch/inference/gpu/2.3.1/transformers/4.46.0/py311` (WIP)
  - Include the missing `requirements.txt` installation in `entrypoint.sh` (required to install custom dependencies with custom models)
  - Fix the Python 3.11 installation, as it was not properly installed and Python 3.10 was used instead
  - Use `uv` to install the dependencies, as it is much faster than the default `pip`; `uv` is also able to successfully install `kenlm`, a `transformers` dependency that `pip` fails to install when building the `Dockerfile`
  - Tested with some of the latest models that the bumped dependencies support, such as Gemma 2, Llama 3.2, Stable Diffusion 3.5, and more
* Remove `uv` and don't upgrade `setuptools`: fixing the Python 3.11 and `pip` installation already solves the installation issue affecting `kenlm`, so there is no need to add `uv` for the moment, even though it would be a nice addition
* Add `pytorch/inference/cpu/2.3.1/transformers/4.46.0/py311`
* Update the `pip install` syntax when installing from a URL
* Add `exec` to the `uvicorn` command in `entrypoint.sh` (kudos to @co42 for the catch at huggingface/huggingface-inference-toolkit#94)
* Remove an extra line break in the `Dockerfile`
* Update `HF_INFERENCE_TOOLKIT_VERSION` to 0.5.1; see the latest `huggingface-inference-toolkit` release at https://github.com/huggingface/huggingface-inference-toolkit/releases/tag/0.5.1
* Bump `transformers` to 4.46.1 in `huggingface-inference-toolkit`: `transformers` 4.46.0 was yanked because Python 3.8 support was unintentionally dropped, and 4.46.1 also fixes some issues affecting both `torch.fx` and `onnx`

Co-authored-by: Philipp Schmid <[email protected]>
1 parent: 183c9c8 · commit: afcb6ad
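For context on the `exec` change mentioned above: without `exec`, `bash` remains PID 1 inside the container and the SIGTERM that the platform sends on shutdown is not forwarded to `uvicorn`, so the server is killed without a graceful shutdown. A minimal sketch of the difference (illustrative, not part of this diff):

# Without exec: bash stays as PID 1 and uvicorn runs as a child process,
# so termination signals sent to the container never reach the server.
uvicorn huggingface_inference_toolkit.webservice_starlette:app --host 0.0.0.0 --port ${PORT}

# With exec: the shell replaces itself with uvicorn, which becomes PID 1
# and receives SIGTERM directly, allowing a clean shutdown.
exec uvicorn huggingface_inference_toolkit.webservice_starlette:app --host 0.0.0.0 --port ${PORT}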

File tree

containers/pytorch/inference/cpu/2.3.1/transformers/4.46.1/py311/Dockerfile
containers/pytorch/inference/cpu/2.3.1/transformers/4.46.1/py311/entrypoint.sh
containers/pytorch/inference/gpu/2.3.1/transformers/4.46.1/py311/Dockerfile
containers/pytorch/inference/gpu/2.3.1/transformers/4.46.1/py311/entrypoint.sh

4 files changed: +210 additions, −0 deletions
containers/pytorch/inference/cpu/2.3.1/transformers/4.46.1/py311/Dockerfile
Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
FROM ubuntu:22.04
SHELL ["/bin/bash", "-c"]

LABEL maintainer="Hugging Face"

ENV DEBIAN_FRONTEND=noninteractive

WORKDIR /app

# Install required dependencies
RUN apt-get update && \
    apt-get install software-properties-common -y && \
    add-apt-repository ppa:deadsnakes/ppa && \
    apt-get -y upgrade --only-upgrade systemd openssl cryptsetup && \
    apt-get install -y \
        build-essential \
        bzip2 \
        curl \
        git \
        git-lfs \
        tar \
        gcc \
        g++ \
        cmake \
        libprotobuf-dev \
        protobuf-compiler \
        python3.11 \
        python3.11-dev \
        libsndfile1-dev \
        ffmpeg && \
    rm -rf /var/lib/apt/lists/*

# Set Python 3.11 as the default python version
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 && \
    ln -sf /usr/bin/python3.11 /usr/bin/python

# Install pip from source
RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
    python get-pip.py && \
    rm get-pip.py

# Hugging Face Inference Toolkit
ARG HF_INFERENCE_TOOLKIT_VERSION=0.5.2
ARG HF_INFERENCE_TOOLKIT_URL=git+https://github.com/huggingface/huggingface-inference-toolkit.git@${HF_INFERENCE_TOOLKIT_VERSION}
RUN pip install --upgrade "huggingface-inference-toolkit[torch,diffusers,st,google] @ ${HF_INFERENCE_TOOLKIT_URL}" --no-cache-dir

ENV HF_HUB_ENABLE_HF_TRANSFER="1"

# Install the Google Cloud CLI in a single command
RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" \
    | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \
    curl https://packages.cloud.google.com/apt/doc/apt-key.gpg \
    | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && \
    apt-get update -y && \
    apt-get install google-cloud-sdk -y && \
    apt-get clean autoremove --yes && \
    rm -rf /var/lib/{apt,dpkg,cache,log}

# Copy entrypoint and change permissions
COPY --chmod=0755 containers/pytorch/inference/cpu/2.3.1/transformers/4.46.1/py311/entrypoint.sh entrypoint.sh
ENTRYPOINT ["bash", "-c", "./entrypoint.sh"]
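A quick way to verify the Python 3.11 fix described in the commit message is to build the image and check which interpreter the `update-alternatives` step selected; the image tag below is just an illustrative name:

# Build the CPU image from the repository root (illustrative tag)
docker build -t hf-pytorch-inference-cpu \
    -f containers/pytorch/inference/cpu/2.3.1/transformers/4.46.1/py311/Dockerfile .

# Both `python` and `python3` should report 3.11.x rather than the
# Ubuntu 22.04 default of 3.10
docker run --rm --entrypoint python hf-pytorch-inference-cpu --version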
containers/pytorch/inference/cpu/2.3.1/transformers/4.46.1/py311/entrypoint.sh
Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
#!/bin/bash

# Define the default port
PORT=5000

# Check if AIP_MODE is set and adjust the port for Vertex AI
if [[ ! -z "${AIP_MODE}" ]]; then
    PORT=${AIP_HTTP_PORT}
fi

# Check if AIP_STORAGE_URI is set and starts with "gs://"
if [[ $AIP_STORAGE_URI == gs://* ]]; then
    echo "AIP_STORAGE_URI set and starts with 'gs://', proceeding to download from GCS."
    echo "AIP_STORAGE_URI: $AIP_STORAGE_URI"

    # Define the target directory
    TARGET_DIR="/opt/huggingface/model"
    mkdir -p "$TARGET_DIR"

    # Use gsutil to copy the content from GCS to the target directory
    echo "Running: gsutil -m cp -e -r \"$AIP_STORAGE_URI/*\" \"$TARGET_DIR\""
    gsutil -m cp -e -r "$AIP_STORAGE_URI/*" "$TARGET_DIR"

    # Check if the gsutil command was successful
    if [ $? -eq 0 ]; then
        echo "Model downloaded successfully to ${TARGET_DIR}."
        # Point HF_MODEL_DIR to the local directory
        echo "Updating HF_MODEL_DIR to point to the local directory."
        export HF_MODEL_DIR="$TARGET_DIR"
        export AIP_STORAGE_URI=""
    else
        echo "Failed to download model from GCS."
        exit 1
    fi

    # If the model ships a requirements.txt, install the custom dependencies
    if [ -f "${HF_MODEL_DIR}/requirements.txt" ]; then
        echo "Installing custom dependencies from ${HF_MODEL_DIR}/requirements.txt"
        pip install -r "${HF_MODEL_DIR}/requirements.txt" --no-cache-dir
    fi
fi

# Start the server (exec so uvicorn becomes PID 1 and receives signals)
exec uvicorn huggingface_inference_toolkit.webservice_starlette:app --host 0.0.0.0 --port ${PORT}
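To exercise the Vertex AI branch of this entrypoint locally, the `AIP_*` variables that Vertex AI normally injects can be set by hand; the bucket path below is a placeholder, and `gsutil` needs valid GCP credentials available inside the container:

# Simulate the Vertex AI runtime environment (placeholder values)
docker run --rm -p 8080:8080 \
    -e AIP_MODE=PREDICTION \
    -e AIP_HTTP_PORT=8080 \
    -e AIP_STORAGE_URI=gs://my-bucket/path/to/model \
    hf-pytorch-inference-cpu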
containers/pytorch/inference/gpu/2.3.1/transformers/4.46.1/py311/Dockerfile
Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
FROM nvidia/cuda:12.1.0-devel-ubuntu22.04
SHELL ["/bin/bash", "-c"]

LABEL maintainer="Hugging Face"

ENV DEBIAN_FRONTEND=noninteractive

WORKDIR /app

# Install required dependencies
RUN apt-get update && \
    apt-get install software-properties-common -y && \
    add-apt-repository ppa:deadsnakes/ppa && \
    apt-get -y upgrade --only-upgrade systemd openssl cryptsetup && \
    apt-get install -y \
        build-essential \
        bzip2 \
        curl \
        git \
        git-lfs \
        tar \
        gcc \
        g++ \
        cmake \
        libprotobuf-dev \
        protobuf-compiler \
        python3.11 \
        python3.11-dev \
        libsndfile1-dev \
        ffmpeg && \
    rm -rf /var/lib/apt/lists/*

# Set Python 3.11 as the default python version
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 && \
    ln -sf /usr/bin/python3.11 /usr/bin/python

# Install pip from source
RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
    python get-pip.py && \
    rm get-pip.py

# Hugging Face Inference Toolkit
ARG HF_INFERENCE_TOOLKIT_VERSION=0.5.2
ARG HF_INFERENCE_TOOLKIT_URL=git+https://github.com/huggingface/huggingface-inference-toolkit.git@${HF_INFERENCE_TOOLKIT_VERSION}
RUN pip install --upgrade "huggingface-inference-toolkit[torch,diffusers,st,google] @ ${HF_INFERENCE_TOOLKIT_URL}" --no-cache-dir

ENV HF_HUB_ENABLE_HF_TRANSFER="1"

# Install the Google Cloud CLI in a single command
RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" \
    | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \
    curl https://packages.cloud.google.com/apt/doc/apt-key.gpg \
    | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && \
    apt-get update -y && \
    apt-get install google-cloud-sdk -y && \
    apt-get clean autoremove --yes && \
    rm -rf /var/lib/{apt,dpkg,cache,log}

# Copy entrypoint and change permissions
COPY --chmod=0755 containers/pytorch/inference/gpu/2.3.1/transformers/4.46.1/py311/entrypoint.sh entrypoint.sh
ENTRYPOINT ["bash", "-c", "./entrypoint.sh"]
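The GPU image only differs from the CPU one in its base image (`nvidia/cuda:12.1.0-devel-ubuntu22.04`) and its `COPY` path, so it builds the same way; exposing the GPUs with `--gpus all` requires the NVIDIA Container Toolkit on the host, and the tag below is again an illustrative name:

# Build and run the GPU image; port 5000 is the entrypoint's default
docker build -t hf-pytorch-inference-gpu \
    -f containers/pytorch/inference/gpu/2.3.1/transformers/4.46.1/py311/Dockerfile .
docker run --rm --gpus all -p 5000:5000 hf-pytorch-inference-gpu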
containers/pytorch/inference/gpu/2.3.1/transformers/4.46.1/py311/entrypoint.sh
Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
#!/bin/bash

# Define the default port
PORT=5000

# Check if AIP_MODE is set and adjust the port for Vertex AI
if [[ ! -z "${AIP_MODE}" ]]; then
    PORT=${AIP_HTTP_PORT}
fi

# Check if AIP_STORAGE_URI is set and starts with "gs://"
if [[ $AIP_STORAGE_URI == gs://* ]]; then
    echo "AIP_STORAGE_URI set and starts with 'gs://', proceeding to download from GCS."
    echo "AIP_STORAGE_URI: $AIP_STORAGE_URI"

    # Define the target directory
    TARGET_DIR="/opt/huggingface/model"
    mkdir -p "$TARGET_DIR"

    # Use gsutil to copy the content from GCS to the target directory
    echo "Running: gsutil -m cp -e -r \"$AIP_STORAGE_URI/*\" \"$TARGET_DIR\""
    gsutil -m cp -e -r "$AIP_STORAGE_URI/*" "$TARGET_DIR"

    # Check if the gsutil command was successful
    if [ $? -eq 0 ]; then
        echo "Model downloaded successfully to ${TARGET_DIR}."
        # Point HF_MODEL_DIR to the local directory
        echo "Updating HF_MODEL_DIR to point to the local directory."
        export HF_MODEL_DIR="$TARGET_DIR"
        export AIP_STORAGE_URI=""
    else
        echo "Failed to download model from GCS."
        exit 1
    fi

    # If the model ships a requirements.txt, install the custom dependencies
    if [ -f "${HF_MODEL_DIR}/requirements.txt" ]; then
        echo "Installing custom dependencies from ${HF_MODEL_DIR}/requirements.txt"
        pip install -r "${HF_MODEL_DIR}/requirements.txt" --no-cache-dir
    fi
fi

# Start the server (exec so uvicorn becomes PID 1 and receives signals)
exec uvicorn huggingface_inference_toolkit.webservice_starlette:app --host 0.0.0.0 --port ${PORT}
