Skip to content

Commit 3ae177a

Browse files
authored
Update CUDA Dockerfile for Python environment setup
1 parent 4206406 commit 3ae177a

File tree

1 file changed

+15
-14
lines changed

1 file changed

+15
-14
lines changed

Docker/cuda/cuda.Dockerfile

Lines changed: 15 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -8,42 +8,43 @@ ENV DEBIAN_FRONTEND=noninteractive \
88

99
WORKDIR /app

# System dependencies: Python toolchain + C/C++ build tools + OpenCL/BLAS.
# python3.12-venv is required so `python3 -m venv` works on this base image.
# (Fixed: the previous revision had a trailing `# <<< IMPORTANT` comment after
# the `\` continuation, which breaks the Dockerfile line continuation.)
# NOTE(review): apt packages are unpinned — consider pinning versions for
# reproducible builds (hadolint DL3008). `apt-get upgrade` removed (DL3005);
# bump the base image tag instead.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        python3 python3-pip python3-dev python3.12-venv \
        git build-essential \
        cmake ninja-build \
        gcc g++ wget \
        ocl-icd-opencl-dev opencl-headers clinfo \
        libclblast-dev libopenblas-dev && \
    mkdir -p /etc/OpenCL/vendors && \
    echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd && \
    rm -rf /var/lib/apt/lists/*

# Create the virtual environment and put it first on PATH, so every plain
# `pip` / `python` invocation below resolves inside the venv.
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:${PATH}"

# Build configuration consumed by llama-cpp-python's CUDA build below.
ENV CUDA_DOCKER_ARCH=all \
    GGML_CUDA=1 \
    FORCE_CMAKE=1 \
    CMAKE_ARGS="-DGGML_CUDA=on"

# Install general Python dependencies into the venv.
# NOTE(review): versions are unpinned — consider pinning (hadolint DL3013).
RUN pip install --upgrade --no-cache-dir pip wheel && \
    pip install --no-cache-dir \
        pytest scikit-build setuptools \
        fastapi uvicorn sse-starlette \
        pydantic-settings starlette-context

# Build and install llama-cpp-python with CUDA support (driven by CMAKE_ARGS).
RUN pip install --no-cache-dir "llama-cpp-python" --verbose

# Copy the project last: source changes most often, and placing it after the
# dependency layers keeps those layers cached across source edits.
COPY . .

EXPOSE 8000

# NOTE(review): the container runs as root — consider adding a non-root USER
# after the install steps. `python` resolves to the venv interpreter via PATH.
CMD ["python", "-m", "llama_cpp.server", "--config_file", "config-cuda.json"]

0 commit comments

Comments
 (0)