Skip to content

Commit 87069cc

Browse files
Fix docker python version (#2845)
1 parent 7e45107 commit 87069cc

File tree

2 files changed

+6
-17
lines changed

2 files changed

+6
-17
lines changed

Dockerfile

Lines changed: 5 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -4,21 +4,8 @@
44
#################### BASE BUILD IMAGE ####################
55
FROM nvidia/cuda:12.1.0-devel-ubuntu22.04 AS dev
66

7-
# Set the DEBIAN_FRONTEND variable to noninteractive to avoid interactive prompts
8-
ENV DEBIAN_FRONTEND=noninteractive
9-
10-
# Preconfigure tzdata for US Central Time (build running in us-central-1 but this really doesn't matter.)
11-
RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
12-
&& echo 'tzdata tzdata/Zones/America select Chicago' | debconf-set-selections
13-
14-
# We install an older version of python here for testing to make sure vllm works with older versions of Python.
15-
# For the actual openai compatible server, we will use the latest version of Python.
167
RUN apt-get update -y \
17-
&& apt-get install -y software-properties-common \
18-
&& add-apt-repository ppa:deadsnakes/ppa -y \
19-
&& apt-get update -y \
20-
&& apt-get install -y python3.8 python3.8-dev python3.8-venv python3-pip git \
21-
&& update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.8 1
8+
&& apt-get install -y python3-pip git
229

2310
# Workaround for https://github.com/openai/triton/issues/2507 and
2411
# https://github.com/pytorch/pytorch/issues/107960 -- hopefully
@@ -88,8 +75,10 @@ RUN --mount=type=cache,target=/root/.cache/pip VLLM_USE_PRECOMPILED=1 pip instal
8875

8976

9077
#################### RUNTIME BASE IMAGE ####################
91-
# use CUDA base as CUDA runtime dependencies are already installed via pip
92-
FROM nvidia/cuda:12.1.0-base-ubuntu22.04 AS vllm-base
78+
# We used the base CUDA image because PyTorch installs its own CUDA libraries.
79+
# However, cupy depends on the CUDA libraries, so we had to switch to the runtime image.
80+
# In the future it would be nice to get a container with PyTorch and CUDA without duplicating the CUDA libraries.
81+
FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 AS vllm-base
9382

9483
# libnccl required for ray
9584
RUN apt-get update -y \

requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,4 +12,4 @@ pydantic >= 2.0 # Required for OpenAI server.
1212
aioprometheus[starlette]
1313
pynvml == 11.5.0
1414
triton >= 2.1.0
15-
cupy-cuda12x == 12.3.0 # Required for CUDA graphs. CUDA 11.8 users should install cupy-cuda11x instead.
15+
cupy-cuda12x == 12.1.0 # Required for CUDA graphs. CUDA 11.8 users should install cupy-cuda11x instead.

0 commit comments

Comments (0)