Skip to content

Commit 497bc83

Browse files
authored
[CI/Build] Use uv in the Dockerfile (#13566)
1 parent: 3738e6f · commit: 497bc83

File tree

1 file changed

+19
-13
lines changed

1 file changed

+19
-13
lines changed

Dockerfile

Lines changed: 19 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -27,6 +27,9 @@ RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
2727
&& ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
2828
&& curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION} \
2929
&& python3 --version && python3 -m pip --version
30+
# Install uv for faster pip installs
31+
RUN --mount=type=cache,target=/root/.cache/pip \
32+
python3 -m pip install uv
3033

3134
# Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519
3235
# as it was causing spam when compiling the CUTLASS kernels
@@ -52,13 +55,13 @@ WORKDIR /workspace
5255
# after this step
5356
RUN --mount=type=cache,target=/root/.cache/pip \
5457
if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
55-
python3 -m pip install --index-url https://download.pytorch.org/whl/nightly/cu126 "torch==2.7.0.dev20250121+cu126" "torchvision==0.22.0.dev20250121"; \
58+
uv pip install --system --index-url https://download.pytorch.org/whl/nightly/cu126 "torch==2.7.0.dev20250121+cu126" "torchvision==0.22.0.dev20250121"; \
5659
fi
5760

5861
COPY requirements-common.txt requirements-common.txt
5962
COPY requirements-cuda.txt requirements-cuda.txt
6063
RUN --mount=type=cache,target=/root/.cache/pip \
61-
python3 -m pip install -r requirements-cuda.txt
64+
uv pip install --system -r requirements-cuda.txt
6265

6366
# cuda arch list used by torch
6467
# can be useful for both `dev` and `test`
@@ -79,7 +82,7 @@ ARG TARGETPLATFORM
7982
COPY requirements-build.txt requirements-build.txt
8083

8184
RUN --mount=type=cache,target=/root/.cache/pip \
82-
python3 -m pip install -r requirements-build.txt
85+
uv pip install --system -r requirements-build.txt
8386

8487
COPY . .
8588
ARG GIT_REPO_CHECK=0
@@ -144,7 +147,7 @@ COPY requirements-lint.txt requirements-lint.txt
144147
COPY requirements-test.txt requirements-test.txt
145148
COPY requirements-dev.txt requirements-dev.txt
146149
RUN --mount=type=cache,target=/root/.cache/pip \
147-
python3 -m pip install -r requirements-dev.txt
150+
uv pip install --system -r requirements-dev.txt
148151
#################### DEV IMAGE ####################
149152

150153
#################### vLLM installation IMAGE ####################
@@ -174,6 +177,9 @@ RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
174177
&& ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
175178
&& curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION} \
176179
&& python3 --version && python3 -m pip --version
180+
# Install uv for faster pip installs
181+
RUN --mount=type=cache,target=/root/.cache/pip \
182+
python3 -m pip install uv
177183

178184
# Workaround for https://github.com/openai/triton/issues/2507 and
179185
# https://github.com/pytorch/pytorch/issues/107960 -- hopefully
@@ -187,13 +193,13 @@ RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/
187193
# after this step
188194
RUN --mount=type=cache,target=/root/.cache/pip \
189195
if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
190-
python3 -m pip install --index-url https://download.pytorch.org/whl/nightly/cu124 "torch==2.6.0.dev20241210+cu124" "torchvision==0.22.0.dev20241215"; \
196+
uv pip install --system --index-url https://download.pytorch.org/whl/nightly/cu124 "torch==2.6.0.dev20241210+cu124" "torchvision==0.22.0.dev20241215"; \
191197
fi
192198

193199
# Install vllm wheel first, so that torch etc will be installed.
194200
RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \
195201
--mount=type=cache,target=/root/.cache/pip \
196-
python3 -m pip install dist/*.whl --verbose
202+
uv pip install --system dist/*.whl --verbose
197203

198204
# If we need to build FlashInfer wheel before its release:
199205
# $ export FLASHINFER_ENABLE_AOT=1
@@ -210,7 +216,7 @@ RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist
210216
RUN --mount=type=cache,target=/root/.cache/pip \
211217
. /etc/environment && \
212218
if [ "$TARGETPLATFORM" != "linux/arm64" ]; then \
213-
python3 -m pip install https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.1.post1/flashinfer_python-0.2.1.post1+cu124torch2.5-cp38-abi3-linux_x86_64.whl ; \
219+
uv pip install --system https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.1.post1/flashinfer_python-0.2.1.post1+cu124torch2.5-cp38-abi3-linux_x86_64.whl ; \
214220
fi
215221
COPY examples examples
216222

@@ -220,7 +226,7 @@ COPY examples examples
220226
# TODO: Remove this once FlashInfer AOT wheel is fixed
221227
COPY requirements-build.txt requirements-build.txt
222228
RUN --mount=type=cache,target=/root/.cache/pip \
223-
python3 -m pip install -r requirements-build.txt
229+
uv pip install --system -r requirements-build.txt
224230

225231
#################### vLLM installation IMAGE ####################
226232

@@ -233,15 +239,15 @@ ADD . /vllm-workspace/
233239

234240
# install development dependencies (for testing)
235241
RUN --mount=type=cache,target=/root/.cache/pip \
236-
python3 -m pip install -r requirements-dev.txt
242+
uv pip install --system -r requirements-dev.txt
237243

238244
# install development dependencies (for testing)
239245
RUN --mount=type=cache,target=/root/.cache/pip \
240-
python3 -m pip install -e tests/vllm_test_utils
246+
uv pip install --system -e tests/vllm_test_utils
241247

242248
# enable fast downloads from hf (for testing)
243249
RUN --mount=type=cache,target=/root/.cache/pip \
244-
python3 -m pip install hf_transfer
250+
uv pip install --system hf_transfer
245251
ENV HF_HUB_ENABLE_HF_TRANSFER 1
246252

247253
# Copy in the v1 package for testing (it isn't distributed yet)
@@ -262,9 +268,9 @@ FROM vllm-base AS vllm-openai-base
262268
# install additional dependencies for openai api server
263269
RUN --mount=type=cache,target=/root/.cache/pip \
264270
if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
265-
pip install accelerate hf_transfer 'modelscope!=1.15.0' 'bitsandbytes>=0.42.0' 'timm==0.9.10' boto3 runai-model-streamer runai-model-streamer[s3]; \
271+
uv pip install --system accelerate hf_transfer 'modelscope!=1.15.0' 'bitsandbytes>=0.42.0' 'timm==0.9.10' boto3 runai-model-streamer runai-model-streamer[s3]; \
266272
else \
267-
pip install accelerate hf_transfer 'modelscope!=1.15.0' 'bitsandbytes>=0.45.0' 'timm==0.9.10' boto3 runai-model-streamer runai-model-streamer[s3]; \
273+
uv pip install --system accelerate hf_transfer 'modelscope!=1.15.0' 'bitsandbytes>=0.45.0' 'timm==0.9.10' boto3 runai-model-streamer runai-model-streamer[s3]; \
268274
fi
269275

270276
ENV VLLM_USAGE_SOURCE production-docker-image

0 commit comments

Comments (0)