Commit 926c25c

Dockerfile.ubi: get rid of --link flags for COPY operations
The --link flag is not supported by the OpenShift CI (buildah); see containers/buildah#4325.
1 parent 8c548e4 commit 926c25c
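
For context: COPY --link, introduced in Dockerfile syntax 1.4 (BuildKit), places the copied files in their own layer that does not depend on earlier layers, which improves cache reuse; buildah releases without support for the flag reject it outright. A minimal sketch of the pattern this commit applies throughout Dockerfile.ubi (the stage name, paths, and base image below are illustrative, not taken from this repository):

# syntax=docker/dockerfile:1.4

# Stage that produces an artifact (names and paths are illustrative).
FROM registry.access.redhat.com/ubi9/ubi-minimal AS builder
RUN mkdir -p /opt/out && echo artifact > /opt/out/file.txt

FROM registry.access.redhat.com/ubi9/ubi-minimal
# BuildKit-only form; rejected by buildah without --link support
# (containers/buildah#4325):
# COPY --from=builder --link /opt/out /opt/out

# Portable form, as used by this commit; works under both BuildKit and buildah:
COPY --from=builder /opt/out /opt/out

The trade-off is giving up --link's independent-layer caching; a plain COPY is re-run whenever any earlier layer changes, but it builds everywhere.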

File tree

1 file changed (+7, -7 lines)


Dockerfile.ubi

Lines changed: 7 additions & 7 deletions
@@ -43,7 +43,7 @@ RUN curl -fsSL -o ~/miniforge3.sh -O "https://github.com/conda-forge/miniforge/
 ## Python Base #################################################################
 FROM base as python-base
 
-COPY --from=python-install --link /opt/vllm /opt/vllm
+COPY --from=python-install /opt/vllm /opt/vllm
 
 ENV PATH=/opt/vllm/bin/:$PATH
 
@@ -132,7 +132,7 @@ RUN ldconfig /usr/local/cuda-12.2/compat/
 ## Development #################################################################
 FROM cuda-devel AS dev
 
-COPY --from=python-torch-base --link /opt/vllm /opt/vllm
+COPY --from=python-torch-base /opt/vllm /opt/vllm
 ENV PATH=/opt/vllm/bin/:$PATH
 
 # install build and runtime dependencies
@@ -260,9 +260,9 @@ FROM base AS vllm
 
 WORKDIR /vllm-staging
 # COPY files from various places into a staging directory
-COPY --link vllm vllm
-COPY --from=build --link /workspace/vllm/*.so vllm/
-COPY --from=gen-protos --link /workspace/vllm/entrypoints/grpc/pb vllm/entrypoints/grpc/pb
+COPY vllm vllm
+COPY --from=build /workspace/vllm/*.so vllm/
+COPY --from=gen-protos /workspace/vllm/entrypoints/grpc/pb vllm/entrypoints/grpc/pb
 
 # custom COPY command to use umask to control permissions and grant permissions
 # to the group
@@ -281,7 +281,7 @@ FROM cuda-runtime AS vllm-openai
 WORKDIR /workspace
 
 # Create release python environment
-COPY --from=python-torch-base --link /opt/vllm /opt/vllm
+COPY --from=python-torch-base /opt/vllm /opt/vllm
 ENV PATH=/opt/vllm/bin/:$PATH
 
 RUN --mount=type=cache,target=/root/.cache/pip \
@@ -301,7 +301,7 @@ RUN --mount=type=bind,from=flash-attn-builder,src=/usr/src/flash-attention-v2,ta
     pip3 install /usr/src/flash-attention-v2/*.whl --no-cache-dir
 
 # vLLM will not be installed in site-packages
-COPY --from=vllm --link /workspace/ ./
+COPY --from=vllm /workspace/ ./
 
 # Triton needs a CC compiler
 RUN microdnf install -y gcc \
