forked from NVIDIA/TensorRT-LLM
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathDockerfile.multi
More file actions
185 lines (144 loc) · 6.24 KB
/
Dockerfile.multi
File metadata and controls
185 lines (144 loc) · 6.24 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
# syntax=docker/dockerfile:1
# Pin the BuildKit Dockerfile frontend: later stages rely on BuildKit-only
# features (RUN --mount=type=cache and --mount=type=bind,from=...).

# Multi-stage Dockerfile
# Stage graph: base -> devel -> tritondevel -> wheel -> release
#              wheel -> tritonbuild, release -> tritonrelease

# ARGs declared before FROM are only visible in FROM lines; stages
# re-declare the ones they consume.
ARG BASE_IMAGE=nvcr.io/nvidia/pytorch
ARG TRITON_IMAGE=nvcr.io/nvidia/tritonserver
ARG BASE_TAG=25.10-py3
# [TODO] Update to NVIDIA Triton 25.10 when it's available
ARG TRITON_BASE_TAG=25.09-py3
# Selects which devel-like stage feeds the wheel/release stages
# (e.g. "devel" or "tritondevel").
ARG DEVEL_IMAGE=devel

FROM ${BASE_IMAGE}:${BASE_TAG} AS base

# Add NVIDIA EULA and AI Terms labels
LABEL com.nvidia.eula="https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-software-license-agreement/"
LABEL com.nvidia.ai-terms="https://www.nvidia.com/en-us/agreements/enterprise-software/product-specific-terms-for-ai-products/"

# Make non-interactive bash (BASH_ENV) and POSIX sh (ENV) source the same
# startup files the NGC base image configures, so RUN steps inherit its env.
# https://www.gnu.org/software/bash/manual/html_node/Bash-Startup-Files.html
# The default values come from `nvcr.io/nvidia/pytorch`
ENV BASH_ENV=${BASH_ENV:-/etc/bash.bashrc}
ENV ENV=${ENV:-/etc/shinit_v2}

# Build-time knobs; echoed so the chosen values appear in the build log.
ARG GITHUB_MIRROR=""
RUN echo "Using GitHub mirror: $GITHUB_MIRROR"
ARG PYTHON_VERSION="3.12.3"
RUN echo "Using Python version: $PYTHON_VERSION"

# Use bash for all subsequent RUN instructions (the install scripts are bash).
SHELL ["/bin/bash", "-c"]
FROM base AS devel
#
# NB: PyTorch requires this to be < 1.0
ENV PYTORCH_CUDA_ALLOC_CONF="garbage_collection_threshold:0.99999"

# An ARG goes out of scope at the end of the stage that declared it, so the
# declarations in `base` do not reach this stage. Re-declare them here;
# otherwise the RUN steps below would see empty strings and --build-arg
# values passed by the user would be silently dropped. (The `wheel` stage
# already follows this convention.)
ARG GITHUB_MIRROR=""
ARG PYTHON_VERSION="3.12.3"

# Copy all installation scripts at once to reduce layers
COPY docker/common/install.sh \
docker/common/install_base.sh \
docker/common/install_cmake.sh \
docker/common/install_ccache.sh \
docker/common/install_cuda_toolkit.sh \
docker/common/install_tensorrt.sh \
docker/common/install_polygraphy.sh \
docker/common/install_mpi4py.sh \
docker/common/install_pytorch.sh \
./

# install.sh dispatches to the per-component scripts; each one is removed in
# the same layer after use so it does not persist in the image.
RUN GITHUB_MIRROR=${GITHUB_MIRROR} \
PYTHON_VERSION=${PYTHON_VERSION} \
bash ./install.sh --base && rm install_base.sh
RUN GITHUB_MIRROR=${GITHUB_MIRROR} bash ./install.sh --cmake && rm install_cmake.sh
RUN GITHUB_MIRROR=${GITHUB_MIRROR} bash ./install.sh --ccache && rm install_ccache.sh
RUN bash ./install.sh --cuda_toolkit && rm install_cuda_toolkit.sh

# Component versions for the TensorRT install; supplied via --build-arg.
ARG TRT_VER
ARG CUDA_VER
ARG CUDNN_VER
ARG NCCL_VER
ARG CUBLAS_VER
RUN TRT_VER=${TRT_VER} \
CUDA_VER=${CUDA_VER} \
CUDNN_VER=${CUDNN_VER} \
NCCL_VER=${NCCL_VER} \
CUBLAS_VER=${CUBLAS_VER} \
bash ./install.sh --tensorrt && rm install_tensorrt.sh
RUN bash ./install.sh --polygraphy && rm install_polygraphy.sh
RUN GITHUB_MIRROR=${GITHUB_MIRROR} bash ./install.sh --mpi4py && rm install_mpi4py.sh

# "skip" keeps the PyTorch build shipped with the NGC base image.
ARG TORCH_INSTALL_TYPE="skip"
RUN TORCH_INSTALL_TYPE=${TORCH_INSTALL_TYPE} bash ./install.sh --pytorch && rm install_pytorch.sh

# Last component: remove the dispatcher itself once done.
RUN bash ./install.sh --opencv && rm install.sh
# Pull in the Triton server image solely as a COPY source for the next stage.
FROM ${TRITON_IMAGE}:${TRITON_BASE_TAG} AS triton

FROM devel AS tritondevel
# Graft the Triton runtime pieces (python backend, libs, headers, binaries,
# caches) onto the devel image instead of installing Triton from scratch.
COPY --from=triton /opt/tritonserver/backends/python /opt/tritonserver/backends/python
COPY --from=triton /opt/tritonserver/lib /opt/tritonserver/lib
COPY --from=triton /opt/tritonserver/include /opt/tritonserver/include
COPY --from=triton /opt/tritonserver/bin /opt/tritonserver/bin
COPY --from=triton /opt/tritonserver/caches /opt/tritonserver/caches
# Copy all installation scripts at once to reduce layers
COPY docker/common/install_triton.sh \
docker/common/install_ucx.sh \
docker/common/install_nixl.sh \
docker/common/install_etcd.sh \
./
# Each script is deleted in the same layer so it does not persist in the image.
RUN bash ./install_triton.sh && rm install_triton.sh
# Install UCX first
# NOTE(review): ordering suggests NIXL depends on UCX being present — confirm.
RUN bash ./install_ucx.sh && rm install_ucx.sh
# Install NIXL
RUN bash ./install_nixl.sh && rm install_nixl.sh
# Install etcd
RUN bash ./install_etcd.sh && rm install_etcd.sh
# Build stage: compiles the TensorRT-LLM wheel. DEVEL_IMAGE chooses the base
# ("devel" by default, or "tritondevel" for Triton-enabled builds).
FROM ${DEVEL_IMAGE} AS wheel
WORKDIR /src/tensorrt_llm
# Only the inputs the wheel build needs — keeps cache hits when unrelated
# files change.
COPY benchmarks benchmarks
COPY cpp cpp
COPY scripts scripts
COPY tensorrt_llm tensorrt_llm
COPY 3rdparty 3rdparty
COPY .gitmodules setup.py requirements.txt requirements-dev.txt constraints.txt README.md ./
# Create cache directories for pip and ccache
RUN mkdir -p /root/.cache/pip /root/.cache/ccache
ENV CCACHE_DIR=/root/.cache/ccache
# Build the TRT-LLM wheel
# Re-declared: ARGs do not cross FROM boundaries.
ARG GITHUB_MIRROR=""
ARG BUILD_WHEEL_ARGS="--clean --benchmarks"
ARG BUILD_WHEEL_SCRIPT="scripts/build_wheel.py"
# BuildKit cache mounts keep pip downloads and ccache objects on the build
# host across builds without adding them to any image layer.
RUN --mount=type=cache,target=/root/.cache/pip --mount=type=cache,target=${CCACHE_DIR} \
GITHUB_MIRROR=$GITHUB_MIRROR python3 ${BUILD_WHEEL_SCRIPT} ${BUILD_WHEEL_ARGS}
# Runtime stage: installs the wheel built in `wheel` plus benchmarks and
# examples. NOTE(review): no USER directive — image runs as root; confirm
# that is intended for this deployment target.
FROM ${DEVEL_IMAGE} AS release

# Create a cache directory for pip
RUN mkdir -p /root/.cache/pip
WORKDIR /app/tensorrt_llm
# Bind-mount the wheel output from the build stage (never stored in a layer)
# and install it with a shared pip cache.
RUN --mount=type=cache,target=/root/.cache/pip --mount=type=bind,from=wheel,source=/src/tensorrt_llm/build,target=/tmp/wheel \
pip install /tmp/wheel/tensorrt_llm*.whl
COPY README.md ./
COPY docs docs
COPY cpp/include include
# Expose the installed package's bin/ and libs/ under /app/tensorrt_llm via
# symlinks, register the lib dir with the dynamic linker, then sanity-check:
# the marker files must exist and executorWorker must resolve all of its
# tensorrt_llm shared-library dependencies ("not found" grep must fail).
RUN ln -sv $(python3 -c 'import site; print(f"{site.getsitepackages()[0]}/tensorrt_llm/bin")') bin && \
test -f bin/executorWorker && \
ln -sv $(python3 -c 'import site; print(f"{site.getsitepackages()[0]}/tensorrt_llm/libs")') lib && \
test -f lib/libnvinfer_plugin_tensorrt_llm.so && \
echo "/app/tensorrt_llm/lib" > /etc/ld.so.conf.d/tensorrt_llm.conf && \
ldconfig && \
! ( ldd -v bin/executorWorker | grep tensorrt_llm | grep -q "not found" )
ARG SRC_DIR=/src/tensorrt_llm
COPY --from=wheel ${SRC_DIR}/benchmarks benchmarks
ARG CPP_BUILD_DIR=${SRC_DIR}/cpp/build
# Pre-built C++ benchmark binaries from the wheel stage.
COPY --from=wheel \
${CPP_BUILD_DIR}/benchmarks/bertBenchmark \
${CPP_BUILD_DIR}/benchmarks/gptManagerBenchmark \
${CPP_BUILD_DIR}/benchmarks/disaggServerBenchmark \
benchmarks/cpp/
COPY examples examples
# Examples are made world-writable (presumably so non-root users can run
# them in place — TODO confirm); C++ sources already shipped as binaries
# above are pruned.
RUN chmod -R a+w examples && \
rm -v \
benchmarks/cpp/bertBenchmark.cpp \
benchmarks/cpp/gptManagerBenchmark.cpp \
benchmarks/cpp/disaggServerBenchmark.cpp \
benchmarks/cpp/CMakeLists.txt && \
rm -rf /root/.cache/pip
# Version metadata last: these change every build and would otherwise
# invalidate all following layers.
ARG GIT_COMMIT
ARG TRT_LLM_VER
ENV TRT_LLM_GIT_COMMIT=${GIT_COMMIT} \
TRT_LLM_VERSION=${TRT_LLM_VER}
# Builds the Triton tensorrtllm backend on top of the wheel stage.
FROM wheel AS tritonbuild
WORKDIR /src/tensorrt_llm
RUN pip install /src/tensorrt_llm/build/tensorrt_llm*.whl
COPY ./triton_backend/ ./triton_backend/
# Re-declared so the pre-FROM default/--build-arg value is visible here.
ARG TRITON_BASE_TAG
# Strip the "-py3" suffix and prefix "r" to form the Triton release branch
# name (e.g. "25.09-py3" -> "r25.09").
RUN bash ./triton_backend/inflight_batcher_llm/scripts/build.sh -s "r${TRITON_BASE_TAG%-py3}"
# Release image with Triton support: adds the backend models/scripts/tools
# and the backend binary compiled in `tritonbuild`.
FROM release AS tritonrelease
WORKDIR /app/tensorrt_llm
COPY ./triton_backend/all_models ./triton_backend/all_models
COPY ./triton_backend/scripts ./triton_backend/scripts
COPY ./triton_backend/tools ./triton_backend/tools
COPY ./triton_backend/inflight_batcher_llm/scripts ./triton_backend/inflight_batcher_llm/scripts
COPY ./triton_backend/inflight_batcher_llm/client ./triton_backend/inflight_batcher_llm/client
COPY --from=tritonbuild /opt/tritonserver/backends/tensorrtllm /opt/tritonserver/backends/tensorrtllm