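# Dockerfile for serving OpenMMLab models through the MMDeploy Triton backend,
# built on top of NVIDIA Triton Inference Server 22.12.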
FROM nvcr.io/nvidia/tritonserver:22.12-pyt-python-py3

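# Build-time version pins; MMCV_VERSION and MMENGINE_VERSION are pip-style
# specifiers passed to `mim install` further below.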
ARG CUDA=11.3
ARG TORCH_VERSION=1.10.0
ARG TORCHVISION_VERSION=0.11.0
ARG ONNXRUNTIME_VERSION=1.8.1
ARG PPLCV_VERSION=0.7.0
ENV FORCE_CUDA="1"
ARG MMCV_VERSION=">=2.0.0rc2"
ARG MMENGINE_VERSION=">=0.3.0"

WORKDIR /root/workspace

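# Install CMake 3.26.3 under /usr; it is used to configure ppl.cv and the MMDeploy SDK.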
RUN wget https://github.com/Kitware/CMake/releases/download/v3.26.3/cmake-3.26.3-linux-x86_64.sh &&\
    bash cmake-3.26.3-linux-x86_64.sh --skip-license --prefix=/usr

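# Build ppl.cv with CUDA support; its install tree is consumed by the MMDeploy
# SDK build through pplcv_DIR.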
RUN git clone --depth 1 --branch v${PPLCV_VERSION} https://github.com/openppl-public/ppl.cv.git &&\
    cd ppl.cv &&\
    ./build.sh cuda &&\
    mv cuda-build/install ./ &&\
    rm -rf cuda-build
ENV pplcv_DIR=/root/workspace/ppl.cv/install/lib/cmake/ppl

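# OpenCV development package required to build the MMDeploy SDK.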
RUN apt-get update &&\
    apt-get install -y libopencv-dev

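# Prebuilt ONNX Runtime GPU release; ONNXRUNTIME_DIR and LD_LIBRARY_PATH let the
# MMDeploy build and runtime find its headers and shared libraries.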
RUN wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}.tgz \
    && tar -zxvf onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}.tgz
ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}
ENV LD_LIBRARY_PATH=/root/workspace/onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}/lib:$LD_LIBRARY_PATH

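# Python environment: CUDA 11.3 PyTorch wheels, OpenMIM, mmcv/mmengine, and the
# onnxruntime-gpu package matching the C library installed above.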
RUN python3 -m pip install -U pip &&\
    pip install torch==${TORCH_VERSION}+cu113 torchvision==${TORCHVISION_VERSION}+cu113 -f https://download.pytorch.org/whl/torch_stable.html &&\
    pip install openmim &&\
    mim install "mmcv${MMCV_VERSION}" "onnxruntime-gpu==${ONNXRUNTIME_VERSION}" "mmengine${MMENGINE_VERSION}" &&\
    ln /usr/bin/python3 /usr/bin/python

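# TensorRT 8.2.3.0 must be present in the build context next to this Dockerfile;
# copy it in and install its CPython 3.8 wheel.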
COPY TensorRT-8.2.3.0 /root/workspace/tensorrt
RUN pip install /root/workspace/tensorrt/python/*cp38*whl
ENV TENSORRT_DIR=/root/workspace/tensorrt
ENV LD_LIBRARY_PATH=/root/workspace/tensorrt/lib:$LD_LIBRARY_PATH

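# rapidjson headers for the Triton backend build.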
RUN apt-get install -y rapidjson-dev

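# Install the OpenMMLab codebases from source so their models can be converted and served.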
RUN git clone -b v1.0.0rc7 https://github.com/open-mmlab/mmpretrain.git &&\
    cd mmpretrain && pip install .

RUN git clone -b v3.0.0 https://github.com/open-mmlab/mmdetection.git &&\
    cd mmdetection && pip install .

RUN git clone -b v1.0.0 https://github.com/open-mmlab/mmsegmentation.git &&\
    cd mmsegmentation && pip install .

RUN git clone -b v1.0.0 https://github.com/open-mmlab/mmocr.git &&\
    cd mmocr && pip install .

RUN git clone -b v1.0.0rc1 https://github.com/open-mmlab/mmrotate.git &&\
    cd mmrotate && pip install .

RUN git clone -b v1.0.0 https://github.com/open-mmlab/mmpose.git &&\
    cd mmpose && pip install .

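# Build MMDeploy from the triton-server branch with the TensorRT and ONNX Runtime
# backends, copy the resulting Triton custom backend into /opt/tritonserver/backends,
# and install the mmdeploy Python package in editable mode.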
RUN git clone -b triton-server --recursive https://github.com/irexyc/mmdeploy &&\
    cd mmdeploy && mkdir -p build && cd build &&\
    cmake .. \
        -DMMDEPLOY_BUILD_SDK=ON \
        -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \
        -DMMDEPLOY_BUILD_TEST=OFF \
        -DMMDEPLOY_TARGET_BACKENDS="trt;ort" \
        -DMMDEPLOY_CODEBASES=all \
        -Dpplcv_DIR=${pplcv_DIR} \
        -DMMDEPLOY_BUILD_EXAMPLES=OFF \
        -DMMDEPLOY_DYNAMIC_BACKEND=OFF \
        -DTRITON_MMDEPLOY_BACKEND=ON \
        -DTRITON_TAG="r22.12" &&\
    make -j$(nproc) && make install &&\
    cp -r install/backends /opt/tritonserver/ &&\
    cd .. && pip install -e . --user
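
# A possible build invocation (the image tag is arbitrary; the TensorRT-8.2.3.0
# directory must already sit in the build context):
#   docker build -t mmdeploy-triton:22.12 .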