# syntax=docker/dockerfile:1
#
# This file can build images for cpu and gpu env. By default it builds image for CPU.
# Use following option to build image for cuda/GPU: --build-arg BASE_IMAGE=nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04
# Here is complete command for GPU/cuda -
# $ DOCKER_BUILDKIT=1 docker build --file Dockerfile --build-arg BASE_IMAGE=nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 -t torchserve:latest .
#
# Following comments have been shamelessly copied from https://github.com/pytorch/pytorch/blob/master/Dockerfile
#
# NOTE: To build this you will need docker with BuildKit enabled
# (DOCKER_BUILDKIT=1, or any recent docker/buildx default). The stable
# "dockerfile:1" frontend covers every feature used here (RUN --mount cache
# mounts); the old "experimental" channel is no longer required.
#
# If you do not use buildkit you are not going to have a good time
#
# For reference:
# https://docs.docker.com/develop/develop-images/build_enhancements/

# ARGs declared before FROM are only visible to FROM lines, so BASE_IMAGE is
# re-declared inside the stage to make it available to RUN steps below.
ARG BASE_IMAGE=ubuntu:18.04

FROM ${BASE_IMAGE} AS compile-image
ARG BASE_IMAGE=ubuntu:18.04
# key=value form: the legacy space-separated `ENV key value` syntax is
# deprecated. Unbuffered stdout so python output shows up in logs immediately.
ENV PYTHONUNBUFFERED=TRUE

# Build-time toolchain + python, with the apt cache kept in a BuildKit cache
# mount (fast rebuilds, nothing baked into the layer). Package list is sorted
# alphabetically for diffability. get-pip bootstraps a modern pip because the
# distro pip for ubuntu 18.04 is too old.
RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \
    apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
    ca-certificates \
    curl \
    g++ \
    openjdk-11-jre-headless \
    python3-dev \
    python3-distutils \
    python3-venv \
    && rm -rf /var/lib/apt/lists/* \
    && cd /tmp \
    # -fsSL: fail the build on an HTTP error instead of saving the error page
    # and feeding it to python below; follow redirects silently.
    && curl -fsSL -O https://bootstrap.pypa.io/get-pip.py \
    && python3 get-pip.py

# Create an isolated virtualenv for all python packages, then "activate" it
# for every subsequent instruction (and the runtime image, which copies this
# directory) by prepending its bin directory to PATH.
RUN python3 -m venv /home/venv
ENV PATH="/home/venv/bin:$PATH"

# Make the unversioned `python` / `pip` names resolve for tooling that
# expects them.
# NOTE(review): get-pip.py above installs /usr/local/bin/pip as a regular
# file; update-alternatives cannot manage a path that already exists as a
# non-symlink, so the second alternative may not take effect -- verify.
# Inside the venv (on PATH since the previous step) both names exist anyway.
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
    && update-alternatives --install /usr/local/bin/pip pip /usr/local/bin/pip3 1

# This is only useful for cuda env.
# Fix: the previous `RUN export USE_CUDA=1` was a no-op -- the exported
# variable dies with the shell that RUN spawns. ENV persists it for the
# remaining build steps of this (compile) stage; the runtime stage below
# starts from a fresh base and is unaffected.
ENV USE_CUDA=1

# Optional CUDA wheel tag (e.g. "cu101") selecting a matching torch binary.
ARG CUDA_VERSION=""

# Resolve the latest *stable* torch/torchvision versions from PyPI metadata.
# Fix: the previous approach sorted every key of 'releases' and could pick a
# pre-release (rc/dev) as "latest"; PyPI's 'info.version' field is the latest
# stable release. curl -fsSL fails the build on HTTP errors instead of piping
# an error page into python.
RUN TORCH_VER=$(curl -fsSL https://pypi.org/pypi/torch/json | python -c "import sys, json; print(json.load(sys.stdin)['info']['version'])") && \
    TORCH_VISION_VER=$(curl -fsSL https://pypi.org/pypi/torchvision/json | python -c "import sys, json; print(json.load(sys.stdin)['info']['version'])") && \
    if echo "$BASE_IMAGE" | grep -q "cuda:"; then \
        # Install CUDA version specific binary when CUDA version is specified as a build arg
        if [ -n "$CUDA_VERSION" ]; then \
            pip install --no-cache-dir torch==$TORCH_VER+$CUDA_VERSION torchvision==$TORCH_VISION_VER+$CUDA_VERSION -f https://download.pytorch.org/whl/torch_stable.html; \
        # Install the binary with the latest CUDA version support
        else \
            pip install --no-cache-dir torch torchvision; \
        fi \
    # Install the CPU binary
    else \
        pip install --no-cache-dir torch==$TORCH_VER+cpu torchvision==$TORCH_VISION_VER+cpu -f https://download.pytorch.org/whl/torch_stable.html; \
    fi
# Serving stack: TorchServe + model archiver, plus captum (interpretability)
# and torchtext. --no-cache-dir keeps the pip download cache out of the layer.
# NOTE(review): versions are unpinned, so builds are not reproducible --
# consider pinning once a version policy exists.
RUN pip install --no-cache-dir captum torchtext torchserve torch-model-archiver

# Final image for production
FROM ${BASE_IMAGE} AS runtime-image

# key=value form: legacy space-separated ENV syntax is deprecated.
ENV PYTHONUNBUFFERED=TRUE

# Runtime packages only (sorted alphabetically). The compiled python packages
# arrive via COPY --from=compile-image below.
# NOTE(review): build-essential/python3-dev are presumably kept so users can
# pip-install model dependencies with native extensions at runtime -- confirm;
# they add considerable size to a "production" image.
# Fix: dropped the trailing `&& cd /tmp`, which was a no-op (the working
# directory does not persist beyond a RUN instruction).
RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \
    apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
    build-essential \
    openjdk-11-jre-headless \
    python3 \
    python3-dev \
    python3-distutils \
    && rm -rf /var/lib/apt/lists/*

#RUN useradd -m model-server \
#    && mkdir -p /home/model-server/tmp
# Scratch space for TorchServe; created as root because the dedicated
# non-root user above is currently disabled (Backend.AI does uid-match).
RUN mkdir -p /home/model-server/tmp

#COPY --chown=model-server --from=compile-image
# Bring over only the pre-built virtualenv from the compile stage; compilers
# and the pip bootstrap never reach this image.
COPY --from=compile-image /home/venv /home/venv

# Activate the copied venv for all subsequent instructions and at runtime.
ENV PATH="/home/venv/bin:$PATH"

# TorchServe entrypoint script. NOTE(review): currently unused -- the
# ENTRYPOINT referencing it further down is commented out.
COPY dockerd-entrypoint.sh /usr/local/bin/dockerd-entrypoint.sh

RUN chmod +x /usr/local/bin/dockerd-entrypoint.sh
#RUN chown -R /home/model-server

# Default TorchServe configuration and an (empty) model store directory.
COPY config.properties /home/model-server/config.properties
RUN mkdir /home/model-server/model-store

#&& chown -R model-server /home/work/model-server/model-store

# Documentation only (EXPOSE does not publish ports): TorchServe inference
# (8080), management (8081), metrics (8082) and gRPC (7070/7071).
EXPOSE 8080 8081 8082 7070 7071

# Upstream TorchServe runtime setup, disabled here: Backend.AI supplies its
# own user mapping, working directory and service entrypoint instead.
#USER model-server
#WORKDIR /home/model-server
#ENV TEMP=/home/model-server/tmp
#ENTRYPOINT ["/usr/local/bin/dockerd-entrypoint.sh"]
#CMD ["serve"]


# Backend.AI specifics
# Service definition files consumed by the Backend.AI agent.
COPY service-defs /etc/backend.ai/service-defs
# Kernel metadata labels read by the Backend.AI manager/agent: resource
# minimums, supported session features, and the advertised service port.
LABEL ai.backend.kernelspec="1" \
      ai.backend.envs.corecount="OPENBLAS_NUM_THREADS,OMP_NUM_THREADS,NPROC" \
      ai.backend.features="batch query uid-match user-input" \
      ai.backend.resource.min.cpu="1" \
      ai.backend.resource.min.mem="256m" \
      ai.backend.base-distro="ubuntu18.04" \
      ai.backend.runtime-type="python" \
      ai.backend.runtime-path="/bin/false" \
      ai.backend.service-ports="jupyter:http:8070"

# Syscall-jail policy for the Backend.AI sandbox.
COPY policy.yml /etc/backend.ai/jail/policy.yml