Skip to content

Commit de72a24

Browse files
committed
Python Conda 3.8 & CUDA 10.1
1 parent 682c831 commit de72a24

File tree

1 file changed

+120
-70
lines changed

1 file changed

+120
-70
lines changed

python-conda/Dockerfile

Lines changed: 120 additions & 70 deletions
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,4 @@
1-
FROM centos:8
1+
FROM centos:7
22
LABEL maintainer "Mario Cho <[email protected]>"
33

44
ENV DEBIAN_FRONTEND=noninteractive \
@@ -10,105 +10,129 @@ ENV DEBIAN_FRONTEND=noninteractive \
1010
PATH="/usr/local/nvidia/bin:/usr/local/cuda/bin:/opt/conda/bin:/usr/local/sbin:/usr/bin/cmake/bin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/tensorrt/bin" \
1111
LANG=C.UTF-8
1212

13-
ENV CUDA_VER 11.0
14-
ENV CUDA_VERSION 11.0.3
15-
ENV CUDA_PKG_VERSION 11-0=$CUDA_VERSION-1
16-
ENV NCCL_VERSION 2.9.6
17-
ENV CUDNN_VERSION 8.0.5.39
18-
ENV TH_VERSION 1.7.1
13+
ENV CUDA_VER 10.1
14+
ENV CUDA_VERSION 10.1.243
15+
ENV CUDA_PKG_VERSION 10-1-$CUDA_VERSION-1
16+
ENV NCCL_VERSION 2.8.4
17+
ENV CUDNN_VERSION 7.6.5.32
1918
LABEL com.nvidia.cudnn.version="${CUDNN_VERSION}"
2019

2120
# nvidia-container-runtime
2221
ENV NVIDIA_VISIBLE_DEVICES all
2322
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
24-
ENV NVIDIA_REQUIRE_CUDA "cuda>=11.0 brand=tesla,driver>=418,driver<419 brand=tesla,driver>=440,driver<441 driver>=450"
23+
ENV NVIDIA_REQUIRE_CUDA "cuda>=10.1 brand=tesla,driver>=396,driver<397 brand=tesla,driver>=410,driver<411 brand=tesla,driver>=418,driver<419"
2524
ENV CUDA_HOME /usr/local/cuda
2625

2726
RUN yum update -y && \
27+
yum install -y epel-release && \
28+
# yum install -y https://download1.rpmfusion.org/free/el/rpmfusion-free-release-7.noarch.rpm && \
2829
yum install -y \
29-
fonts-nanum \
30-
fonts-nanum-coding \
31-
fonts-nanum-extra \
32-
libasound-dev \
33-
libcairo2-dev \
34-
libgif-dev \
35-
libjpeg-dev \
36-
liblapack-dev \
37-
libnuma-dev \
38-
libopenblas-dev \
39-
libpmi2-0-dev \
30+
atlas-devel \
31+
numactl-devel \
32+
alsa-lib \
33+
cairo \
34+
cairo-devel \
35+
gcc-c++ \
36+
giflib-devel \
37+
lapack-devel \
38+
libjpeg-turbo-devel \
39+
libtool \
40+
make \
4041
nano \
4142
numactl \
43+
numactl-libs \
44+
openblas-devel \
45+
openssh-server \
46+
pmix-devel \
4247
vim \
43-
openssh-client openssh-server \
44-
apt-utils \
48+
openssh-clients \
4549
autoconf \
4650
automake \
4751
bc \
48-
build-essential \
4952
ca-certificates \
5053
cmake \
5154
curl \
52-
debhelper dh-virtualenv \
5355
ffmpeg \
5456
flac \
5557
gawk \
56-
gfortran \
58+
gcc-gfortran \
5759
git \
58-
libatlas-base-dev \
59-
libatlas3-base libtool \
60-
libsndfile1-dev \
61-
libssl-dev \
62-
libtool \
63-
python2.7 \
60+
libsndfile-devel \
61+
openssl-devel \
6462
python3 \
6563
sox \
6664
subversion \
6765
unzip \
6866
wget \
6967
zip \
70-
zlib1g-dev
68+
zlib && \
69+
curl -sL https://rpm.nodesource.com/setup_14.x | bash - && \
70+
yum install -y nodejs
7171

7272
RUN NVIDIA_GPGKEY_SUM=d1be581509378368edeec8c1eb2958702feedf3bc3d17011adbf24efacce4ab5 && \
73-
curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/rhel8/x86_64/7fa2af80.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \
74-
echo "$NVIDIA_GPGKEY_SUM /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c --strict -
73+
curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/7fa2af80.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \
74+
echo "$NVIDIA_GPGKEY_SUM /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c --strict -
7575

7676
COPY cuda.repo /etc/yum.repos.d/cuda.repo
7777
COPY nvidia-ml.repo /etc/yum.repos.d/nvidia-ml.repo
78-
COPY NGC-DL-CONTAINER-LICENSE /
78+
#COPY NGC-DL-CONTAINER-LICENSE /
7979

8080
RUN yum upgrade -y && yum install -y \
81-
cuda-cudart-11-0-11.0.221-1 \
82-
cuda-compat-11-0 \
83-
&& \
84-
ln -s cuda-11.0 /usr/local/cuda && \
81+
cuda-cudart-$CUDA_PKG_VERSION \
82+
cuda-compat-10-1 && \
83+
ln -s cuda-10.1 /usr/local/cuda && \
8584
echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \
8685
echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf
8786

88-
RUN yum install -y \
89-
cuda-libraries-11-0-11.0.3-1 \
90-
cuda-nvtx-11-0-11.0.167-1 \
91-
libnpp-11-0-11.1.0.245-1 \
92-
libcublas-11-0-11.2.0.252-1 \
93-
libnccl-2.9.6-1+cuda11.0 \
94-
libnccl2=$NCCL_VERSION-1+cuda11.0
95-
96-
RUN yum install -y \
97-
make \
98-
cuda-command-line-tools-11-0-11.0.3-1 \
99-
cuda-libraries-devel-11-0-11.0.3-1 \
100-
cuda-minimal-build-11-0-11.0.3-1 \
101-
cuda-cudart-devel-11-0-11.0.221-1 \
102-
cuda-nvprof-11-0-11.0.221-1 \
103-
cuda-nvml-devel-11-0-11.0.167-1 \
104-
libcublas-devel-11-0-11.2.0.252-1 \
105-
libnpp-devel-11-0-11.1.0.245-1 \
106-
libnccl-devel-2.9.6-1+cuda11.0 \
107-
libcusparse-dev-11-0=11.1.1.245-1 \
108-
libcudnn8=$CUDNN_VERSION-1+cuda11.0 \
109-
libcudnn8-dev=$CUDNN_VERSION-1+cuda11.0
110-
111-
# Install miniconda 3.7
87+
88+
# setopt flag prevents yum from auto upgrading. See https://gitlab.com/nvidia/container-images/cuda/-/issues/88
89+
RUN yum install --setopt=obsoletes=0 -y \
90+
cuda-libraries-$CUDA_PKG_VERSION \
91+
cuda-nvtx-$CUDA_PKG_VERSION \
92+
cuda-npp-$CUDA_PKG_VERSION \
93+
libcublas10-10.2.1.243-1 \
94+
&& yum clean all \
95+
&& rm -rf /var/cache/yum/*
96+
97+
RUN yum install -y yum-plugin-versionlock && yum versionlock libcublas10
98+
99+
# setopt flag prevents yum from auto upgrading. See https://gitlab.com/nvidia/container-images/cuda/-/issues/88
100+
RUN yum install --setopt=obsoletes=0 -y \
101+
make \
102+
cuda-nvml-dev-$CUDA_PKG_VERSION \
103+
cuda-command-line-tools-$CUDA_PKG_VERSION \
104+
libcublas-devel-10.2.1.243-1 \
105+
cuda-cudart-dev-$CUDA_PKG_VERSION \
106+
cuda-libraries-dev-$CUDA_PKG_VERSION \
107+
cuda-minimal-build-$CUDA_PKG_VERSION \
108+
cuda-nvprof-$CUDA_PKG_VERSION \
109+
cuda-npp-dev-$CUDA_PKG_VERSION \
110+
&& yum clean all \
111+
&& rm -rf /var/cache/yum/*
112+
113+
RUN yum install -y yum-plugin-versionlock && yum versionlock libcublas10
114+
115+
RUN yum install -y xz && NCCL_DOWNLOAD_SUM=bcff1cf98e4b24d7ca189577a9d909980d8df88075223d70dc4638e428c53f84 && \
116+
curl -fsSL https://developer.download.nvidia.com/compute/redist/nccl/v2.8/nccl_2.8.4-1+cuda10.1_x86_64.txz -O && \
117+
echo "$NCCL_DOWNLOAD_SUM nccl_2.8.4-1+cuda10.1_x86_64.txz" | sha256sum -c - && \
118+
unxz nccl_2.8.4-1+cuda10.1_x86_64.txz && \
119+
tar --no-same-owner --keep-old-files -xvf nccl_2.8.4-1+cuda10.1_x86_64.tar -C /usr/local/cuda/include/ --strip-components=2 --wildcards '*/include/*' && \
120+
tar --no-same-owner --keep-old-files -xvf nccl_2.8.4-1+cuda10.1_x86_64.tar -C /usr/local/cuda/lib64/ --strip-components=2 --wildcards '*/lib/libnccl.so' && \
121+
rm -f nccl_2.8.4-1+cuda10.1_x86_64.tar && \
122+
ldconfig
123+
124+
125+
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
126+
RUN CUDNN_DOWNLOAD_SUM=7eaec8039a2c30ab0bc758d303588767693def6bf49b22485a2c00bf2e136cb3 && \
127+
curl -fsSL http://developer.download.nvidia.com/compute/redist/cudnn/v7.6.5/cudnn-10.1-linux-x64-v7.6.5.32.tgz -O && \
128+
echo "$CUDNN_DOWNLOAD_SUM cudnn-10.1-linux-x64-v7.6.5.32.tgz" | sha256sum -c - && \
129+
gunzip cudnn-10.1-linux-x64-v7.6.5.32.tgz && \
130+
tar --no-same-owner -xf cudnn-10.1-linux-x64-v7.6.5.32.tar -C /usr/local --wildcards 'cuda/lib64/libcudnn.so.*' && \
131+
rm cudnn-10.1-linux-x64-v7.6.5.32.tar && \
132+
ldconfig
133+
134+
135+
# Install miniconda 3.8
112136
RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-py38_4.9.2-Linux-x86_64.sh -O miniconda.sh && \
113137
mkdir -p /opt && \
114138
sh miniconda.sh -b -p /opt/conda && \
@@ -179,20 +203,46 @@ RUN curl -fL https://github.com/cdr/code-server/releases/download/v3.8.0/code-se
179203
mv /usr/local/lib/code-server-3.8.0-linux-amd64 /usr/local/lib/code-server-3.8.0 && \
180204
ln -s /usr/local/lib/code-server-3.8.0/bin/code-server /usr/local/bin/code-server
181205

206+
RUN /opt/conda/bin/conda install \
207+
pytorch \
208+
torchvision \
209+
torchaudio \
210+
cudatoolkit=10.1 \
211+
-c pytorch \
212+
-c conda-forge
182213
RUN /opt/conda/bin/python3 -m pip install --no-cache-dir \
183-
git+https://github.com/lanpa/tensorboardX \
184-
tornado==6.0.4 \
185-
pystan==2.19.1.1 \
186-
pycairo==1.19.0 \
214+
Cython==0.29.22 \
215+
tornado==6.1 \
216+
pystan==3.0.1 \
217+
pycairo==1.20.0 \
218+
numpy==1.20.2 \
219+
tensorflow==2.3.2 \
220+
tensorboard==2.3.0 \
221+
scipy==1.6.3 \
222+
scikit-learn==0.24.2 \
187223
jupyter==1.0.0 \
188-
python-language-server[all]
224+
typeguard==2.11.1 \
225+
python-language-server[all] \
226+
matplotlib==3.4.1
227+
228+
WORKDIR /tmp
229+
COPY ./requirements.txt /tmp
230+
RUN /opt/conda/bin/python3 -m pip install --no-cache-dir --ignore-installed -r requirements.txt && \
231+
rm -f /tmp/*.whl /tmp/requirements.txt
232+
233+
# Install Jupyterlab extensions
234+
RUN jupyter nbextensions_configurator enable && \
235+
jupyter contrib nbextension install && \
236+
jupyter nbextension enable --py --sys-prefix widgetsnbextension && \
237+
jupyter serverextension enable --py jupyter_lsp && \
238+
jupyter nbextension enable execute_time/ExecuteTime && \
239+
jupyter nbextension enable toc2/main
189240

190241
# Copy Backend.Ai multi-node support
191242
COPY ./runner-scripts/bootstrap.sh /opt/container/
192243
COPY ./service-defs /etc/backend.ai/service-defs
193-
194244
# Install ipython kernelspec
195-
Run /opt/conda/bin/python3 -m ipykernel install --display-name "Conda 3.8 on Python 3.8 & CUDA 11.0)" && \
245+
Run /opt/conda/bin/python3 -m ipykernel install --display-name "Conda python 3.8 on CUDA 10.1 & CentOS 7" && \
196246
cat /usr/local/share/jupyter/kernels/python3/kernel.json
197247

198248
# Backend.AI specifics
@@ -206,7 +256,7 @@ LABEL ai.backend.kernelspec="1" \
206256
ai.backend.resource.min.cuda.shares=0 \
207257
ai.backend.runtime-type="python" \
208258
ai.backend.runtime-path="/opt/conda/bin/python3" \
209-
ai.backend.service-ports="ipython:pty:3000,jupyter:http:8081,jupyterlab:http:8090,vscode:http:8180,tensorboard:http:6006"
259+
ai.backend.service-ports="ipython:pty:3000,jupyter:http:8081,vscode:http:8180,tensorboard:http:6006"
210260

211261
WORKDIR /home/work
212262
# vim: ft=dockerfile

0 commit comments

Comments (0)