Commit 71b4731

NGC PyTorch 22.01

1 parent d55b257 commit 71b4731

File tree

1 file changed: 213 additions & 0 deletions
@@ -0,0 +1,213 @@
FROM nvcr.io/nvidia/pytorch:22.01-py3
# NVIDIA NGC PyTorch with Python 3.8 (CONDA)

ENV DEBIAN_FRONTEND=noninteractive \
    MPLBACKEND=Svg \
    PIP_IGNORE_INSTALLED=0 \
    PYTHONUNBUFFERED=1 \
    LD_LIBRARY_PATH="/usr/local/cuda/compat/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/lib:/usr/local/cuda/lib64:/usr/local/nvidia/lib64:/usr/include/x86_64-linux-gnu" \
    PATH="/usr/local/nvm/versions/node/v16.6.1/bin:/opt/conda/bin:/usr/local/mpi/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/ucx/bin:/opt/tensorrt/bin:/usr/local/src/lightgbm/LightGBM:/usr/local/bin/mecab" \
    mecab_dicdir=/usr/local/lib/mecab/dic/mecab-ko-dic \
    CPLUS_INCLUDE_PATH=/usr/include/gdal \
    C_INCLUDE_PATH=/usr/include/gdal \
    LANG=C.UTF-8

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        automake \
        ffmpeg \
        fonts-nanum \
        fonts-nanum-coding \
        fonts-nanum-extra \
        gfortran \
        htop \
        libasound2-dev \
        libatlas-base-dev \
        libavresample-dev \
        libdc1394-22-dev \
        libeigen3-dev \
        libfaac-dev \
        libgdal-dev \
        libgflags-dev \
        libgoogle-glog-dev \
        libgphoto2-dev \
        libgstreamer-plugins-base1.0-dev \
        libgstreamer1.0-dev \
        libgtk-3-dev \
        libhdf5-dev \
        libmp3lame-dev \
        libopenblas-dev \
        libopencore-amrnb-dev \
        libopencore-amrwb-dev \
        libprotobuf-dev \
        libtheora-dev \
        libvorbis-dev \
        libx264-dev \
        libxext6 \
        libxrender-dev \
        libxvidcore-dev \
        libsm6 \
        libtbb-dev \
        mercurial \
        ncurses-term \
        openjdk-8-jdk \
        pdsh \
        protobuf-compiler \
        v4l-utils \
        x264

# Link the preinstalled cuDNN into the CUDA library path
RUN ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so.8.2.4 /usr/local/cuda/lib64/libcudnn.so && \
    rm -rf /var/lib/apt/lists/* && \
    ldconfig

# nvtop install
WORKDIR /tmp
RUN git clone https://github.com/Syllo/nvtop.git && \
    mkdir -p nvtop/build && \
    cd /tmp/nvtop/build && \
    cmake .. -DNVML_RETRIEVE_HEADER_ONLINE=True && \
    make -j$(nproc) && \
    make install

RUN update-alternatives --install /opt/conda/bin/python python /opt/conda/bin/python3 2

# install NLP packages *mecab-ko & khai*
WORKDIR /tmp
RUN curl -LO https://bitbucket.org/eunjeon/mecab-ko/downloads/mecab-0.996-ko-0.9.2.tar.gz && \
    tar zxfv mecab-0.996-ko-0.9.2.tar.gz && \
    cd mecab-0.996-ko-0.9.2 && \
    ./configure && \
    make -j$(nproc) && \
    make check && \
    make install

RUN echo "Install mecab-ko-dic" && \
    cd /tmp && \
    ldconfig && \
    curl -LO https://bitbucket.org/eunjeon/mecab-ko-dic/downloads/mecab-ko-dic-2.1.1-20180720.tar.gz && \
    tar -zxvf mecab-ko-dic-2.1.1-20180720.tar.gz && \
    cd mecab-ko-dic-2.1.1-20180720 && \
    ./autogen.sh && \
    ./configure && \
    make -j$(nproc) && \
    sh -c 'echo "dicdir=/usr/local/lib/mecab/dic/mecab-ko-dic" > /usr/local/etc/mecabrc' && \
    make install && \
    cd /tmp && \
    git clone https://bitbucket.org/eunjeon/mecab-python-0.996.git && \
    python3 -m pip install /tmp/mecab-python-0.996
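
# (Optional sanity check, not part of the original recipe: with the dictionary path written
# to /usr/local/etc/mecabrc above, the Python binding should load it without extra arguments.)
# RUN python3 -c "import MeCab; print(MeCab.Tagger().parse('아버지가 방에 들어가신다'))"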

RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - && \
    apt-get update -y && \
    apt-get install -y nodejs

WORKDIR /tmp
RUN git clone -q --branch=v0.3.18 https://github.com/xianyi/OpenBLAS.git && \
    cd OpenBLAS && \
    make DYNAMIC_ARCH=1 NO_AFFINITY=1 NUM_THREADS=48 FC=gfortran && \
    make install && \
    cd /tmp && \
    git clone --recursive https://github.com/bodono/scs-python.git && \
    cd /tmp/scs-python && \
    python setup.py install --scs --gpu

RUN /opt/conda/bin/conda install -c conda-forge opencv ffmpeg spacy

WORKDIR /tmp
COPY ./requirements.22.01.txt /tmp/requirements.txt
RUN /opt/conda/bin/python3 -m pip install --no-cache-dir -r requirements.txt && \
    rm -f /tmp/*.whl /tmp/requirements.txt

# install git-lfs
WORKDIR /tmp
RUN curl -sLO https://github.com/git-lfs/git-lfs/releases/download/v3.0.2/git-lfs-linux-amd64-v3.0.2.tar.gz && \
    tar -zxf git-lfs-linux-amd64-v3.0.2.tar.gz && \
    bash install.sh && \
    rm -rf /tmp/*

WORKDIR /tmp
RUN git clone https://github.com/aristocratos/bashtop.git && \
    cd bashtop && \
    make install

RUN curl -fL https://github.com/cdr/code-server/releases/download/v3.12.0/code-server-3.12.0-linux-amd64.tar.gz | tar -C /usr/local/lib -xz && \
    mv /usr/local/lib/code-server-3.12.0-linux-amd64 /usr/local/lib/code-server-3.12.0 && \
    ln -s /usr/local/lib/code-server-3.12.0/bin/code-server /usr/local/bin/code-server

# Install Open MPI
RUN mkdir /tmp/openmpi && \
    cd /tmp/openmpi && \
    wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.1.tar.gz && \
    tar zxf openmpi-4.1.1.tar.gz && \
    cd openmpi-4.1.1 && \
    ./configure --enable-orterun-prefix-by-default && \
    make -j $(nproc) all && \
    make install && \
    ldconfig && \
    rm -rf /tmp/openmpi* && \
    # Create a wrapper for OpenMPI to allow running as root by default
    mv /usr/local/bin/mpirun /usr/local/bin/mpirun.real && \
    echo '#!/bin/bash' > /usr/local/bin/mpirun && \
    echo 'mpirun.real --allow-run-as-root "$@"' >> /usr/local/bin/mpirun && \
    chmod a+x /usr/local/bin/mpirun && \
    # Configure OpenMPI with sensible defaults:
    echo "btl_tcp_if_exclude = lo,docker0" >> /usr/local/etc/openmpi-mca-params.conf

# Install Horovod, temporarily using CUDA stubs
RUN ldconfig /usr/local/cuda/targets/x86_64-linux/lib/stubs && \
    HOROVOD_CUDA_HOME=$CONDA_PREFIX HOROVOD_GPU_ALLREDUCE=NCCL HOROVOD_GPU_BROADCAST=NCCL HOROVOD_NCCL_LINK=SHARED \
    HOROVOD_WITHOUT_TENSORFLOW=1 HOROVOD_WITH_PYTORCH=1 HOROVOD_WITHOUT_MXNET=1 \
    pip install --no-cache-dir horovod==0.23.0 && \
    ldconfig
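
# (Optional check, not in the original: `RUN horovodrun --check-build` prints which
# frameworks and collective-communication backends the installed Horovod wheel supports.)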

RUN python3 -m pip install --no-cache-dir \
    mpi4py==3.1.2 \
    nni==2.5 \
    mlflow==1.21.0 \
    scikit-nni==0.2.1

RUN jupyter nbextensions_configurator enable && \
    jupyter contrib nbextension install && \
    jupyter nbextension enable --py --sys-prefix widgetsnbextension && \
    jupyter serverextension enable --py jupyterlab --sys-prefix && \
    jupyter labextension install --no-build @jupyter-widgets/jupyterlab-manager && \
    jupyter labextension install --no-build @krassowski/jupyterlab-lsp && \
    jupyter serverextension enable --py jupyter_lsp && \
    jupyter labextension install --no-build @jupyterlab/toc && \
    jupyter nbextension enable execute_time/ExecuteTime && \
    jupyter nbextension enable toc2/main && \
    jupyter lab build

RUN apt autoclean && \
    sed -i 's/source \/usr\/local\/nvm\/nvm.sh//' /etc/bash.bashrc && \
    ln -sf /usr/share/terminfo/x/xterm-color /usr/share/terminfo/x/xterm-256color && \
    rm -rf /var/lib/apt/lists/* && \
    rm -rf /root/.cache && \
    rm -rf /tmp/*

RUN /opt/conda/bin/python3 -m ipykernel install \
        --prefix=/opt/conda/ \
        --display-name "PyTorch 1.11 (NGC 22.01/Python 3.8 Conda) on Backend.AI" && \
    cat /opt/conda/share/jupyter/kernels/python3/kernel.json

# Backend.AI specifics
COPY ./service-defs /etc/backend.ai/service-defs
COPY ./runner-scripts/bootstrap.sh runner-scripts/setup_multinode.py /opt/container/

LABEL ai.backend.kernelspec="1" \
      ai.backend.envs.corecount="OPENBLAS_NUM_THREADS,OMP_NUM_THREADS,NPROC" \
      ai.backend.features="batch query uid-match user-input" \
      ai.backend.base-distro="ubuntu16.04" \
      ai.backend.accelerators="cuda" \
      ai.backend.resource.min.cpu="1" \
      ai.backend.resource.min.mem="1g" \
      ai.backend.resource.min.cuda.device=1 \
      ai.backend.resource.min.cuda.shares=0 \
      ai.backend.runtime-type="python" \
      ai.backend.runtime-path="/opt/conda/bin/python3" \
      ai.backend.service-ports="ipython:pty:3000,jupyter:http:8091,jupyterlab:http:8090,vscode:http:8180,tensorboard:http:6006,mlflow-ui:preopen:5000,nniboard:preopen:8080"

WORKDIR /home/work
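
A minimal way to smoke-test the resulting image locally (a sketch, not part of this commit; the
image tag is made up and <path-to-this-Dockerfile> stands in for the file added here, built from
a context that contains requirements.22.01.txt, service-defs/ and runner-scripts/):

    docker build -f <path-to-this-Dockerfile> -t ngc-pytorch-2201-test .
    docker run --rm --gpus all ngc-pytorch-2201-test \
        python3 -c "import torch; print(torch.__version__, torch.cuda.is_available())"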
