Skip to content

Commit 709c27b

Browse files
committed
fixed conflict
1 parent baa3155 commit 709c27b

File tree

1 file changed

+224
-0
lines changed

1 file changed

+224
-0
lines changed
Lines changed: 224 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,224 @@
FROM nvcr.io/nvidia/pytorch:22.03-py3
# NVIDIA NGC PyTorch with Python 3.8 (CONDA)

# DEBIAN_FRONTEND is a build-time-only concern: declaring it as ARG (not ENV)
# still silences apt prompts in every RUN of this stage, but keeps it out of
# the runtime environment of derived containers (Docker best practice).
ARG DEBIAN_FRONTEND=noninteractive

ENV MPLBACKEND=Svg \
    PIP_IGNORE_INSTALLED=0 \
    PYTHONUNBUFFERED=1 \
    LD_LIBRARY_PATH="/usr/local/cuda/compat/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/lib:/usr/local/cuda/lib64:/usr/local/nvidia/lib64:/usr/local/cuda-11.6/include:/usr/include/x86_64-linux-gnu:$LD_LIBRARY_PATH" \
    PATH="/usr/local/nvm/versions/node/v16.6.1/bin:/opt/conda/bin:/usr/local/mpi/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/cuda-11.6/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/ucx/bin:/opt/tensorrt/bin:/usr/local/src/lightgbm/LightGBM:/usr/local/bin/mecab:$PATH" \
    mecab_dicdir=/usr/local/lib/mecab/dic/mecab-ko-dic \
    CPLUS_INCLUDE_PATH=/usr/include/gdal \
    C_INCLUDE_PATH=/usr/include/gdal \
    CPATH=/usr/local/cuda-11.6/targets/x86_64-linux/include:$CPATH \
    LANG=C.UTF-8
# OS-level build/runtime dependencies (media codecs, GDAL, BLAS, fonts, JDK …).
# `update` and `install` share one layer, and the apt lists are removed in the
# SAME layer so they never persist in the image (hadolint DL3009/DL3015).
# Package names are kept sorted for diffability.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        automake \
        ffmpeg \
        fonts-nanum \
        fonts-nanum-coding \
        fonts-nanum-extra \
        gfortran \
        htop \
        libasound2-dev \
        libatlas-base-dev \
        libavresample-dev \
        libdc1394-22-dev \
        libeigen3-dev \
        libfaac-dev \
        libgdal-dev \
        libgflags-dev \
        libgoogle-glog-dev \
        libgphoto2-dev \
        libgstreamer-plugins-base1.0-dev \
        libgstreamer1.0-dev \
        libgtk-3-dev \
        libhdf5-dev \
        libmp3lame-dev \
        libopenblas-dev \
        libopencore-amrnb-dev \
        libopencore-amrwb-dev \
        libprotobuf-dev \
        libsm6 \
        libtbb-dev \
        libtheora-dev \
        libvorbis-dev \
        libx264-dev \
        libxext6 \
        libxrender-dev \
        libxvidcore-dev \
        mercurial \
        ncurses-term \
        openjdk-8-jdk \
        pdsh \
        protobuf-compiler \
        v4l-utils \
        x264 && \
    rm -rf /var/lib/apt/lists/*
# Expose the NGC image's cuDNN to the CUDA toolchain via an unversioned
# symlink, then refresh the dynamic-linker cache.
# NOTE(review): the apt-list removal below lands in a later layer than the
# one that created the lists, so it cannot shrink the image — kept only to
# empty the directory for subsequent layers.
RUN rm -rf /var/lib/apt/lists/* && \
    ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so.8.3.2 /usr/local/cuda/lib64/libcudnn.so && \
    ldconfig
# Install CMake 3.22.0 from the Kitware binary installer.
# NOTE(review): with --prefix=/usr/bin/cmake the binary lands at
# /usr/bin/cmake/bin/cmake, which is NOT on PATH — builds below resolve
# `cmake` from the base image instead. Confirm whether this is intended.
RUN wget -q -O /tmp/cmake-install.sh \
        https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0-Linux-x86_64.sh && \
    chmod u+x /tmp/cmake-install.sh && \
    # -p: do not fail if the directory already exists (e.g. cached rebuilds)
    mkdir -p /usr/bin/cmake && \
    /tmp/cmake-install.sh --skip-license --prefix=/usr/bin/cmake && \
    rm /tmp/cmake-install.sh
# Build and install nvtop (interactive NVIDIA GPU process monitor) from source.
# The original configured twice (`cmake ..` then `cmake .. -D…`); the first
# pass was fully superseded by the second, so only one configure is kept.
# The source tree is removed in the same layer so it does not bloat the image.
WORKDIR /tmp
RUN git clone https://github.com/Syllo/nvtop.git && \
    mkdir -p nvtop/build && \
    cd /tmp/nvtop/build && \
    cmake .. -DNVML_RETRIEVE_HEADER_ONLINE=True && \
    make -j$(nproc) && \
    make install && \
    rm -rf /tmp/nvtop
# Register conda's python3 so that the bare `python` name resolves to it.
RUN update-alternatives --install /opt/conda/bin/python python /opt/conda/bin/python3 2
# install NLP packages *mecab-ko & khai*
# Build mecab-ko (Korean morphological analyzer) from source; the tarball and
# build tree are deleted in the SAME layer — a later `rm -rf /tmp/*` in
# another layer would not reclaim this space.
WORKDIR /tmp
RUN curl -LO https://bitbucket.org/eunjeon/mecab-ko/downloads/mecab-0.996-ko-0.9.2.tar.gz && \
    tar zxfv mecab-0.996-ko-0.9.2.tar.gz && \
    cd mecab-0.996-ko-0.9.2 && \
    ./configure && \
    make -j$(nproc) && \
    make check && \
    make install && \
    cd /tmp && \
    rm -rf mecab-0.996-ko-0.9.2 mecab-0.996-ko-0.9.2.tar.gz
# Install the mecab-ko dictionary and the mecab Python binding.
# mecabrc is written before `make install` so the dicdir is configured when
# the dictionary lands. Build trees are removed in the same layer, and pip
# runs with --no-cache-dir so no wheel cache persists (hadolint DL3042).
RUN echo "Install mecab-ko-dic" && \
    cd /tmp && \
    ldconfig && \
    curl -LO https://bitbucket.org/eunjeon/mecab-ko-dic/downloads/mecab-ko-dic-2.1.1-20180720.tar.gz && \
    tar -zxvf mecab-ko-dic-2.1.1-20180720.tar.gz && \
    cd mecab-ko-dic-2.1.1-20180720 && \
    ./autogen.sh && \
    ./configure && \
    make -j$(nproc) && \
    sh -c 'echo "dicdir=/usr/local/lib/mecab/dic/mecab-ko-dic" > /usr/local/etc/mecabrc' && \
    make install && \
    cd /tmp && \
    git clone https://bitbucket.org/eunjeon/mecab-python-0.996.git && \
    python3 -m pip install --no-cache-dir /tmp/mecab-python-0.996 && \
    rm -rf /tmp/mecab-ko-dic-2.1.1-20180720 /tmp/mecab-ko-dic-2.1.1-20180720.tar.gz /tmp/mecab-python-0.996
# Node.js 16 from NodeSource (needed by the JupyterLab extension builds below).
# NOTE(review): piping a remote script into bash executes unverified code;
# consider vendoring or checksumming the setup script.
# apt lists are cleaned in the same layer so they do not persist (DL3009).
RUN curl -sL https://deb.nodesource.com/setup_16.x | bash - && \
    apt-get update -y && \
    apt-get install -y nodejs && \
    rm -rf /var/lib/apt/lists/*
# Build OpenBLAS 0.3.20 and a GPU-enabled scs-python against it.
# Fixed: the clone used the git:// protocol, which GitHub permanently
# disabled in March 2022 — this step could no longer succeed. Use https.
WORKDIR /tmp
RUN git clone -q --branch=v0.3.20 https://github.com/xianyi/OpenBLAS.git && \
    cd OpenBLAS && \
    make DYNAMIC_ARCH=1 NO_AFFINITY=1 NUM_THREADS=48 FC=gfortran && \
    make install && \
    cd /tmp && \
    git clone --recursive https://github.com/bodono/scs-python.git && \
    cd /tmp/scs-python && \
    python setup.py install --scs --gpu
# -y is required: `docker build` has no TTY, so conda's interactive
# "Proceed?" confirmation would abort the install.
RUN /opt/conda/bin/conda install -y -c conda-forge opencv ffmpeg spacy
# Install the pinned Python dependency set for this image flavor, then drop
# the requirements file (and any stray wheels) from the layer.
WORKDIR /tmp
COPY ./requirements.22.03.txt /tmp/requirements.txt
RUN /opt/conda/bin/python3 -m pip install --no-cache-dir -r requirements.txt && \
    rm -f /tmp/*.whl /tmp/requirements.txt
# install git-lfs
# The v3.0.x release tarball extracts install.sh at the archive root, so it
# can be run directly from /tmp; everything is cleaned up in the same layer.
WORKDIR /tmp
RUN curl -sLO https://github.com/git-lfs/git-lfs/releases/download/v3.0.2/git-lfs-linux-amd64-v3.0.2.tar.gz && \
    tar -zxf git-lfs-linux-amd64-v3.0.2.tar.gz && \
    bash install.sh && \
    rm -rf /tmp/*
# Install bashtop (terminal resource monitor) from source.
WORKDIR /tmp
RUN git clone https://github.com/aristocratos/bashtop.git && \
    cd bashtop && \
    make install
# Install code-server 4.0.2 (VS Code in the browser) under /usr/local/lib
# and put it on PATH via a symlink.
# NOTE(review): the default /bin/sh has no pipefail, so a failed curl in this
# pipe would be masked by tar's exit status — consider a SHELL with pipefail.
RUN curl -fL https://github.com/cdr/code-server/releases/download/v4.0.2/code-server-4.0.2-linux-amd64.tar.gz \
        | tar -C /usr/local/lib -xz && \
    mv /usr/local/lib/code-server-4.0.2-linux-amd64 /usr/local/lib/code-server-4.0.2 && \
    ln -s /usr/local/lib/code-server-4.0.2/bin/code-server /usr/local/bin/code-server
# Install Open MPI 4.1.1 from source.
# Fixed: the original had an empty line inside the `\` continuation chain
# (after the chmod), which is deprecated in the Dockerfile parser and rejected
# by strict BuildKit — the chain is now contiguous.
RUN mkdir /tmp/openmpi && \
    cd /tmp/openmpi && \
    wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.1.tar.gz && \
    tar zxf openmpi-4.1.1.tar.gz && \
    cd openmpi-4.1.1 && \
    ./configure --enable-orterun-prefix-by-default && \
    make -j $(nproc) all && \
    make install && \
    ldconfig && \
    rm -rf /tmp/openmpi* && \
    # Create a wrapper for OpenMPI to allow running as root by default
    mv /usr/local/bin/mpirun /usr/local/bin/mpirun.real && \
    echo '#!/bin/bash' > /usr/local/bin/mpirun && \
    echo 'mpirun.real --allow-run-as-root "$@"' >> /usr/local/bin/mpirun && \
    chmod a+x /usr/local/bin/mpirun && \
    # Configure OpenMPI to run good defaults:
    echo "btl_tcp_if_exclude = lo,docker0" >> /usr/local/etc/openmpi-mca-params.conf
# Install Horovod, temporarily using CUDA stubs
# One build flag per line for readability; behavior is identical.
# NOTE(review): HOROVOD_CUDA_HOME=$CONDA_PREFIX relies on CONDA_PREFIX being
# set by the NGC base image — confirm it points at the CUDA-enabled env.
RUN cp /usr/local/cuda-11.6/bin/nvcc /bin/nvcc && \
    # ldconfig /usr/local/cuda/targets/x86_64-linux/lib/stubs && \
    HOROVOD_CUDA_HOME=$CONDA_PREFIX \
    HOROVOD_GPU_ALLREDUCE=NCCL \
    HOROVOD_GPU_BROADCAST=NCCL \
    HOROVOD_NCCL_LINK=SHARED \
    HOROVOD_WITHOUT_TENSORFLOW=1 \
    HOROVOD_WITH_PYTORCH=1 \
    HOROVOD_WITHOUT_MXNET=1 \
    pip install --no-cache-dir horovod==0.24.1 && \
    ldconfig
# Experiment-management and MPI Python tooling, version-pinned.
# (Package order is irrelevant to pip's resolver; kept sorted.)
RUN python3 -m pip install --no-cache-dir \
        mlflow==1.21.0 \
        mpi4py==3.1.2 \
        nni==2.5 \
        scikit-nni==0.2.1
# Enable the classic-notebook extension stack and install JupyterLab
# extensions. Lab extensions use --no-build so a single `jupyter lab build`
# at the end compiles everything once.
RUN jupyter nbextensions_configurator enable && \
    jupyter contrib nbextension install && \
    jupyter nbextension enable --py --sys-prefix widgetsnbextension && \
    jupyter serverextension enable --py jupyterlab --sys-prefix && \
    jupyter labextension install --no-build @jupyter-widgets/jupyterlab-manager && \
    jupyter labextension install --no-build @krassowski/jupyterlab-lsp && \
    jupyter serverextension enable --py jupyter_lsp && \
    jupyter labextension install --no-build @jupyterlab/toc && \
    jupyter nbextension enable execute_time/ExecuteTime && \
    jupyter nbextension enable toc2/main && \
    jupyter lab build
# Final cleanup: drop package caches, the nvm hook from the NGC base image's
# bashrc (node is installed system-wide here), and temp files.
# Fixed: `apt` is not intended for scripted use (hadolint DL3027) — use
# `apt-get` instead.
RUN apt-get autoclean && \
    sed -i 's/source \/usr\/local\/nvm\/nvm.sh//' /etc/bash.bashrc && \
    ln -sf /usr/share/terminfo/x/xterm-color /usr/share/terminfo/x/xterm-256color && \
    rm -rf /var/lib/apt/lists/* && \
    rm -rf /root/.cache && \
    rm -rf /tmp/*
# Register the conda interpreter as a Jupyter kernel with the Backend.AI
# display name; cat the kernelspec so the build log shows the result.
RUN /opt/conda/bin/python3 -m ipykernel install \
        --prefix=/opt/conda/ \
        --display-name "PyTorch 1.11 (NGC 22.03/Python 3.8 Conda) on Backend.AI" && \
    cat /opt/conda/share/jupyter/kernels/python3/kernel.json
# Backend.AI specifics
# Service definitions and container bootstrap scripts consumed by the
# Backend.AI agent at kernel startup.
COPY ./service-defs /etc/backend.ai/service-defs
COPY ./runner-scripts/bootstrap.sh runner-scripts/setup_multinode.py /opt/container/
# Backend.AI kernel metadata labels.
# Fixed: `ai.backend.base-distro` was declared twice with the same value —
# the duplicate is removed (the last occurrence would silently win).
# NOTE(review): base-distro says ubuntu16.04 while the NGC 22.03 base is not
# Ubuntu 16.04-based — confirm the intended value with the Backend.AI agent.
LABEL ai.backend.kernelspec="1" \
      ai.backend.envs.corecount="OPENBLAS_NUM_THREADS,OMP_NUM_THREADS,NPROC" \
      ai.backend.features="batch query uid-match user-input" \
      ai.backend.base-distro="ubuntu16.04" \
      ai.backend.accelerators="cuda" \
      ai.backend.resource.min.cpu="1" \
      ai.backend.resource.min.mem="1g" \
      ai.backend.resource.min.cuda.device=1 \
      ai.backend.resource.min.cuda.shares=0 \
      ai.backend.runtime-type="python" \
      ai.backend.runtime-path="/opt/conda/bin/python3" \
      ai.backend.service-ports="ipython:pty:3000,jupyter:http:8091,jupyterlab:http:8090,vscode:http:8180,tensorboard:http:6006,mlflow-ui:preopen:5000,nniboard:preopen:8080"

# Default working directory for user sessions.
WORKDIR /home/work

0 commit comments

Comments
 (0)