# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# multi-stage build arguments (each ARG must be re-declared inside the stage that uses it)
ARG UCX_VERSION=1.8.0
ARG OPENMPI_VERSION=4.0.4
ARG CONDA_VERSION=4.7.10
ARG NUMPY_VERSION=1.18.5
ARG ONNX_VERSION=1.7.0
ARG PYTORCH_VERSION=1.6.0
ARG BUILD_CONFIG=Release
ARG OPENMPI_PATH=/opt/openmpi-${OPENMPI_VERSION}
ARG COMMIT=master
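# example build invocation (hypothetical image tag; adjust the build args as needed):
#   docker build -f Dockerfile.training -t onnxruntime-training:local \
#     --build-arg COMMIT=master --build-arg BUILD_CONFIG=Release .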
# cuda development image for building sources
FROM nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04 as builder
# set location for builds
WORKDIR /stage
# install curl, git, ssh (required by MPI when running ORT tests)
RUN apt-get -y update &&\
apt-get -y --no-install-recommends install \
curl \
git \
language-pack-en \
openssh-client \
unattended-upgrades
# update existing packages to minimize security vulnerabilities
RUN unattended-upgrade
RUN locale-gen en_US.UTF-8 && \
update-locale LANG=en_US.UTF-8
# install miniconda (ships with python 3.7 by default)
ARG CONDA_VERSION
ARG CONDA_URL=https://repo.anaconda.com/miniconda/Miniconda3-${CONDA_VERSION}-Linux-x86_64.sh
RUN cd /stage && curl -fSsL --insecure ${CONDA_URL} -o install-conda.sh &&\
/bin/bash ./install-conda.sh -b -p /opt/conda &&\
/opt/conda/bin/conda clean -ya
ENV PATH=/opt/conda/bin:${PATH}
# install cmake, setuptools, numpy, and onnx
ARG NUMPY_VERSION
ARG ONNX_VERSION
RUN conda install -y \
setuptools \
cmake \
numpy=${NUMPY_VERSION} &&\
pip install \
onnx=="${ONNX_VERSION}"
# install cerberus for the new pytorch front-end
RUN pip install cerberus
# build ucx suite
# note: openmpi will not select ucx without multithreading enabled
ARG UCX_VERSION
ARG UCX_TARNAME=ucx-$UCX_VERSION
ARG UCX_URL=https://github.com/openucx/ucx/releases/download/v${UCX_VERSION}/${UCX_TARNAME}.tar.gz
RUN apt-get -y update && apt-get -y --no-install-recommends install \
libibverbs-dev \
libnuma-dev &&\
cd /stage && curl -fSsL ${UCX_URL} | tar xzf - &&\
cd ${UCX_TARNAME} &&\
./configure \
--prefix=/opt/ucx \
--with-cuda=/usr/local/cuda \
--with-verbs=/usr/lib/x86_64-linux-gnu \
--enable-mt &&\
make -j"$(nproc)" &&\
make install
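# optional sanity check (not part of the build): the freshly built ucx_info
# should list the cuda and verbs transports, e.g.
#   /opt/ucx/bin/ucx_info -d | grep -i -e cuda -e ib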
# build openmpi (use --prefix /opt/openmpi-xxx to move to runtime image)
# note: --enable-orterun-prefix-by-default is required for Azure Machine Learning compute
# note: disable verbs as we use ucx middleware and don't want btl openib warnings
ARG OPENMPI_VERSION
ARG OPENMPI_PATH
ARG OPENMPI_TARNAME=openmpi-${OPENMPI_VERSION}
ARG OPENMPI_URL=https://download.open-mpi.org/release/open-mpi/v%OMPI_BASE%/${OPENMPI_TARNAME}.tar.gz
RUN export OMPI_BASE=${OPENMPI_VERSION%.*} &&\
cd /stage && curl -fSsL `echo ${OPENMPI_URL} | sed s/%OMPI_BASE%/$OMPI_BASE/` | tar xzf - &&\
cd ${OPENMPI_TARNAME} &&\
./configure \
--prefix=${OPENMPI_PATH} \
--with-ucx=/opt/ucx \
--without-verbs \
--with-cuda=/usr/local/cuda \
--enable-mpirun-prefix-by-default \
--enable-orterun-prefix-by-default \
--enable-mca-no-build=btl-uct &&\
make -j"$(nproc)" install &&\
ldconfig
ENV PATH=${OPENMPI_PATH}/bin:$PATH
ENV LD_LIBRARY_PATH=${OPENMPI_PATH}/lib:$LD_LIBRARY_PATH
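# optional sanity check (not part of the build): confirm openmpi picked up the ucx pml, e.g.
#   ompi_info | grep -i ucx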
# install mpi4py (be sure to link existing /opt/openmpi-xxx)
RUN CC=mpicc MPICC=mpicc pip install mpi4py --no-binary mpi4py
# install pytorch
ARG PYTORCH_VERSION
RUN pip install torch==${PYTORCH_VERSION}
# in case you need to build pytorch from source:
# note: useful if you need a specific branch or want to link against system cuda libraries or MPI
# note: recommend using many high-frequency cores (e.g. 32+ skylake cores)
# ENV CUDA_HOME="/usr/local/cuda" \
# CUDNN_LIBRARY="/usr/lib/x86_64-linux-gnu" \
# NCCL_INCLUDE_DIR="/usr/include" \
# NCCL_LIB_DIR="/usr/lib/x86_64-linux-gnu" \
# USE_SYSTEM_NCCL=1
# RUN conda install -y \
# mkl \
# mkl-include \
# ninja \
# pyyaml \
# cffi &&\
# cd /stage && git clone https://github.com/pytorch/pytorch.git &&\
# cd pytorch &&\
# git checkout v1.6.0 &&\
# git submodule update --init --recursive &&\
# python setup.py bdist_wheel -d build/wheel &&\
# pip install build/wheel/*.whl
# build onnxruntime wheel with cuda and mpi support
ARG BUILD_CONFIG
ARG COMMIT
RUN cd /stage && git clone https://github.com/microsoft/onnxruntime.git &&\
cd onnxruntime &&\
git checkout ${COMMIT} &&\
cp ThirdPartyNotices.txt /stage/ThirdPartyNotices.txt &&\
cp dockerfiles/LICENSE-IMAGE.txt /stage/LICENSE-IMAGE.txt &&\
python tools/ci_build/build.py \
--cmake_extra_defines \
ONNXRUNTIME_VERSION=`cat ./VERSION_NUMBER` \
--config ${BUILD_CONFIG} \
--enable_training \
--mpi_home ${OPENMPI_PATH} \
--use_cuda \
--cuda_home /usr/local/cuda \
--cudnn_home /usr/lib/x86_64-linux-gnu/ \
--nccl_home /usr/lib/x86_64-linux-gnu/ \
--update \
--parallel \
--build_dir build \
--build \
--build_wheel \
--skip_tests &&\
pip install build/${BUILD_CONFIG}/dist/*.whl
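# optional sanity check (hypothetical one-liner, not part of the build): verify the wheel imports, e.g.
#   python -c "import onnxruntime; print(onnxruntime.__version__)"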
# Install AzureML support and commonly used packages.
RUN pip install azureml-defaults transformers==2.11.0 msgpack==1.0.0 tensorboardX==1.8 tensorboard==2.3.0
# switch to cuda runtime environment
# note: launch with --gpus all or nvidia-docker
FROM nvidia/cuda:10.2-cudnn7-runtime-ubuntu18.04
WORKDIR /stage
# install ucx
# note: launch with --cap-add=sys_nice to avoid 'mbind' warnings
COPY --from=builder /opt/ucx /opt/ucx
ENV PATH=/opt/ucx/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/ucx/lib:$LD_LIBRARY_PATH
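# example run command combining the launch notes above (hypothetical image tag):
#   docker run --rm -it --gpus all --cap-add=sys_nice onnxruntime-training:local bash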
# install openmpi
# note: permit mpirun as root for Azure cluster submissions
# note: force openmpi to select the ucx pml (or fail)
ARG OPENMPI_VERSION
ARG OPENMPI_PATH
COPY --from=builder ${OPENMPI_PATH} ${OPENMPI_PATH}
ENV PATH=${OPENMPI_PATH}/bin:$PATH
ENV LD_LIBRARY_PATH=${OPENMPI_PATH}/lib:$LD_LIBRARY_PATH
ENV OMPI_ALLOW_RUN_AS_ROOT=1
ENV OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1
ENV OMPI_MCA_pml=ucx
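# example launch relying on the settings above (hypothetical hosts and training script):
#   mpirun -np 2 -H node1,node2 python train.py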
RUN apt-get -y update && apt-get -y --no-install-recommends install \
openssh-server \
openssh-client \
libibverbs-dev \
libnuma-dev &&\
ldconfig
# copy conda environment (includes numpy, mpi4py, pytorch, onnxruntime)
COPY --from=builder /opt/conda /opt/conda
ENV PATH=/opt/conda/bin:${PATH}
# make ssh/sshd less strict for wiring containers on an Azure VM scale set
# note: use 'service ssh start' to launch sshd (will fail if port 22 is in use)
# note: can also set a port != 22 and set port=X in the MPI hosts file
# note: password-free ssh login must be set up between MPI hosts
RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/g' \
/etc/ssh/sshd_config &&\
sed -i 's/#StrictModes yes/StrictModes no/g' \
/etc/ssh/sshd_config &&\
sed -i 's/# StrictHostKeyChecking ask/ StrictHostKeyChecking no/g' \
/etc/ssh/ssh_config &&\
mkdir /run/sshd &&\
chmod u=rwx,go=rx /run/sshd
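# example wiring for the notes above (hypothetical hostnames):
#   service ssh start
#   echo "node1 slots=4" >  /workspace/hostfile
#   echo "node2 slots=4" >> /workspace/hostfile
#   mpirun -np 8 --hostfile /workspace/hostfile python train.py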
# export versions
ARG UCX_VERSION
ARG OPENMPI_VERSION
ARG CONDA_VERSION
ARG NUMPY_VERSION
ARG ONNX_VERSION
ARG PYTORCH_VERSION
LABEL UCX_VERSION=${UCX_VERSION}
LABEL OPENMPI_VERSION=${OPENMPI_VERSION}
LABEL CONDA_VERSION=${CONDA_VERSION}
LABEL NUMPY_VERSION=${NUMPY_VERSION}
LABEL ONNX_VERSION=${ONNX_VERSION}
LABEL PYTORCH_VERSION=${PYTORCH_VERSION}
# clean/finalize environment
# note: adds onnxruntime license and third party notices
RUN conda remove -y cmake &&\
apt-get purge -y build-essential &&\
apt-get autoremove -y &&\
rm -fr /stage
WORKDIR /workspace
COPY --from=builder /stage/*.txt /workspace/