Commit 1eeb2e0

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into feature/add_reduce_op_handle

2 parents: e4de957 + 6f83142

82 files changed (+2213, -421 lines)


CMakeLists.txt

Lines changed: 1 addition & 6 deletions
@@ -39,7 +39,6 @@ option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_F
 option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF)
 option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND})
 option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FOUND})
-option(WITH_TENSORRT "Compile PaddlePaddle with TensorRT support." OFF)
 option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON)
 option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF)
 option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON)
@@ -180,13 +179,9 @@ set(EXTERNAL_LIBS
 
 if(WITH_GPU)
     include(cuda)
+    include(tensorrt)
 endif(WITH_GPU)
 
-# TensorRT depends on GPU.
-if (NOT WITH_GPU)
-    set(WITH_TENSORRT OFF)
-endif()
-
 if(WITH_AMD_GPU)
     find_package(HIP)
     include(hip)

Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -46,7 +46,7 @@ ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin
 RUN curl -s -q https://glide.sh/get | sh
 
 # Install TensorRT
-# The unnecessary files has been removed to make the library small.
+# The unnecessary files has been removed to make the library small. It only contains include and lib now.
 RUN wget -qO- http://paddlepaddledeps.bj.bcebos.com/TensorRT-4.0.0.3.Ubuntu-16.04.4.x86_64-gnu.cuda-8.0.cudnn7.0.tar.gz | \
     tar -xz -C /usr/local && \
     cp -rf /usr/local/TensorRT/include /usr && \

Dockerfile.android

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ RUN git config --global credential.helper store
 # Fix locales to en_US.UTF-8
 RUN localedef -i en_US -f UTF-8 en_US.UTF-8
 
-RUN pip install --upgrade pip && \
+RUN pip install --upgrade pip==9.0.3 && \
     pip install -U 'protobuf==3.1.0' && \
     pip install -U wheel sphinx && \
     pip install pre-commit
File renamed without changes.

cmake/configure.cmake

Lines changed: 10 additions & 0 deletions
@@ -80,6 +80,16 @@ if(WITH_GPU)
     # Include cuda and cudnn
     include_directories(${CUDNN_INCLUDE_DIR})
     include_directories(${CUDA_TOOLKIT_INCLUDE})
+
+    if(TENSORRT_FOUND)
+        if(${CUDA_VERSION_MAJOR} VERSION_LESS 8)
+            message(FATAL_ERROR "TensorRT needs CUDA >= 8.0 to compile")
+        endif()
+        if(${CUDNN_MAJOR_VERSION} VERSION_LESS 7)
+            message(FATAL_ERROR "TensorRT needs CUDNN >= 7.0 to compile")
+        endif()
+        include_directories(${TENSORRT_INCLUDE_DIR})
+    endif()
 elseif(WITH_AMD_GPU)
     add_definitions(-DPADDLE_WITH_HIP)
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__HIP_PLATFORM_HCC__")

cmake/tensorrt.cmake

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
+if(NOT WITH_GPU)
+    return()
+endif()
+
+set(TENSORRT_ROOT "/usr" CACHE PATH "TENSORRT ROOT")
+find_path(TENSORRT_INCLUDE_DIR NvInfer.h
+    PATHS ${TENSORRT_ROOT} ${TENSORRT_ROOT}/include
+    $ENV{TENSORRT_ROOT} $ENV{TENSORRT_ROOT}/include
+    NO_DEFAULT_PATH
+)
+
+find_library(TENSORRT_LIBRARY NAMES libnvinfer.so libnvinfer.a
+    PATHS ${TENSORRT_ROOT} ${TENSORRT_ROOT}/lib
+    $ENV{TENSORRT_ROOT} $ENV{TENSORRT_ROOT}/lib
+    NO_DEFAULT_PATH
+    DOC "Path to TensorRT library.")
+
+if(TENSORRT_INCLUDE_DIR AND TENSORRT_LIBRARY)
+    set(TENSORRT_FOUND ON)
+else()
+    set(TENSORRT_FOUND OFF)
+endif()
+
+if(TENSORRT_FOUND)
+    file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
+    string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
+        "${TENSORRT_VERSION_FILE_CONTENTS}")
+    string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
+        TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
+
+    message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
+        "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
+endif()
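
The NV_TENSORRT_MAJOR macro that this module greps out of NvInfer.h is also visible to the compiler, so a translation unit can check the same version directly. The snippet below is an illustrative sketch only, not part of this commit; it assumes the TensorRT and CUDA headers located above are on the include path.

```cpp
// Illustrative only (not part of this commit): print the same major version
// that cmake/tensorrt.cmake extracts from NvInfer.h with a regex.
// Assumes TensorRT (and CUDA) headers are on the include path.
#include <NvInfer.h>
#include <cstdio>

int main() {
  std::printf("TensorRT major version: %d\n", NV_TENSORRT_MAJOR);
  return 0;
}
```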

doc/CMakeLists.txt

Lines changed: 3 additions & 1 deletion
@@ -3,7 +3,9 @@ add_custom_target(paddle_apis ALL
 
 add_custom_target(paddle_docs ALL
     DEPENDS paddle_v2_docs paddle_v2_docs_cn
-    paddle_fluid_docs paddle_fluid_docs_cn)
+    paddle_fluid_docs paddle_fluid_docs_cn
+    paddle_mobile_docs paddle_mobile_docs_cn)
 
 add_subdirectory(v2)
 add_subdirectory(fluid)
+add_subdirectory(mobile)

doc/fluid/api/initializer.rst

Lines changed: 42 additions & 0 deletions
@@ -33,3 +33,45 @@ Xavier
     :members:
     :noindex:
 
+MSRA
+------
+
+.. autoclass:: paddle.fluid.initializer.MSRA
+    :members:
+    :noindex:
+
+ConstantInitializer
+-------------------
+
+.. autoclass:: paddle.fluid.initializer.ConstantInitializer
+    :members:
+    :noindex:
+
+UniformInitializer
+------------------
+
+.. autoclass:: paddle.fluid.initializer.UniformInitializer
+    :members:
+    :noindex:
+
+NormalInitializer
+-----------------
+
+.. autoclass:: paddle.fluid.initializer.NormalInitializer
+    :members:
+    :noindex:
+
+XavierInitializer
+-----------------
+
+.. autoclass:: paddle.fluid.initializer.XavierInitializer
+    :members:
+    :noindex:
+MSRA
+------
+
+MSRAInitializer
+-----------------
+.. autoclass:: paddle.fluid.initializer.MSRAInitializer
+    :members:
+    :noindex:

doc/fluid/design/concepts/parallel_executor.md

Lines changed: 1 addition & 1 deletion
@@ -84,7 +84,7 @@ Running an operator can be asynchronized. There is a thread pool to execute an `
 
 ## Synchronize GPU Kernels
 
-The GPU is a non-blocking device. The different streams need be synchronized when switing streams. In current implementation, the synchronization based on the following algorithm:
+The GPU is a non-blocking device. The different streams need be synchronized when switching streams. In current implementation, the synchronization based on the following algorithm:
 
 1. `OpHandle` will record `DeviceContext` that it is used.
 2. In `OpHandle::Run`, if the `DeviceContext` of current operator is different from `DeviceContext` of any input variable, just wait the generate operator of this input variable.
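
The stream-switch rule in that hunk can be pictured with a small sketch. The code below is illustrative pseudostructure, not Paddle's actual classes; the names `OpHandle`, `VarHandle`, and `DeviceContext` follow the design doc, and `WaitOn()` stands in for whatever event or stream synchronization the real implementation uses.

```cpp
// Illustrative sketch of the rule above, not Paddle's real implementation:
// every OpHandle records the DeviceContext it runs on, and before running it
// waits on the context of any input that was produced on a different stream.
#include <vector>

struct DeviceContext;  // stands in for a per-stream context (hypothetical here)
struct OpHandle;

struct VarHandle {
  OpHandle* generated_op = nullptr;  // the operator that produced this variable
};

struct OpHandle {
  DeviceContext* dev_ctx = nullptr;  // recorded when the handle is built
  std::vector<VarHandle*> inputs;

  void Run();
  void WaitOn(DeviceContext* other) { /* e.g. stream-wait on an event */ }
  void RunImpl() { /* launch the kernel on dev_ctx */ }
};

void OpHandle::Run() {
  for (VarHandle* in : inputs) {
    OpHandle* producer = in->generated_op;
    if (producer != nullptr && producer->dev_ctx != dev_ctx) {
      WaitOn(producer->dev_ctx);  // cross-stream input: synchronize first
    }
  }
  RunImpl();
}
```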

doc/fluid/design/dist_train/README.md

Lines changed: 57 additions & 0 deletions
@@ -0,0 +1,57 @@
+## Distributed training overview doc
+
+Currently Paddle Fluid use parameter server architecture to support distributed training.
+
+For synchronous and asynchronous training, the differences are mostly in the logic of parameter server. Now we have already support synchronous training.
+
+### Synchronous training
+
+The training process of synchronous training is:
+
+![synchronous distributed training](./src/sync_distributed_training.png)
+
+1. Pserver
+   1. set `barrier_condition_` to 0 and waits for trainers to send gradient.
+1. Trainer
+   1. Trainer read minibatch of data, run forward-backward with local parameter copy and get the gradients for parameters.
+   1. Trainer use split op to split all the gradient into blocks. The split method is determined at compile time.
+   1. Trainer use send_op to send all the split gradients to corresponding parameter server.
+   1. After trainer send all the gradients, it will send a `BATCH_BARRIER_MESSAGE` to all pservers.
+   1. Trainer call GetVariable to pserver and wait for `barrier_condition_` on pserver to be 1.
+1. Pserver
+   1. Pserver will count the number of `BATCH_BARRIER_MESSAGE`.
+      1. When the count of `BATCH_BARRIER_MESSAGE` is equal to the number of Trainer. Pserver thinks it received all gradient from all trainers.
+      1. Pserver will run the optimization block to optimize the parameters.
+      1. After optimization, pserver set `barrier_condition_` to 1.
+   1. Pserver wait for `FETCH_BARRIER_MESSAGE`.
+1. Trainer.
+   1. The trainer uses GetVariable to get all the parameters from pserver.
+   1. Trainer sends a `FETCH_BARRIER_MESSAGE` to each pserver.
+1. Pserver.
+   1. when the number of `FETCH_BARRIER_MESSAGE` reach the number of all trainers. Pserver think all the parameters have been got. it will go back to 1. to set `barrier_condition_` to 0.
+
+### Asynchronous training
+In the above process. There are two barriers for all trainers to synchronize with each other. In asynchronous training, these two barriers are not needed. The trainer can just send gradients to pserver and then get parameters back.
+
+The training process of asynchronous training can be:
+
+![asynchronous distributed training](./src/async_distributed_training.png)
+
+1. Pserver:
+   1. Each parameter has a queue to receive its gradient from trainers.
+   1. Each parameter has a thread to read data from the queue and run optimize block, using the gradient to optimize the parameter.
+   1. Using an independent thread to handle RPC call `GetVariable` for trainers to get parameters back.(Maybe here we should use a thread pool to speed up fetching the parameters.)
+
+1. Trainer:
+   1. Trainer read a batch of data. Run forward and backward with local parameter copy and get the gradients for parameters.
+   1. Trainer split all gradients to blocks and then send these gradient blocks to pservers(pserver will put them into the queue).
+   2. Trainer gets all parameters back from pserver.
+
+### Note:
+There are also some conditions that need to consider. For exmaple:
+
+1. If trainer needs to wait for the pserver to apply it's gradient and then get back the parameters back.
+1. If we need a lock between parameter update and parameter fetch.
+1. If one parameter must be on one server, or it can also be split and send to multiple parameter servers.
+
+The above architecture of asynchronous training can support different mode, we can have a detailed test in the future for these problems.
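
The synchronous-training barrier described in the README added above boils down to counting `BATCH_BARRIER_MESSAGE`s and flipping `barrier_condition_`. The class below is a hedged, stand-alone sketch of that bookkeeping, not the actual Paddle pserver code; `RunOptimizeBlock()` is a placeholder for the optimization block the document mentions.

```cpp
// Stand-alone sketch of the pserver-side barrier bookkeeping described above.
// Illustrative only, not Paddle's implementation.
#include <condition_variable>
#include <mutex>

class BatchBarrier {
 public:
  explicit BatchBarrier(int num_trainers) : num_trainers_(num_trainers) {}

  // Called once per BATCH_BARRIER_MESSAGE received from a trainer.
  void NotifyBatchBarrier() {
    std::unique_lock<std::mutex> lock(mu_);
    if (++batch_barrier_count_ == num_trainers_) {
      RunOptimizeBlock();      // all gradients arrived: run the optimize block
      barrier_condition_ = 1;  // parameters are now ready to be fetched
      cv_.notify_all();
    }
  }

  // Called by the GetVariable handler before returning parameters.
  void WaitForOptimizedParams() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return barrier_condition_ == 1; });
  }

  // Called after FETCH_BARRIER_MESSAGE has arrived from every trainer.
  void ResetForNextBatch() {
    std::lock_guard<std::mutex> lock(mu_);
    batch_barrier_count_ = 0;
    barrier_condition_ = 0;  // back to step 1: wait for the next batch
  }

 private:
  void RunOptimizeBlock() { /* apply the aggregated gradients (placeholder) */ }

  std::mutex mu_;
  std::condition_variable cv_;
  const int num_trainers_;
  int batch_barrier_count_ = 0;
  int barrier_condition_ = 0;
};
```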
