
Commit 9216da3

Author: wanghaox
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into develop
2 parents: ca988bd + 9db4d01

File tree: 422 files changed (+10,156 / -7,182 lines)


.gitignore

Lines changed: 1 addition & 2 deletions
@@ -21,11 +21,10 @@ third_party/
 cmake-build-*

 # generated while compiling
-python/paddle/v2/framework/core.so
+python/paddle/v2/fluid/core.so
 paddle/pybind/pybind.h
 CMakeFiles
 cmake_install.cmake
 paddle/.timestamp
 python/paddlepaddle.egg-info/
 paddle/pybind/pybind.h
-python/paddle/v2/framework/tests/tmp/*

CMakeLists.txt

Lines changed: 17 additions & 11 deletions
@@ -36,8 +36,7 @@ include(simd)
 ################################ Configurations #######################################
 option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_FOUND})
 option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND})
-option(WITH_MKLDNN "Compile PaddlePaddle with mkl-dnn support." ${AVX_FOUND})
-option(WITH_MKLML "Compile PaddlePaddle with mklml package." ${AVX_FOUND})
+option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FOUND})
 option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON)
 option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON)
 option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON)
@@ -82,10 +81,8 @@ if(ANDROID OR IOS)
         "Disable PYTHON when cross-compiling for Android and iOS" FORCE)
     set(WITH_RDMA OFF CACHE STRING
         "Disable RDMA when cross-compiling for Android and iOS" FORCE)
-    set(WITH_MKLDNN OFF CACHE STRING
-        "Disable MKLDNN when cross-compiling for Android and iOS" FORCE)
-    set(WITH_MKLML OFF CACHE STRING
-        "Disable MKLML package when cross-compiling for Android and iOS" FORCE)
+    set(WITH_MKL OFF CACHE STRING
+        "Disable MKL when cross-compiling for Android and iOS" FORCE)

     # Compile PaddlePaddle mobile inference library
     if (NOT WITH_C_API)
@@ -111,6 +108,14 @@ else()
   set(THIRD_PARTY_BUILD_TYPE Release)
 endif()

+set(WITH_MKLML ${WITH_MKL})
+if (WITH_MKL AND AVX2_FOUND)
+  set(WITH_MKLDNN ON)
+else()
+  message(STATUS "Do not have AVX2 intrinsics and disabled MKL-DNN")
+  set(WITH_MKLDNN OFF)
+endif()
+
 ########################################################################################

 include(external/mklml) # download mklml package
@@ -158,14 +163,15 @@ set(EXTERNAL_LIBS
 )

 if(WITH_GPU)
-  list(APPEND EXTERNAL_LIBS ${CUDA_LIBRARIES} ${CUDA_rt_LIBRARY})
-  if(NOT WITH_DSO)
-    list(APPEND EXTERNAL_LIBS ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY} ${NCCL_LIBRARY})
-  endif(NOT WITH_DSO)
+  include(cuda)
 endif(WITH_GPU)

+if(WITH_MKLML)
+  list(APPEND EXTERNAL_LIBS ${MKLML_IOMP_LIB})
+endif()
+
 if(WITH_MKLDNN)
-  list(APPEND EXTERNAL_LIBS ${MKLDNN_LIB} ${MKLDNN_IOMP_LIB})
+  list(APPEND EXTERNAL_LIBS ${MKLDNN_LIB})
 endif()

 if(USE_NNPACK)
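
With this change a single WITH_MKL switch drives the Intel math libraries: WITH_MKLML is derived from it, and WITH_MKLDNN is enabled only when AVX2 intrinsics are detected. A minimal configure sketch, assuming an out-of-source build directory (directory name and option values are illustrative, not part of the commit):

    # hedged sketch: configure PaddlePaddle with the unified MKL switch
    mkdir -p build && cd build
    cmake .. -DWITH_GPU=OFF -DWITH_MKL=ON
    # WITH_MKLML follows WITH_MKL; WITH_MKLDNN turns on only if AVX2 is found,
    # otherwise CMake prints "Do not have AVX2 intrinsics and disabled MKL-DNN"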

benchmark/paddle/image/googlenet.py

Lines changed: 4 additions & 1 deletion
@@ -5,6 +5,7 @@
 width = 224
 num_class = 1000
 batch_size = get_config_arg('batch_size', int, 128)
+use_gpu = get_config_arg('use_gpu', bool, True)

 args = {'height': height, 'width': width, 'color': True, 'num_class': num_class}
 define_py_data_sources2(
@@ -16,6 +17,8 @@
     learning_method=MomentumOptimizer(0.9),
     regularization=L2Regularization(0.0005 * batch_size))

+conv_projection = conv_projection if use_gpu else img_conv_layer
+
 def inception2(name, input, channels, \
         filter1,
         filter3R, filter3,
@@ -138,7 +141,7 @@ def inception(name, input, channels, \
     cat = concat_layer(
         name=name,
         input=[cov1, cov3, cov5, covprj],
-        bias_attr=True,
+        bias_attr=True if use_gpu else False,
         act=ReluActivation())
     return cat
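
The new use_gpu argument is read through get_config_arg, so the same config can pick the GPU-only conv_projection or the portable img_conv_layer at launch time. A hedged sketch of passing the flag in via the legacy trainer's --config_args mechanism (the exact paddle train flags are an assumption, not shown in this diff):

    # hypothetical invocation; get_config_arg reads key=value pairs from --config_args
    paddle train --config=benchmark/paddle/image/googlenet.py \
                 --use_gpu=False --trainer_count=1 \
                 --config_args=batch_size=64,use_gpu=False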

benchmark/paddle/image/run_mkldnn.sh

Lines changed: 3 additions & 6 deletions
@@ -1,9 +1,7 @@
 set -e

 function train() {
-  unset OMP_NUM_THREADS MKL_NUM_THREADS
-  export OMP_DYNAMIC="FALSE"
-  export KMP_AFFINITY="granularity=fine,compact,0,0"
+  unset OMP_NUM_THREADS MKL_NUM_THREADS OMP_DYNAMIC KMP_AFFINITY
   topology=$1
   layer_num=$2
   bs=$3
@@ -14,8 +12,6 @@ function train() {
   elif [ $4 == "False" ]; then
     thread=`nproc`
     # each trainer_count use only 1 core to avoid conflict
-    export OMP_NUM_THREADS=1
-    export MKL_NUM_THREADS=1
     log="logs/${topology}-${layer_num}-${thread}mklml-${bs}.log"
   else
     echo "Wrong input $3, use True or False."
@@ -44,6 +40,7 @@ fi
 for use_mkldnn in True False; do
   for batchsize in 64 128 256; do
     train vgg 19 $batchsize $use_mkldnn
-    train resnet 50 $batchsize $use_mkldnn
+    train resnet 50 $batchsize $use_mkldnn
+    train googlenet v1 $batchsize $use_mkldnn
   done
 done
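
With the googlenet entry added, the benchmark loop now sweeps vgg 19, resnet 50, and googlenet v1 over both MKL-DNN settings and batch sizes 64, 128, and 256. A hedged usage sketch, assuming the script is run from benchmark/paddle/image as its logs/ path suggests:

    cd benchmark/paddle/image
    mkdir -p logs        # train() writes one log per run here, e.g.
                         # logs/googlenet-v1-<threads>mklml-<batch>.log for the MKLML case
    bash run_mkldnn.sh   # trains all three topologies with use_mkldnn=True and False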

cmake/configure.cmake

Lines changed: 8 additions & 21 deletions
@@ -76,27 +76,14 @@ else()
     include_directories(${CUDA_TOOLKIT_INCLUDE})
 endif(NOT WITH_GPU)

-if(WITH_MKLDNN)
-    add_definitions(-DPADDLE_USE_MKLDNN)
-    if (WITH_MKLML AND MKLDNN_IOMP_DIR)
-        message(STATUS "Enable Intel OpenMP at ${MKLDNN_IOMP_DIR}")
-        set(OPENMP_FLAGS "-fopenmp")
-        set(CMAKE_C_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
-        set(CMAKE_CXX_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
-        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENMP_FLAGS}")
-        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPENMP_FLAGS}")
-    else()
-        find_package(OpenMP)
-        if(OPENMP_FOUND)
-            set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
-            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
-        else()
-            message(WARNING "Can not find OpenMP."
-                "Some performance features in MKLDNN may not be available")
-        endif()
-    endif()
-
-endif(WITH_MKLDNN)
+if (WITH_MKLML AND MKLML_IOMP_LIB)
+    message(STATUS "Enable Intel OpenMP with ${MKLML_IOMP_LIB}")
+    set(OPENMP_FLAGS "-fopenmp")
+    set(CMAKE_C_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
+    set(CMAKE_CXX_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENMP_FLAGS}")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPENMP_FLAGS}")
+endif()

 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SIMD_FLAG}")
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SIMD_FLAG}")
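
Since MKL-DNN is now always paired with MKLML, the Intel OpenMP setup keys off MKLML_IOMP_LIB alone and the old find_package(OpenMP) fallback is gone. A hedged way to confirm the branch is taken at configure time (the grep target is the STATUS message added in this diff):

    # the message appears only when WITH_MKLML is on and the iomp library was located
    cmake .. -DWITH_MKL=ON 2>&1 | grep "Enable Intel OpenMP"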

cmake/cross_compiling/ios.cmake

Lines changed: 3 additions & 5 deletions
@@ -76,11 +76,9 @@ set(IOS_PLATFORM ${IOS_PLATFORM} CACHE STRING "Type of iOS Platform")
 # Set the architecture for iOS
 if(NOT DEFINED IOS_ARCH)
   if(IOS_PLATFORM STREQUAL "OS")
-    # FIXME(liuyiqun): support "armv7;armv7s;arm64" future
-    set(IOS_ARCH "arm64")
+    set(IOS_ARCH "armv7;armv7s;arm64")
   elseif(IOS_PLATFORM STREQUAL "SIMULATOR")
-    # FIXME(liuyiqun): support "i386;x86_64" future
-    set(IOS_ARCH "x86_64")
+    set(IOS_ARCH "i386;x86_64")
   endif()
 endif()
 set(CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE string "Build architecture for iOS")
@@ -248,7 +246,7 @@ set(IOS_COMPILER_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} ${XCODE_IOS_BITCODE_

 # Hidden visibilty is required for cxx on iOS
 set(CMAKE_C_FLAGS "${IOS_COMPILER_FLAGS} ${CMAKE_C_FLAGS}" CACHE STRING "C flags")
-set(CMAKE_CXX_FLAGS "${IOS_COMPILER_FLAGS} -fvisibility-inlines-hidden ${CMAKE_CXX_FLAGS}" CACHE STRING "CXX flags")
+set(CMAKE_CXX_FLAGS "${IOS_COMPILER_FLAGS} -fvisibility=hidden -fvisibility-inlines-hidden ${CMAKE_CXX_FLAGS}" CACHE STRING "CXX flags")

 set(IOS_LINK_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -Wl,-search_paths_first")

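Device builds now default to the fat armv7;armv7s;arm64 architecture list and simulator builds to i386;x86_64, and -fvisibility=hidden joins the C++ flags. A hedged configure sketch for a device build (the WITH_C_API setting is an assumption for an inference-only library, not part of this diff):

    cmake .. -DCMAKE_TOOLCHAIN_FILE=../cmake/cross_compiling/ios.cmake \
             -DIOS_PLATFORM=OS -DWITH_C_API=ON
    # IOS_ARCH no longer needs to be set by hand; pass -DIOS_ARCH="arm64" to build
    # a single slice instead of the new default list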

cmake/cuda.cmake

Lines changed: 188 additions & 0 deletions
@@ -0,0 +1,188 @@ (new file)
if(NOT WITH_GPU)
  return()
endif()

set(paddle_known_gpu_archs "30 35 50 52 60 61 70")
set(paddle_known_gpu_archs7 "30 35 50 52")
set(paddle_known_gpu_archs8 "30 35 50 52 60 61")

######################################################################################
# A function for automatic detection of GPUs installed (if autodetection is enabled)
# Usage:
#   detect_installed_gpus(out_variable)
function(detect_installed_gpus out_variable)
  if(NOT CUDA_gpu_detect_output)
    set(cufile ${PROJECT_BINARY_DIR}/detect_cuda_archs.cu)

    file(WRITE ${cufile} ""
      "#include <cstdio>\n"
      "int main() {\n"
      "  int count = 0;\n"
      "  if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n"
      "  if (count == 0) return -1;\n"
      "  for (int device = 0; device < count; ++device) {\n"
      "    cudaDeviceProp prop;\n"
      "    if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n"
      "      std::printf(\"%d.%d \", prop.major, prop.minor);\n"
      "  }\n"
      "  return 0;\n"
      "}\n")

    execute_process(COMMAND "${CUDA_NVCC_EXECUTABLE}" "-ccbin=${CUDA_HOST_COMPILER}"
                    "--run" "${cufile}"
                    WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
                    RESULT_VARIABLE nvcc_res OUTPUT_VARIABLE nvcc_out
                    ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)

    if(nvcc_res EQUAL 0)
      # only keep the last line of nvcc_out
      STRING(REGEX REPLACE ";" "\\\\;" nvcc_out "${nvcc_out}")
      STRING(REGEX REPLACE "\n" ";" nvcc_out "${nvcc_out}")
      list(GET nvcc_out -1 nvcc_out)
      string(REPLACE "2.1" "2.1(2.0)" nvcc_out "${nvcc_out}")
      set(CUDA_gpu_detect_output ${nvcc_out} CACHE INTERNAL "Returned GPU architetures from detect_installed_gpus tool" FORCE)
    endif()
  endif()

  if(NOT CUDA_gpu_detect_output)
    message(STATUS "Automatic GPU detection failed. Building for all known architectures.")
    set(${out_variable} ${paddle_known_gpu_archs} PARENT_SCOPE)
  else()
    set(${out_variable} ${CUDA_gpu_detect_output} PARENT_SCOPE)
  endif()
endfunction()


########################################################################
# Function for selecting GPU arch flags for nvcc based on CUDA_ARCH_NAME
# Usage:
#   select_nvcc_arch_flags(out_variable)
function(select_nvcc_arch_flags out_variable)
  # List of arch names
  set(archs_names "Kepler" "Maxwell" "Pascal" "All" "Manual")
  set(archs_name_default "All")
  if(NOT CMAKE_CROSSCOMPILING)
    list(APPEND archs_names "Auto")
  endif()

  # set CUDA_ARCH_NAME strings (so it will be seen as dropbox in CMake-Gui)
  set(CUDA_ARCH_NAME ${archs_name_default} CACHE STRING "Select target NVIDIA GPU achitecture.")
  set_property( CACHE CUDA_ARCH_NAME PROPERTY STRINGS "" ${archs_names} )
  mark_as_advanced(CUDA_ARCH_NAME)

  # verify CUDA_ARCH_NAME value
  if(NOT ";${archs_names};" MATCHES ";${CUDA_ARCH_NAME};")
    string(REPLACE ";" ", " archs_names "${archs_names}")
    message(FATAL_ERROR "Only ${archs_names} architeture names are supported.")
  endif()

  if(${CUDA_ARCH_NAME} STREQUAL "Manual")
    set(CUDA_ARCH_BIN ${paddle_known_gpu_archs} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
    set(CUDA_ARCH_PTX "50" CACHE STRING "Specify 'virtual' PTX architectures to build PTX intermediate code for")
    mark_as_advanced(CUDA_ARCH_BIN CUDA_ARCH_PTX)
  else()
    unset(CUDA_ARCH_BIN CACHE)
    unset(CUDA_ARCH_PTX CACHE)
  endif()

  if(${CUDA_ARCH_NAME} STREQUAL "Kepler")
    set(cuda_arch_bin "30 35")
  elseif(${CUDA_ARCH_NAME} STREQUAL "Maxwell")
    set(cuda_arch_bin "50")
  elseif(${CUDA_ARCH_NAME} STREQUAL "Pascal")
    set(cuda_arch_bin "60 61")
  elseif(${CUDA_ARCH_NAME} STREQUAL "Volta")
    set(cuda_arch_bin "70")
  elseif(${CUDA_ARCH_NAME} STREQUAL "All")
    set(cuda_arch_bin ${paddle_known_gpu_archs})
  elseif(${CUDA_ARCH_NAME} STREQUAL "Auto")
    detect_installed_gpus(cuda_arch_bin)
  else()  # (${CUDA_ARCH_NAME} STREQUAL "Manual")
    set(cuda_arch_bin ${CUDA_ARCH_BIN})
  endif()

  # remove dots and convert to lists
  string(REGEX REPLACE "\\." "" cuda_arch_bin "${cuda_arch_bin}")
  string(REGEX REPLACE "\\." "" cuda_arch_ptx "${CUDA_ARCH_PTX}")
  string(REGEX MATCHALL "[0-9()]+" cuda_arch_bin "${cuda_arch_bin}")
  string(REGEX MATCHALL "[0-9]+" cuda_arch_ptx "${cuda_arch_ptx}")
  list(REMOVE_DUPLICATES cuda_arch_bin)
  list(REMOVE_DUPLICATES cuda_arch_ptx)

  set(nvcc_flags "")
  set(nvcc_archs_readable "")

  # Tell NVCC to add binaries for the specified GPUs
  foreach(arch ${cuda_arch_bin})
    if(arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
      # User explicitly specified PTX for the concrete BIN
      list(APPEND nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1})
      list(APPEND nvcc_archs_readable sm_${CMAKE_MATCH_1})
    else()
      # User didn't explicitly specify PTX for the concrete BIN, we assume PTX=BIN
      list(APPEND nvcc_flags -gencode arch=compute_${arch},code=sm_${arch})
      list(APPEND nvcc_archs_readable sm_${arch})
    endif()
  endforeach()

  # Tell NVCC to add PTX intermediate code for the specified architectures
  foreach(arch ${cuda_arch_ptx})
    list(APPEND nvcc_flags -gencode arch=compute_${arch},code=compute_${arch})
    list(APPEND nvcc_archs_readable compute_${arch})
  endforeach()

  string(REPLACE ";" " " nvcc_archs_readable "${nvcc_archs_readable}")
  set(${out_variable} ${nvcc_flags} PARENT_SCOPE)
  set(${out_variable}_readable ${nvcc_archs_readable} PARENT_SCOPE)
endfunction()

message(STATUS "CUDA detected: " ${CUDA_VERSION})
if (${CUDA_VERSION} LESS 7.0)
  set(paddle_known_gpu_archs ${paddle_known_gpu_archs})
elseif (${CUDA_VERSION} LESS 8.0) # CUDA 7.x
  set(paddle_known_gpu_archs ${paddle_known_gpu_archs7})
  list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
  list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
elseif (${CUDA_VERSION} LESS 9.0) # CUDA 8.x
  set(paddle_known_gpu_archs ${paddle_known_gpu_archs8})
  list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
  list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
  # CUDA 8 may complain that sm_20 is no longer supported. Suppress the
  # warning for now.
  list(APPEND CUDA_NVCC_FLAGS "-Wno-deprecated-gpu-targets")
endif()

include_directories(${CUDA_INCLUDE_DIRS})
list(APPEND EXTERNAL_LIBS ${CUDA_LIBRARIES} ${CUDA_rt_LIBRARY})
if(NOT WITH_DSO)
  list(APPEND EXTERNAL_LIBS ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY} ${NCCL_LIBRARY})
endif(NOT WITH_DSO)

# setting nvcc arch flags
select_nvcc_arch_flags(NVCC_FLAGS_EXTRA)
list(APPEND CUDA_NVCC_FLAGS ${NVCC_FLAGS_EXTRA})
message(STATUS "Added CUDA NVCC flags for: ${NVCC_FLAGS_EXTRA_readable}")

# Set C++11 support
set(CUDA_PROPAGATE_HOST_FLAGS OFF)

# Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
# So, don't set these flags here.
list(APPEND CUDA_NVCC_FLAGS "-std=c++11")
list(APPEND CUDA_NVCC_FLAGS "--use_fast_math")
list(APPEND CUDA_NVCC_FLAGS "-Xcompiler -fPIC")
# Set :expt-relaxed-constexpr to suppress Eigen warnings
list(APPEND CUDA_NVCC_FLAGS "--expt-relaxed-constexpr")

if(CMAKE_BUILD_TYPE STREQUAL "Debug")
  list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_DEBUG})
elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
  list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELEASE})
elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
  list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELWITHDEBINFO})
elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel")
  list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_MINSIZEREL})
endif()

mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_VERBOSE_BUILD)
mark_as_advanced(CUDA_SDK_ROOT_DIR CUDA_SEPARABLE_COMPILATION)
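
The new cuda.cmake gathers the GPU build logic that previously lived in CMakeLists.txt: it chooses -gencode flags through CUDA_ARCH_NAME (Kepler, Maxwell, Pascal, All, Manual, plus Auto when not cross-compiling), and Auto compiles and runs a small detection program with nvcc so that binaries are built only for the GPUs actually present. A hedged configure sketch (values are illustrative):

    # let detect_installed_gpus pick the local architectures
    cmake .. -DWITH_GPU=ON -DCUDA_ARCH_NAME=Auto
    # or pin them by hand; with these values the summary printed after
    # "Added CUDA NVCC flags for:" should read roughly "sm_52 sm_60 compute_60"
    cmake .. -DWITH_GPU=ON -DCUDA_ARCH_NAME=Manual \
             -DCUDA_ARCH_BIN="52 60" -DCUDA_ARCH_PTX="60"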
