Skip to content

Commit bc3ec53

Browse files
committed
remove conflict
2 parents 6a1c223 + 134eaf2 commit bc3ec53

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

61 files changed

+2425
-393
lines changed

CMakeLists.txt

Lines changed: 5 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -108,14 +108,11 @@ else()
108108
set(THIRD_PARTY_BUILD_TYPE Release)
109109
endif()
110110

111-
if(WITH_MKL)
112-
set(WITH_MKLML ON)
113-
set(WITH_MKLDNN ${AVX2_FOUND})
114-
if(NOT WITH_MKLDNN)
115-
message(WARNING "Do not have AVX2 intrinsics and disabled MKL-DNN")
116-
endif()
111+
set(WITH_MKLML ${WITH_MKL})
112+
if (WITH_MKL AND AVX2_FOUND)
113+
set(WITH_MKLDNN ON)
117114
else()
118-
set(WITH_MKLML OFF)
115+
message(STATUS "Do not have AVX2 intrinsics and disabled MKL-DNN")
119116
set(WITH_MKLDNN OFF)
120117
endif()
121118

@@ -166,10 +163,7 @@ set(EXTERNAL_LIBS
166163
)
167164

168165
if(WITH_GPU)
169-
list(APPEND EXTERNAL_LIBS ${CUDA_LIBRARIES} ${CUDA_rt_LIBRARY})
170-
if(NOT WITH_DSO)
171-
list(APPEND EXTERNAL_LIBS ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY} ${NCCL_LIBRARY})
172-
endif(NOT WITH_DSO)
166+
include(cuda)
173167
endif(WITH_GPU)
174168

175169
if(WITH_MKLML)

cmake/cuda.cmake

Lines changed: 188 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,188 @@
1+
if(NOT WITH_GPU)
2+
return()
3+
endif()
4+
5+
set(paddle_known_gpu_archs "30 35 50 52 60 61 70")
6+
set(paddle_known_gpu_archs7 "30 35 50 52")
7+
set(paddle_known_gpu_archs8 "30 35 50 52 60 61")
8+
9+
######################################################################################
10+
# A function for automatic detection of GPUs installed (if autodetection is enabled)
11+
# Usage:
12+
# detect_installed_gpus(out_variable)
13+
function(detect_installed_gpus out_variable)
14+
if(NOT CUDA_gpu_detect_output)
15+
set(cufile ${PROJECT_BINARY_DIR}/detect_cuda_archs.cu)
16+
17+
file(WRITE ${cufile} ""
18+
"#include <cstdio>\n"
19+
"int main() {\n"
20+
" int count = 0;\n"
21+
" if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n"
22+
" if (count == 0) return -1;\n"
23+
" for (int device = 0; device < count; ++device) {\n"
24+
" cudaDeviceProp prop;\n"
25+
" if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n"
26+
" std::printf(\"%d.%d \", prop.major, prop.minor);\n"
27+
" }\n"
28+
" return 0;\n"
29+
"}\n")
30+
31+
execute_process(COMMAND "${CUDA_NVCC_EXECUTABLE}" "-ccbin=${CUDA_HOST_COMPILER}"
32+
"--run" "${cufile}"
33+
WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
34+
RESULT_VARIABLE nvcc_res OUTPUT_VARIABLE nvcc_out
35+
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
36+
37+
if(nvcc_res EQUAL 0)
38+
# only keep the last line of nvcc_out
39+
STRING(REGEX REPLACE ";" "\\\\;" nvcc_out "${nvcc_out}")
40+
STRING(REGEX REPLACE "\n" ";" nvcc_out "${nvcc_out}")
41+
list(GET nvcc_out -1 nvcc_out)
42+
string(REPLACE "2.1" "2.1(2.0)" nvcc_out "${nvcc_out}")
43+
set(CUDA_gpu_detect_output ${nvcc_out} CACHE INTERNAL "Returned GPU architectures from detect_installed_gpus tool" FORCE)
44+
endif()
45+
endif()
46+
47+
if(NOT CUDA_gpu_detect_output)
48+
message(STATUS "Automatic GPU detection failed. Building for all known architectures.")
49+
set(${out_variable} ${paddle_known_gpu_archs} PARENT_SCOPE)
50+
else()
51+
set(${out_variable} ${CUDA_gpu_detect_output} PARENT_SCOPE)
52+
endif()
53+
endfunction()
54+
55+
56+
########################################################################
57+
# Function for selecting GPU arch flags for nvcc based on CUDA_ARCH_NAME
58+
# Usage:
59+
# select_nvcc_arch_flags(out_variable)
60+
function(select_nvcc_arch_flags out_variable)
61+
# List of arch names
62+
set(archs_names "Kepler" "Maxwell" "Pascal" "All" "Manual")
63+
set(archs_name_default "All")
64+
if(NOT CMAKE_CROSSCOMPILING)
65+
list(APPEND archs_names "Auto")
66+
endif()
67+
68+
# set CUDA_ARCH_NAME strings (so it will be seen as a dropdown in the CMake GUI)
69+
set(CUDA_ARCH_NAME ${archs_name_default} CACHE STRING "Select target NVIDIA GPU architecture.")
70+
set_property( CACHE CUDA_ARCH_NAME PROPERTY STRINGS "" ${archs_names} )
71+
mark_as_advanced(CUDA_ARCH_NAME)
72+
73+
# verify CUDA_ARCH_NAME value
74+
if(NOT ";${archs_names};" MATCHES ";${CUDA_ARCH_NAME};")
75+
string(REPLACE ";" ", " archs_names "${archs_names}")
76+
message(FATAL_ERROR "Only ${archs_names} architecture names are supported.")
77+
endif()
78+
79+
if(${CUDA_ARCH_NAME} STREQUAL "Manual")
80+
set(CUDA_ARCH_BIN ${paddle_known_gpu_archs} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
81+
set(CUDA_ARCH_PTX "50" CACHE STRING "Specify 'virtual' PTX architectures to build PTX intermediate code for")
82+
mark_as_advanced(CUDA_ARCH_BIN CUDA_ARCH_PTX)
83+
else()
84+
unset(CUDA_ARCH_BIN CACHE)
85+
unset(CUDA_ARCH_PTX CACHE)
86+
endif()
87+
88+
if(${CUDA_ARCH_NAME} STREQUAL "Kepler")
89+
set(cuda_arch_bin "30 35")
90+
elseif(${CUDA_ARCH_NAME} STREQUAL "Maxwell")
91+
set(cuda_arch_bin "50")
92+
elseif(${CUDA_ARCH_NAME} STREQUAL "Pascal")
93+
set(cuda_arch_bin "60 61")
94+
elseif(${CUDA_ARCH_NAME} STREQUAL "Volta")
95+
set(cuda_arch_bin "70")
96+
elseif(${CUDA_ARCH_NAME} STREQUAL "All")
97+
set(cuda_arch_bin ${paddle_known_gpu_archs})
98+
elseif(${CUDA_ARCH_NAME} STREQUAL "Auto")
99+
detect_installed_gpus(cuda_arch_bin)
100+
else() # (${CUDA_ARCH_NAME} STREQUAL "Manual")
101+
set(cuda_arch_bin ${CUDA_ARCH_BIN})
102+
endif()
103+
104+
# remove dots and convert to lists
105+
string(REGEX REPLACE "\\." "" cuda_arch_bin "${cuda_arch_bin}")
106+
string(REGEX REPLACE "\\." "" cuda_arch_ptx "${CUDA_ARCH_PTX}")
107+
string(REGEX MATCHALL "[0-9()]+" cuda_arch_bin "${cuda_arch_bin}")
108+
string(REGEX MATCHALL "[0-9]+" cuda_arch_ptx "${cuda_arch_ptx}")
109+
list(REMOVE_DUPLICATES cuda_arch_bin)
110+
list(REMOVE_DUPLICATES cuda_arch_ptx)
111+
112+
set(nvcc_flags "")
113+
set(nvcc_archs_readable "")
114+
115+
# Tell NVCC to add binaries for the specified GPUs
116+
foreach(arch ${cuda_arch_bin})
117+
if(arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
118+
# User explicitly specified PTX for the concrete BIN
119+
list(APPEND nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1})
120+
list(APPEND nvcc_archs_readable sm_${CMAKE_MATCH_1})
121+
else()
122+
# User didn't explicitly specify PTX for the concrete BIN, we assume PTX=BIN
123+
list(APPEND nvcc_flags -gencode arch=compute_${arch},code=sm_${arch})
124+
list(APPEND nvcc_archs_readable sm_${arch})
125+
endif()
126+
endforeach()
127+
128+
# Tell NVCC to add PTX intermediate code for the specified architectures
129+
foreach(arch ${cuda_arch_ptx})
130+
list(APPEND nvcc_flags -gencode arch=compute_${arch},code=compute_${arch})
131+
list(APPEND nvcc_archs_readable compute_${arch})
132+
endforeach()
133+
134+
string(REPLACE ";" " " nvcc_archs_readable "${nvcc_archs_readable}")
135+
set(${out_variable} ${nvcc_flags} PARENT_SCOPE)
136+
set(${out_variable}_readable ${nvcc_archs_readable} PARENT_SCOPE)
137+
endfunction()
138+
139+
message(STATUS "CUDA detected: " ${CUDA_VERSION})
140+
if (${CUDA_VERSION} LESS 7.0)
141+
set(paddle_known_gpu_archs ${paddle_known_gpu_archs})
142+
elseif (${CUDA_VERSION} LESS 8.0) # CUDA 7.x
143+
set(paddle_known_gpu_archs ${paddle_known_gpu_archs7})
144+
list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
145+
list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
146+
elseif (${CUDA_VERSION} LESS 9.0) # CUDA 8.x
147+
set(paddle_known_gpu_archs ${paddle_known_gpu_archs8})
148+
list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
149+
list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
150+
# CUDA 8 may complain that sm_20 is no longer supported. Suppress the
151+
# warning for now.
152+
list(APPEND CUDA_NVCC_FLAGS "-Wno-deprecated-gpu-targets")
153+
endif()
154+
155+
include_directories(${CUDA_INCLUDE_DIRS})
156+
list(APPEND EXTERNAL_LIBS ${CUDA_LIBRARIES} ${CUDA_rt_LIBRARY})
157+
if(NOT WITH_DSO)
158+
list(APPEND EXTERNAL_LIBS ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY} ${NCCL_LIBRARY})
159+
endif(NOT WITH_DSO)
160+
161+
# setting nvcc arch flags
162+
select_nvcc_arch_flags(NVCC_FLAGS_EXTRA)
163+
list(APPEND CUDA_NVCC_FLAGS ${NVCC_FLAGS_EXTRA})
164+
message(STATUS "Added CUDA NVCC flags for: ${NVCC_FLAGS_EXTRA_readable}")
165+
166+
# Set C++11 support
167+
set(CUDA_PROPAGATE_HOST_FLAGS OFF)
168+
169+
# Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
170+
# So, don't set these flags here.
171+
list(APPEND CUDA_NVCC_FLAGS "-std=c++11")
172+
list(APPEND CUDA_NVCC_FLAGS "--use_fast_math")
173+
list(APPEND CUDA_NVCC_FLAGS "-Xcompiler -fPIC")
174+
# Set :expt-relaxed-constexpr to suppress Eigen warnings
175+
list(APPEND CUDA_NVCC_FLAGS "--expt-relaxed-constexpr")
176+
177+
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
178+
list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_DEBUG})
179+
elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
180+
list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELEASE})
181+
elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
182+
list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELWITHDEBINFO})
183+
elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel")
184+
list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_MINSIZEREL})
185+
endif()
186+
187+
mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_VERBOSE_BUILD)
188+
mark_as_advanced(CUDA_SDK_ROOT_DIR CUDA_SEPARABLE_COMPILATION)

cmake/flags.cmake

Lines changed: 0 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -149,58 +149,3 @@ endforeach()
149149
foreach(flag ${GPU_COMMON_FLAGS})
150150
safe_set_nvflag(${flag})
151151
endforeach()
152-
153-
154-
set(CUDA_PROPAGATE_HOST_FLAGS OFF)
155-
156-
# Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
157-
# So, don't set these flags here.
158-
LIST(APPEND CUDA_NVCC_FLAGS -std=c++11)
159-
LIST(APPEND CUDA_NVCC_FLAGS --use_fast_math)
160-
161-
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
162-
LIST(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_DEBUG})
163-
elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
164-
LIST(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELEASE})
165-
elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
166-
LIST(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELWITHDEBINFO})
167-
elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel")
168-
LIST(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_MINSIZEREL})
169-
endif()
170-
171-
function(specify_cuda_arch cuda_version cuda_arch)
172-
if(${cuda_version} VERSION_GREATER "8.0")
173-
foreach(capability 61 62)
174-
if(${cuda_arch} STREQUAL ${capability})
175-
list(APPEND __arch_flags " -gencode arch=compute_${cuda_arch},code=sm_${cuda_arch}")
176-
endif()
177-
endforeach()
178-
elseif(${cuda_version} VERSION_GREATER "7.0" and ${cuda_arch} STREQUAL "53")
179-
list(APPEND __arch_flags " -gencode arch=compute_${cuda_arch},code=sm_${cuda_arch}")
180-
endif()
181-
endfunction()
182-
183-
# Common gpu architectures: Kepler, Maxwell
184-
foreach(capability 30 35 50)
185-
list(APPEND __arch_flags " -gencode arch=compute_${capability},code=sm_${capability}")
186-
endforeach()
187-
188-
if (CUDA_VERSION VERSION_GREATER "7.0" OR CUDA_VERSION VERSION_EQUAL "7.0")
189-
list(APPEND __arch_flags " -gencode arch=compute_52,code=sm_52")
190-
endif()
191-
192-
# Modern gpu architectures: Pascal
193-
if (CUDA_VERSION VERSION_GREATER "8.0" OR CUDA_VERSION VERSION_EQUAL "8.0")
194-
list(APPEND __arch_flags " -gencode arch=compute_60,code=sm_60")
195-
list(APPEND CUDA_NVCC_FLAGS --expt-relaxed-constexpr)
196-
endif()
197-
198-
# Custom gpu architecture
199-
set(CUDA_ARCH)
200-
201-
if(CUDA_ARCH)
202-
specify_cuda_arch(${CUDA_VERSION} ${CUDA_ARCH})
203-
endif()
204-
205-
set(CUDA_NVCC_FLAGS ${__arch_flags} ${CUDA_NVCC_FLAGS})
206-

doc/api/v2/config/layer.rst

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -335,6 +335,16 @@ bilinear_interp
335335
.. autoclass:: paddle.v2.layer.bilinear_interp
336336
:noindex:
337337

338+
dot_prod
339+
---------
340+
.. autoclass:: paddle.v2.layer.dot_prod
341+
:noindex:
342+
343+
out_prod
344+
--------
345+
.. autoclass:: paddle.v2.layer.out_prod
346+
:noindex:
347+
338348
power
339349
-----
340350
.. autoclass:: paddle.v2.layer.power
@@ -372,6 +382,11 @@ cos_sim
372382
.. autoclass:: paddle.v2.layer.cos_sim
373383
:noindex:
374384

385+
l2_distance
386+
-----------
387+
.. autoclass:: paddle.v2.layer.l2_distance
388+
:noindex:
389+
375390
trans
376391
-----
377392
.. autoclass:: paddle.v2.layer.trans

paddle/framework/backward.cc

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -513,19 +513,14 @@ ParamGradInfoMap AppendBackward(
513513
const int root_block_idx = 0;
514514
auto root_block = program_desc.MutableBlock(root_block_idx);
515515

516-
// insert fill one op for target
517-
// TODO(qiao) add some check to the target.
518516
std::string fill_one_op_out = GradVarName(target.Name());
519-
std::vector<int64_t> target_shape_desc = target.Shape();
520-
std::vector<int> target_shape;
521-
std::transform(target_shape_desc.begin(), target_shape_desc.end(),
522-
std::back_inserter(target_shape),
523-
[](int64_t dim) { return static_cast<int>(dim); });
517+
bool is_scalar = target.Shape() == std::vector<int64_t>{1};
518+
PADDLE_ENFORCE(is_scalar, "target should be scalar");
524519
VLOG(3) << "backward from loss=" << target.Name()
525520
<< " data_type=" << target.GetDataType();
526521
std::unique_ptr<OpDescBind> fill_one_op(
527522
new OpDescBind("fill_constant", {}, {{"Out", {fill_one_op_out}}},
528-
{{"shape", target_shape},
523+
{{"shape", std::vector<int>{1}},
529524
{"value", static_cast<float>(1.0)},
530525
{"data_type", target.GetDataType()}}));
531526
// infer var type of fill_one_op

paddle/framework/backward_test.cc

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -508,6 +508,7 @@ TEST(Backward, simple_single_op) {
508508
op->SetOutput("Out", {"out"});
509509

510510
auto target = f::VarDescBind("out");
511+
target.SetShape({1});
511512
auto var_to_grad = AppendBackward(program, target, {});
512513

513514
ASSERT_EQ(block->AllOps().size(), 3UL);
@@ -544,6 +545,7 @@ TEST(Backward, default_attribute) {
544545
op->CheckAttrs();
545546

546547
auto target = f::VarDescBind("out");
548+
target.SetShape({1});
547549
AppendBackward(program, target, {});
548550

549551
ASSERT_EQ(block->AllOps().size(), 3UL);
@@ -581,6 +583,7 @@ TEST(Backward, simple_mult_op) {
581583
op3->SetOutput("Out", {"out3"});
582584

583585
auto target = f::VarDescBind("out3");
586+
target.SetShape({1});
584587
size_t forward_len = block->AllOps().size();
585588
auto var_to_grad = AppendBackward(program, target, {});
586589

@@ -670,6 +673,7 @@ TEST(Backward, intermedia_var_no_grad) {
670673
op4->SetOutput("Out", {"out4"});
671674

672675
auto target = f::VarDescBind("out4");
676+
target.SetShape({1});
673677
size_t forward_len = block->AllOps().size();
674678
auto var_to_grad = AppendBackward(program, target, {"out3"});
675679

@@ -730,6 +734,7 @@ TEST(Backward, var_no_grad) {
730734
op2->SetOutput("Z", {"z2"});
731735

732736
auto target = f::VarDescBind("z2");
737+
target.SetShape({1});
733738
size_t forward_len = block->AllOps().size();
734739
auto var_to_grad = AppendBackward(program, target, {"z1"});
735740

@@ -810,6 +815,7 @@ TEST(Backward, shared_var) {
810815
op3->SetOutput("Out", {"out3"});
811816

812817
auto target = f::VarDescBind("out3");
818+
target.SetShape({1});
813819
size_t forward_len = block->AllOps().size();
814820
auto var_to_grad = AppendBackward(program, target, {});
815821

@@ -888,6 +894,7 @@ TEST(Backward, half_backward) {
888894
op1->SetOutput("Out", {"out"});
889895

890896
auto target = f::VarDescBind("out");
897+
target.SetShape({1});
891898
size_t forward_len = block->AllOps().size();
892899
auto var_to_grad = AppendBackward(program, target, {"b"});
893900
f::OpDescBind *fill_op = block->AllOps()[forward_len];

0 commit comments

Comments
 (0)