Skip to content

Commit 31de609

Browse files
committed
Merge branch 'windows/build' into windows/online
test=develop
2 parents a8f97a8 + dfbac60 commit 31de609

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

57 files changed

+1577
-106
lines changed

AUTHORS.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
| kexinzhao | Ke-Xin Zhao |
2626
| kuke | Yi-Bing Liu |
2727
| lcy-seso | Ying Cao |
28+
| cjld | Dun Liang |
2829
| lipeng-unisound | Peng Li |
2930
| liuyuan | Yuan Liu |
3031
| livc | Zhao Li |

cmake/generic.cmake

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -349,10 +349,17 @@ function(cc_test TARGET_NAME)
349349
set(oneValueArgs "")
350350
set(multiValueArgs SRCS DEPS ARGS)
351351
cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
352+
if(WIN32)
353+
list(APPEND win32_deps shlwapi)
354+
if("${cc_test_DEPS};" MATCHES "python;")
355+
list(REMOVE_ITEM cc_test_DEPS python)
356+
list(APPEND win32_deps ${PYTHON_LIBRARIES})
357+
endif()
358+
endif(WIN32)
352359
add_executable(${TARGET_NAME} ${cc_test_SRCS})
353360
target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog)
354361
if(WIN32)
355-
target_link_libraries(${TARGET_NAME} shlwapi)
362+
target_link_libraries(${TARGET_NAME} ${win32_deps})
356363
endif(WIN32)
357364
add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog)
358365
add_test(NAME ${TARGET_NAME}
@@ -679,7 +686,7 @@ function(py_test TARGET_NAME)
679686
set(multiValueArgs SRCS DEPS ARGS ENVS)
680687
cmake_parse_arguments(py_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
681688
add_test(NAME ${TARGET_NAME}
682-
COMMAND env FLAGS_init_allocated_mem=true FLAGS_cudnn_deterministic=true
689+
COMMAND ${CMAKE_COMMAND} -E env FLAGS_init_allocated_mem=true FLAGS_cudnn_deterministic=true
683690
FLAGS_cpu_deterministic=true
684691
PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_ENVS}
685692
${PYTHON_EXECUTABLE} -u ${py_test_SRCS} ${py_test_ARGS}

paddle/fluid/API.spec

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@ paddle.fluid.layers.beam_search ArgSpec(args=['pre_ids', 'pre_scores', 'ids', 's
103103
paddle.fluid.layers.row_conv ArgSpec(args=['input', 'future_context_size', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(None, None))
104104
paddle.fluid.layers.multiplex ArgSpec(args=['inputs', 'index'], varargs=None, keywords=None, defaults=None)
105105
paddle.fluid.layers.layer_norm ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None))
106+
paddle.fluid.layers.group_norm ArgSpec(args=['input', 'groups', 'epsilon', 'param_attr', 'bias_attr', 'act', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(1e-05, None, None, None, 'NCHW', None))
106107
paddle.fluid.layers.softmax_with_cross_entropy ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode', 'return_softmax'], varargs=None, keywords=None, defaults=(False, -100, False, False))
107108
paddle.fluid.layers.smooth_l1 ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None))
108109
paddle.fluid.layers.one_hot ArgSpec(args=['input', 'depth'], varargs=None, keywords=None, defaults=None)

paddle/fluid/framework/details/all_reduce_op_handle.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ namespace paddle {
2323
namespace framework {
2424
namespace details {
2525

26-
#ifdef PADDLE_WITH_CUDA
26+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
2727
AllReduceOpHandle::AllReduceOpHandle(ir::Node *node,
2828
const std::vector<Scope *> &local_scopes,
2929
const std::vector<platform::Place> &places,
@@ -74,7 +74,7 @@ void AllReduceOpHandle::RunImpl() {
7474
}
7575

7676
if (platform::is_gpu_place(lod_tensors[0]->place())) {
77-
#ifdef PADDLE_WITH_CUDA
77+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
7878
PADDLE_ENFORCE(nccl_ctxs_, "nccl_ctxs should not be nullptr.");
7979
int dtype = -1;
8080
size_t numel = 0;

paddle/fluid/framework/details/all_reduce_op_handle.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
#include "paddle/fluid/framework/details/op_handle_base.h"
2121
#include "paddle/fluid/framework/lod_tensor.h"
2222
#include "paddle/fluid/framework/scope.h"
23-
#ifdef PADDLE_WITH_CUDA
23+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
2424
#include "paddle/fluid/platform/nccl_helper.h"
2525
#endif
2626

@@ -29,7 +29,7 @@ namespace framework {
2929
namespace details {
3030

3131
struct AllReduceOpHandle : public OpHandleBase {
32-
#ifdef PADDLE_WITH_CUDA
32+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
3333
AllReduceOpHandle(ir::Node *node, const std::vector<Scope *> &local_scopes,
3434
const std::vector<platform::Place> &places,
3535
const platform::NCCLContextMap *ctxs);
@@ -49,7 +49,7 @@ struct AllReduceOpHandle : public OpHandleBase {
4949
private:
5050
std::vector<Scope *> local_scopes_;
5151
std::vector<platform::Place> places_;
52-
#ifdef PADDLE_WITH_CUDA
52+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
5353
const platform::NCCLContextMap *nccl_ctxs_;
5454
#endif
5555
};

paddle/fluid/framework/details/broadcast_op_handle.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ void BroadcastOpHandle::BroadcastOneVar(
8282
});
8383
}
8484
} else {
85-
#ifdef PADDLE_WITH_CUDA
85+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
8686
VarHandle *out_handle = nullptr;
8787
int root_id = boost::get<platform::CUDAPlace>(in_tensor.place()).device;
8888
std::vector<std::function<void()>> broadcast_calls;

paddle/fluid/framework/details/broadcast_op_handle.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
#include "paddle/fluid/framework/selected_rows.h"
2525
#include "paddle/fluid/platform/device_context.h"
2626

27-
#ifdef PADDLE_WITH_CUDA
27+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
2828
#include "paddle/fluid/platform/nccl_helper.h"
2929
#endif
3030

@@ -34,7 +34,7 @@ namespace details {
3434

3535
struct BroadcastOpHandle : public OpHandleBase {
3636
public:
37-
#ifdef PADDLE_WITH_CUDA
37+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
3838
BroadcastOpHandle(ir::Node *node, const std::vector<Scope *> &local_scopes,
3939
const std::vector<platform::Place> &places,
4040
const platform::NCCLContextMap *nccl_ctxs)
@@ -68,7 +68,7 @@ struct BroadcastOpHandle : public OpHandleBase {
6868

6969
std::vector<Scope *> local_scopes_;
7070
std::vector<platform::Place> places_;
71-
#ifdef PADDLE_WITH_CUDA
71+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
7272
const platform::NCCLContextMap *nccl_ctxs_;
7373
#endif
7474

paddle/fluid/framework/details/broadcast_op_handle_test.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -42,15 +42,15 @@ struct TestBroadcastOpHandle {
4242
std::vector<std::unique_ptr<ir::Node>> nodes_;
4343
std::vector<p::Place> place_list_;
4444
bool use_gpu_;
45-
#ifdef PADDLE_WITH_CUDA
45+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
4646
std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
4747
#endif
4848

4949
void WaitAll() {
5050
for (size_t j = 0; j < ctxs_.size(); ++j) {
5151
ctxs_[j]->Wait();
5252
}
53-
#ifdef PADDLE_WITH_CUDA
53+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
5454
if (nccl_ctxs_) {
5555
nccl_ctxs_->WaitAll();
5656
}
@@ -60,7 +60,7 @@ struct TestBroadcastOpHandle {
6060
void InitCtxOnGpu(bool use_gpu) {
6161
use_gpu_ = use_gpu;
6262
if (use_gpu_) {
63-
#ifdef PADDLE_WITH_CUDA
63+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
6464
int count = p::GetCUDADeviceCount();
6565
if (count <= 1) {
6666
LOG(WARNING) << "Cannot test multi-gpu Broadcast, because the CUDA "
@@ -84,7 +84,7 @@ struct TestBroadcastOpHandle {
8484
place_list_.push_back(p);
8585
ctxs_.emplace_back(new p::CPUDeviceContext(p));
8686
}
87-
#ifdef PADDLE_WITH_CUDA
87+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
8888
nccl_ctxs_.reset(nullptr);
8989
#endif
9090
}
@@ -106,14 +106,14 @@ struct TestBroadcastOpHandle {
106106
nodes_.emplace_back(
107107
ir::CreateNodeForTest("node0", ir::Node::Type::kOperation));
108108
if (use_gpu_) {
109-
#ifdef PADDLE_WITH_CUDA
109+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
110110
op_handle_ = new BroadcastOpHandle(nodes_.back().get(), local_scopes_,
111111
place_list_, nccl_ctxs_.get());
112112
#else
113113
PADDLE_THROW("CUDA is not support.");
114114
#endif
115115
} else {
116-
#ifdef PADDLE_WITH_CUDA
116+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
117117
op_handle_ = new BroadcastOpHandle(nodes_.back().get(), local_scopes_,
118118
place_list_, nccl_ctxs_.get());
119119
#else

paddle/fluid/framework/details/build_strategy.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ std::unique_ptr<ir::Graph> BuildStrategy::Apply(
9696
const std::string &loss_var_name,
9797
const std::unordered_set<std::string> &param_names,
9898
const std::vector<Scope *> &local_scopes,
99-
#ifdef PADDLE_WITH_CUDA
99+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
100100
const bool use_cuda, platform::NCCLContextMap *nccl_ctxs) const {
101101
#else
102102
const bool use_cuda) const {
@@ -118,7 +118,7 @@ std::unique_ptr<ir::Graph> BuildStrategy::Apply(
118118
pass->Erase("local_scopes");
119119
pass->SetNotOwned<const std::vector<Scope *>>("local_scopes",
120120
&local_scopes);
121-
#ifdef PADDLE_WITH_CUDA
121+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
122122
platform::NCCLContextMap *nctx = use_cuda ? nccl_ctxs : nullptr;
123123
pass->Erase("nccl_ctxs");
124124
pass->SetNotOwned<platform::NCCLContextMap>("nccl_ctxs", nctx);

paddle/fluid/framework/details/build_strategy.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
#include "paddle/fluid/platform/device_context.h"
2424
#include "paddle/fluid/platform/enforce.h"
2525

26-
#ifdef PADDLE_WITH_CUDA
26+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
2727
#include "paddle/fluid/platform/nccl_helper.h"
2828
#endif
2929

@@ -98,7 +98,7 @@ struct BuildStrategy {
9898
const std::string &loss_var_name,
9999
const std::unordered_set<std::string> &param_names,
100100
const std::vector<Scope *> &local_scopes,
101-
#ifdef PADDLE_WITH_CUDA
101+
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
102102
const bool use_cuda, platform::NCCLContextMap *nccl_ctxs) const;
103103
#else
104104
const bool use_cuda) const;

0 commit comments

Comments (0)