Skip to content

Commit 00efc4c

Browse files
committed
Merge branch 'develop' of github.com:PaddlePaddle/Paddle into overlap_send_op
2 parents 315e44a + ded2153 commit 00efc4c

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

59 files changed

+1399
-171
lines changed

CMakeLists.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,6 @@ message(STATUS "CXX compiler: ${CMAKE_CXX_COMPILER}, version: "
2525
message(STATUS "C compiler: ${CMAKE_C_COMPILER}, version: "
2626
"${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}")
2727

28-
find_package(Sphinx)
2928
if(NOT CMAKE_CROSSCOMPILING)
3029
find_package(CUDA QUIET)
3130
endif(NOT CMAKE_CROSSCOMPILING)
@@ -226,5 +225,7 @@ if(WITH_PYTHON)
226225
endif()
227226

228227
if(WITH_DOC)
228+
find_package(Sphinx REQUIRED)
229+
find_python_module(recommonmark REQUIRED)
229230
add_subdirectory(doc)
230231
endif()

doc/v2/build_and_install/build_from_source_cn.rst

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,9 @@
1919
----------------
2020

2121
PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安装编译依赖的步骤,可选的不同编译环境Docker镜像
22-
可以在 `这里 <https://hub.docker.com/r/paddlepaddle/paddle_manylinux_devel/tags/>`_ 找到。或者
23-
参考下述可选步骤,从源码中构建用于编译PaddlePaddle的Docker镜像。
22+
可以在 `这里 <https://hub.docker.com/r/paddlepaddle/paddle_manylinux_devel/tags/>`_ 找到,您也可以
23+
在 `这里 <https://github.com/PaddlePaddle/Paddle/tree/develop/tools/manylinux1/>`_ 找到 paddle_manylinux_devel
24+
镜像的编译以及使用方法。或者参考下述可选步骤,从源码中构建用于编译PaddlePaddle的Docker镜像。
2425

2526
如果您选择不使用Docker镜像,则需要在本机安装下面章节列出的 `编译依赖`_ 之后才能开始编译的步骤。
2627

doc/v2/build_and_install/build_from_source_en.rst

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,8 @@ How To Build
2222
You need to use Docker to build PaddlePaddle
2323
to avoid installing dependencies by yourself. We have several pre-built
2424
Docker images `here <https://hub.docker.com/r/paddlepaddle/paddle_manylinux_devel/tags/>`_ ,
25+
you can also find instructions for building and using the paddle_manylinux_devel Docker image
26+
`here <https://github.com/PaddlePaddle/Paddle/tree/develop/tools/manylinux1/>`_ .
2527
Or you can build your own image from source as the optional step below:
2628

2729
.. code-block:: bash

paddle/fluid/framework/operator.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,10 @@ class ExecutionContext {
192192
return op_.Attr<T>(name);
193193
}
194194

195+
bool HasInput(const std::string& name) const { return op_.HasInputs(name); }
196+
197+
bool HasOutput(const std::string& name) const { return op_.HasOutputs(name); }
198+
195199
size_t InputSize(const std::string& name) const {
196200
return op_.Inputs(name).size();
197201
}

paddle/fluid/framework/parallel_executor.cc

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,8 @@ ParallelExecutor::ParallelExecutor(
5858
const std::unordered_set<std::string> &bcast_vars,
5959
const ProgramDesc &main_program, const std::string &loss_var_name,
6060
Scope *scope, const std::vector<Scope *> &local_scopes, bool allow_op_delay,
61-
bool use_default_grad_scale, bool balance_parameter_opt_between_cards)
61+
bool use_default_grad_scale, bool balance_parameter_opt_between_cards,
62+
size_t num_trainers, size_t trainer_id)
6263
: member_(new ParallelExecutorPrivate(places)) {
6364
member_->global_scope_ = scope;
6465

@@ -80,7 +81,13 @@ ParallelExecutor::ParallelExecutor(
8081

8182
// Bcast Parameters to all GPUs
8283
#ifdef PADDLE_WITH_CUDA
83-
member_->nccl_ctxs_.reset(new platform::NCCLContextMap(member_->places_));
84+
auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
85+
ncclUniqueId *nccl_id = nullptr;
86+
if (nccl_id_var != nullptr) {
87+
nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
88+
}
89+
member_->nccl_ctxs_.reset(new platform::NCCLContextMap(
90+
member_->places_, nccl_id, num_trainers, trainer_id));
8491
#endif
8592
if (platform::is_gpu_place(places[0]) && member_->local_scopes_.size() != 1 &&
8693
local_scopes.empty()) { // Is CUDA

paddle/fluid/framework/parallel_executor.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,8 @@ class ParallelExecutor {
4141
const std::string& loss_var_name, Scope* scope,
4242
const std::vector<Scope*>& local_scopes,
4343
bool allow_op_delay, bool use_default_grad_scale,
44-
bool balance_parameter_opt_between_cards);
44+
bool balance_parameter_opt_between_cards,
45+
size_t num_trainers = 1, size_t trainer_id = 0);
4546

4647
~ParallelExecutor();
4748

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1,2 @@
1-
cc_library(dot SRCS dot.cc)
1+
cc_library(analysis SRCS dot.cc node.cc node.h)
2+
cc_test(test_node SRCS node_tester.cc DEPS analysis)
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License. */
14+
15+
#pragma once
16+
17+
namespace paddle {
18+
namespace inference {
19+
namespace analysis {
20+
21+
enum class Device { CPU, GPU };
22+
23+
} // namespace analysis
24+
} // namespace inference
25+
} // namespace paddle
Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#include "paddle/fluid/inference/analysis/dot.h"
16+
17+
#include <gtest/gtest.h>
18+
#include <memory>
19+
#include "paddle/fluid/inference/analysis/data_flow_graph.h"
20+
21+
namespace paddle {
22+
namespace inference {
23+
namespace analysis {
24+
25+
class DotTester : public ::testing::Test {
26+
protected:
27+
void SetUp() override {
28+
std::vector<Dot::Attr> attrs({{"title", "hello"}});
29+
dot.reset(new Dot(attrs));
30+
dot->AddNode("a", {Dot::Attr{"shape", "box"}, Dot::Attr("color", "blue")});
31+
dot->AddNode("b", {});
32+
dot->AddNode("c", {});
33+
dot->AddEdge("a", "b", {});
34+
dot->AddEdge("b", "c", {});
35+
dot->AddEdge("a", "c", {});
36+
}
37+
38+
std::unique_ptr<Dot> dot;
39+
};
40+
41+
TEST_F(DotTester, Build) {
42+
auto codes = dot->Build();
43+
// Log the generated DOT code; it is too long to compare
44+
// against an expected string literal.
45+
//
46+
// The output is
47+
//
48+
// digraph G {
49+
// title="hello"
50+
// node_1
51+
// node_2
52+
// node_0[label="a" shape="box" color="blue"]
53+
// node_0->node_1
54+
// node_1->node_2
55+
// node_0->node_2
56+
// } // end G
57+
LOG(INFO) << '\n' << codes;
58+
}
59+
60+
} // namespace analysis
61+
} // namespace inference
62+
} // namespace paddle
Lines changed: 74 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,74 @@
1+
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License. */
14+
15+
#pragma once
16+
17+
#include <string>
18+
#include <unordered_map>
19+
#include <vector>
20+
21+
#include "paddle/fluid/platform/enforce.h"
22+
23+
namespace paddle {
24+
namespace inference {
25+
namespace analysis {
26+
27+
template <typename IteratorT>
28+
class iterator_range {
29+
IteratorT begin_, end_;
30+
31+
public:
32+
template <typename Container>
33+
explicit iterator_range(Container &&c) : begin_(c.begin()), end_(c.end()) {}
34+
35+
iterator_range(const IteratorT &begin, const IteratorT &end)
36+
: begin_(begin), end_(end) {}
37+
38+
const IteratorT &begin() const { return begin_; }
39+
const IteratorT &end() const { return end_; }
40+
};
41+
42+
/*
43+
 * A registry helper class; its records keep the order in which they were registered.
44+
*/
45+
template <typename T>
46+
class OrderedRegistry {
47+
public:
48+
T *Register(const std::string &name, T *x) {
49+
PADDLE_ENFORCE(!dic_.count(name));
50+
dic_[name] = data_.size();
51+
data_.emplace_back(std::unique_ptr<T>(x));
52+
return data_.back().get();
53+
}
54+
55+
T *Lookup(const std::string &name) {
56+
auto it = dic_.find(name);
57+
if (it == dic_.end()) return nullptr;
58+
return data_[it->second].get();
59+
}
60+
61+
protected:
62+
std::unordered_map<std::string, int> dic_;
63+
std::vector<std::unique_ptr<T>> data_;
64+
};
65+
66+
} // namespace analysis
67+
} // namespace inference
68+
} // namespace paddle
69+
70+
#define PADDLE_DISALLOW_COPY_AND_ASSIGN(type__) \
71+
\
72+
type__(const type__ &) = delete; \
73+
\
74+
void operator=(const type__ &) = delete;

0 commit comments

Comments
 (0)