Skip to content

Commit 7c777dd

Browse files
committed
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into feature/exec_strategy
2 parents c72a4f4 + 0acc93c commit 7c777dd

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

58 files changed

+1387
-171
lines changed

CMakeLists.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,6 @@ message(STATUS "CXX compiler: ${CMAKE_CXX_COMPILER}, version: "
2525
message(STATUS "C compiler: ${CMAKE_C_COMPILER}, version: "
2626
"${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}")
2727

28-
find_package(Sphinx)
2928
if(NOT CMAKE_CROSSCOMPILING)
3029
find_package(CUDA QUIET)
3130
endif(NOT CMAKE_CROSSCOMPILING)
@@ -226,5 +225,7 @@ if(WITH_PYTHON)
226225
endif()
227226

228227
if(WITH_DOC)
228+
find_package(Sphinx REQUIRED)
229+
find_python_module(recommonmark REQUIRED)
229230
add_subdirectory(doc)
230231
endif()

doc/v2/build_and_install/build_from_source_cn.rst

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,9 @@
1919
----------------
2020

2121
PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安装编译依赖的步骤,可选的不同编译环境Docker镜像
22-
可以在 `这里 <https://hub.docker.com/r/paddlepaddle/paddle_manylinux_devel/tags/>`_ 找到。或者
23-
参考下述可选步骤,从源码中构建用于编译PaddlePaddle的Docker镜像。
22+
可以在 `这里 <https://hub.docker.com/r/paddlepaddle/paddle_manylinux_devel/tags/>`_ 找到,您也可以
23+
在 `这里 <https://github.com/PaddlePaddle/Paddle/tree/develop/tools/manylinux1/>`_ 找到 paddle_manylinux_devel
24+
镜像的编译以及使用方法。或者参考下述可选步骤,从源码中构建用于编译PaddlePaddle的Docker镜像。
2425

2526
如果您选择不使用Docker镜像,则需要在本机安装下面章节列出的 `编译依赖`_ 之后才能开始编译的步骤。
2627

doc/v2/build_and_install/build_from_source_en.rst

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,8 @@ How To Build
2222
You need to use Docker to build PaddlePaddle
2323
to avoid installing dependencies by yourself. We have several pre-built
2424
Docker images `here <https://hub.docker.com/r/paddlepaddle/paddle_manylinux_devel/tags/>`_ ,
25+
you can also find instructions for building and using the paddle_manylinux_devel Docker image
26+
`here <https://github.com/PaddlePaddle/Paddle/tree/develop/tools/manylinux1/>`_
2527
Or you can build your own image from source as the optional step below:
2628

2729
.. code-block:: bash

paddle/fluid/framework/operator.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,10 @@ class ExecutionContext {
192192
return op_.Attr<T>(name);
193193
}
194194

195+
// Returns whether the operator declares an input parameter named `name`.
// Thin forwarder to op_.HasInputs (op_ is the enclosing ExecutionContext's
// operator reference, declared outside this view) — presumably a lookup in
// the op's input map; confirm in OperatorBase.
bool HasInput(const std::string& name) const { return op_.HasInputs(name); }

// Returns whether the operator declares an output parameter named `name`.
// Mirrors HasInput, forwarding to op_.HasOutputs.
bool HasOutput(const std::string& name) const { return op_.HasOutputs(name); }
198+
195199
size_t InputSize(const std::string& name) const {
196200
return op_.Inputs(name).size();
197201
}

paddle/fluid/framework/parallel_executor.cc

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,8 @@ ParallelExecutor::ParallelExecutor(
5757
const std::unordered_set<std::string> &bcast_vars,
5858
const ProgramDesc &main_program, const std::string &loss_var_name,
5959
Scope *scope, const std::vector<Scope *> &local_scopes,
60-
const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy)
60+
const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy,
61+
size_t num_trainers, size_t trainer_id)
6162
: member_(new ParallelExecutorPrivate(places)) {
6263
member_->global_scope_ = scope;
6364

@@ -79,7 +80,13 @@ ParallelExecutor::ParallelExecutor(
7980

8081
// Bcast Parameters to all GPUs
8182
#ifdef PADDLE_WITH_CUDA
82-
member_->nccl_ctxs_.reset(new platform::NCCLContextMap(member_->places_));
83+
auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
84+
ncclUniqueId *nccl_id = nullptr;
85+
if (nccl_id_var != nullptr) {
86+
nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
87+
}
88+
member_->nccl_ctxs_.reset(new platform::NCCLContextMap(
89+
member_->places_, nccl_id, num_trainers, trainer_id));
8390
#endif
8491
if (platform::is_gpu_place(places[0]) && member_->local_scopes_.size() != 1 &&
8592
local_scopes.empty()) { // Is CUDA

paddle/fluid/framework/parallel_executor.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,8 @@ class ParallelExecutor {
4444
const std::string &loss_var_name, Scope *scope,
4545
const std::vector<Scope *> &local_scopes,
4646
const ExecutionStrategy &exec_strategy,
47-
const BuildStrategy &build_strategy);
47+
const BuildStrategy &build_strategy,
48+
size_t num_trainers = 1, size_t trainer_id = 0);
4849

4950
~ParallelExecutor();
5051

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1,2 @@
1-
cc_library(dot SRCS dot.cc)
1+
cc_library(analysis SRCS dot.cc node.cc node.h)
2+
cc_test(test_node SRCS node_tester.cc DEPS analysis)
Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

namespace paddle {
namespace inference {
namespace analysis {

// Device kind used by the inference analysis module.
// NOTE(review): only CPU and GPU are modeled here — presumably the target
// a (sub-)graph is placed on; extend if more backends are supported.
enum class Device { CPU, GPU };

}  // namespace analysis
}  // namespace inference
}  // namespace paddle

paddle/fluid/inference/analysis/dot.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121

2222
#include <glog/logging.h>
2323
#include <sstream>
24+
#include <string>
2425
#include <unordered_map>
2526
#include <vector>
2627

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#include "paddle/fluid/inference/analysis/dot.h"
16+
17+
#include <gtest/gtest.h>
18+
#include <memory>
19+
#include "paddle/fluid/inference/analysis/data_flow_graph.h"
20+
21+
namespace paddle {
22+
namespace inference {
23+
namespace analysis {
24+
25+
// Test fixture that builds a small three-node graph (a -> b, b -> c, a -> c)
// in a Dot object, so every test in the suite starts from the same
// DOT-language document.
class DotTester : public ::testing::Test {
 protected:
  // Runs before each TEST_F: creates the Dot with a single graph-level
  // "title" attribute, adds three nodes (only "a" carries node attributes:
  // shape=box, color=blue), and the three edges listed above.
  void SetUp() override {
    std::vector<Dot::Attr> attrs({{"title", "hello"}});
    dot.reset(new Dot(attrs));
    dot->AddNode("a", {Dot::Attr{"shape", "box"}, Dot::Attr("color", "blue")});
    dot->AddNode("b", {});
    dot->AddNode("c", {});
    dot->AddEdge("a", "b", {});
    dot->AddEdge("b", "c", {});
    dot->AddEdge("a", "c", {});
  }

  // Dot instance under test, owned by the fixture and rebuilt in SetUp().
  std::unique_ptr<Dot> dot;
};
40+
41+
// Generates the DOT-language source for the fixture graph and logs it.
// NOTE(review): this test only exercises Build() and prints the result —
// it asserts nothing about the output; the expected shape is kept below
// for manual inspection.
TEST_F(DotTester, Build) {
  auto codes = dot->Build();
  // Output the DOT language code, the generated codes are too long to compare
  // the string.
  //
  // The output is
  //
  // digraph G {
  //   title="hello"
  //   node_1
  //   node_2
  //   node_0[label="a" shape="box" color="blue"]
  //   node_0->node_1
  //   node_1->node_2
  //   node_0->node_2
  // } // end G
  LOG(INFO) << '\n' << codes;
}
59+
60+
} // namespace analysis
61+
} // namespace inference
62+
} // namespace paddle

0 commit comments

Comments
 (0)