Skip to content

Commit a58dd3e

Browse files
committed
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into port_python3_syntax
2 parents 850c394 + 0964de1 commit a58dd3e

File tree

104 files changed

+2720
-710
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

104 files changed

+2720
-710
lines changed

cmake/external/anakin.cmake

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ set(ANAKIN_INCLUDE "${ANAKIN_INSTALL_DIR}" CACHE STRING "root of Anakin header f
88
set(ANAKIN_LIBRARY "${ANAKIN_INSTALL_DIR}" CACHE STRING "path of Anakin library")
99

1010
set(ANAKIN_COMPILE_EXTRA_FLAGS
11+
-Wno-error=unused-but-set-variable -Wno-unused-but-set-variable
1112
-Wno-error=unused-variable -Wno-unused-variable
1213
-Wno-error=format-extra-args -Wno-format-extra-args
1314
-Wno-error=comment -Wno-comment
@@ -19,7 +20,7 @@ set(ANAKIN_COMPILE_EXTRA_FLAGS
1920
-Wno-reorder
2021
-Wno-error=cpp)
2122

22-
set(ANAKIN_LIBRARY_URL "https://github.com/pangge/Anakin/releases/download/3.0/anakin_release_simple.tar.gz")
23+
set(ANAKIN_LIBRARY_URL "https://github.com/pangge/Anakin/releases/download/Version0.1.0/anakin.tar.gz")
2324

2425
# A helper function used in Anakin, currently, to use it, one need to recursively include
2526
# nearly all the header files.
@@ -41,9 +42,9 @@ if (NOT EXISTS "${ANAKIN_INSTALL_DIR}")
4142
message(STATUS "Download Anakin library from ${ANAKIN_LIBRARY_URL}")
4243
execute_process(COMMAND bash -c "mkdir -p ${ANAKIN_INSTALL_DIR}")
4344
execute_process(COMMAND bash -c "rm -rf ${ANAKIN_INSTALL_DIR}/*")
44-
execute_process(COMMAND bash -c "cd ${ANAKIN_INSTALL_DIR}; wget -q ${ANAKIN_LIBRARY_URL}")
45+
execute_process(COMMAND bash -c "cd ${ANAKIN_INSTALL_DIR}; wget --no-check-certificate -q ${ANAKIN_LIBRARY_URL}")
4546
execute_process(COMMAND bash -c "mkdir -p ${ANAKIN_INSTALL_DIR}")
46-
execute_process(COMMAND bash -c "cd ${ANAKIN_INSTALL_DIR}; tar xzf anakin_release_simple.tar.gz")
47+
execute_process(COMMAND bash -c "cd ${ANAKIN_INSTALL_DIR}; tar xzf anakin.tar.gz")
4748
endif()
4849

4950
if (WITH_ANAKIN)

paddle/fluid/API.spec

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -263,9 +263,7 @@ paddle.fluid.layers.gaussian_random_batch_size_like ArgSpec(args=[], varargs='ar
263263
paddle.fluid.layers.scatter ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
264264
paddle.fluid.layers.sum ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
265265
paddle.fluid.layers.slice ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
266-
paddle.fluid.layers.polygon_box_transform ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
267266
paddle.fluid.layers.shape ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
268-
paddle.fluid.layers.iou_similarity ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
269267
paddle.fluid.layers.maxout ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
270268
paddle.fluid.layers.sigmoid ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
271269
paddle.fluid.layers.logsigmoid ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
@@ -306,7 +304,9 @@ paddle.fluid.layers.ssd_loss ArgSpec(args=['location', 'confidence', 'gt_box', '
306304
paddle.fluid.layers.detection_map ArgSpec(args=['detect_res', 'label', 'class_num', 'background_label', 'overlap_threshold', 'evaluate_difficult', 'has_state', 'input_states', 'out_states', 'ap_version'], varargs=None, keywords=None, defaults=(0, 0.3, True, None, None, None, 'integral'))
307305
paddle.fluid.layers.rpn_target_assign ArgSpec(args=['loc', 'scores', 'anchor_box', 'gt_box', 'rpn_batch_size_per_im', 'fg_fraction', 'rpn_positive_overlap', 'rpn_negative_overlap'], varargs=None, keywords=None, defaults=(256, 0.25, 0.7, 0.3))
308306
paddle.fluid.layers.anchor_generator ArgSpec(args=['input', 'anchor_sizes', 'aspect_ratios', 'variance', 'stride', 'offset', 'name'], varargs=None, keywords=None, defaults=(None, None, [0.1, 0.1, 0.2, 0.2], None, 0.5, None))
307+
paddle.fluid.layers.iou_similarity ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
309308
paddle.fluid.layers.box_coder ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
309+
paddle.fluid.layers.polygon_box_transform ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
310310
paddle.fluid.layers.accuracy ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None))
311311
paddle.fluid.layers.auc ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk'], varargs=None, keywords=None, defaults=('ROC', 200, 1))
312312
paddle.fluid.layers.exponential_decay ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,))

paddle/fluid/framework/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ cc_library(ddim SRCS ddim.cc DEPS eigen3 boost)
77
cc_test(ddim_test SRCS ddim_test.cc DEPS ddim)
88
nv_test(dim_test SRCS dim_test.cu DEPS ddim)
99
cc_library(data_type SRCS data_type.cc DEPS framework_proto ddim device_context)
10+
cc_test(data_type_test SRCS data_type_test.cc DEPS data_type place tensor)
1011
if(WITH_GPU)
1112
nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS place memory data_type device_context)
1213
else()

paddle/fluid/framework/data_type.cc

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,8 @@
1717
#include <string>
1818
#include <unordered_map>
1919

20+
using float16 = paddle::platform::float16;
21+
2022
namespace paddle {
2123
namespace framework {
2224

@@ -53,7 +55,7 @@ static DataTypeMap* InitDataTypeMap() {
5355
RegisterType<cc_type>(retv, proto_type, #cc_type)
5456

5557
// NOTE: Add your customize type here.
56-
RegType(platform::float16, proto::VarType::FP16);
58+
RegType(float16, proto::VarType::FP16);
5759
RegType(float, proto::VarType::FP32);
5860
RegType(double, proto::VarType::FP64);
5961
RegType(int, proto::VarType::INT32);
Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
#include "paddle/fluid/framework/data_type.h"
15+
16+
#include <string>
17+
#include "gtest/gtest.h"
18+
#include "paddle/fluid/framework/tensor.h"
19+
20+
// Verifies that FP16 is correctly registered in the framework's data type
// map: type-index round trip, element size, and debug-string name.
TEST(DataType, float16) {
  using paddle::framework::Tensor;
  using paddle::platform::CPUPlace;
  using paddle::platform::float16;
  namespace f = paddle::framework;

  const f::proto::VarType::Type dtype = f::proto::VarType::FP16;

  Tensor tensor;
  CPUPlace cpu;
  tensor.mutable_data(cpu, f::ToTypeIndex(dtype));

  // The allocated tensor must report the float16 type index.
  EXPECT_EQ(tensor.type(), std::type_index(typeid(float16)));

  // A float16 element occupies exactly two bytes.
  EXPECT_EQ(f::SizeOfType(f::ToTypeIndex(dtype)), 2u);

  // The proto enum must stringify to the canonical debug name.
  const std::string expected_name = "float16";
  EXPECT_STREQ(f::DataTypeToString(dtype).c_str(), expected_name.c_str());
}
Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,83 @@
1+
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#pragma once
16+
17+
#include "paddle/fluid/platform/enforce.h"
18+
19+
namespace paddle {
20+
namespace framework {
21+
namespace details {
22+
23+
class ExceptionHolder {
24+
public:
25+
void Catch(const platform::EnforceNotMet& exp) {
26+
std::lock_guard<std::mutex> lock(mu_);
27+
exception_.reset(new platform::EnforceNotMet(exp));
28+
type_ = kEnforceNotMet;
29+
}
30+
31+
void Catch(const platform::EOFException& exp) {
32+
std::lock_guard<std::mutex> lock(mu_);
33+
// EOFException will not cover up existing EnforceNotMet.
34+
if (exception_.get() == nullptr) {
35+
exception_.reset(new platform::EOFException(exp));
36+
type_ = kEOF;
37+
}
38+
}
39+
40+
bool ExceptionCatched() const {
41+
std::lock_guard<std::mutex> lock(mu_);
42+
return exception_.get() != nullptr;
43+
}
44+
45+
void Throw() {
46+
std::lock_guard<std::mutex> lock(mu_);
47+
switch (type_) {
48+
case kNone:
49+
break;
50+
case kEnforceNotMet: {
51+
auto e = *static_cast<platform::EnforceNotMet*>(exception_.get());
52+
throw e;
53+
break;
54+
}
55+
case kEOF: {
56+
auto e = *static_cast<platform::EOFException*>(exception_.get());
57+
throw e;
58+
break;
59+
}
60+
default:
61+
LOG(FATAL) << "Unknown exception.";
62+
}
63+
exception_.reset();
64+
type_ = kNone;
65+
}
66+
67+
void Clear() {
68+
std::lock_guard<std::mutex> lock(mu_);
69+
exception_.reset();
70+
type_ = kNone;
71+
}
72+
73+
private:
74+
enum ExceptionType { kNone, kEnforceNotMet, kEOF };
75+
ExceptionType type_{kNone};
76+
77+
std::unique_ptr<std::exception> exception_;
78+
mutable std::mutex mu_;
79+
};
80+
81+
} // namespace details
82+
} // namespace framework
83+
} // namespace paddle

paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,9 @@ class ScopeBufferedSSAGraphExecutor : public SSAGraphExecutor {
4141
std::vector<VariableInfo> var_infos, std::vector<platform::Place> places,
4242
std::unique_ptr<SSAGraphExecutor>&& underlying_executor);
4343

44-
const ir::Graph& Graph() const { return underlying_executor_->Graph(); }
44+
const ir::Graph& Graph() const override {
45+
return underlying_executor_->Graph();
46+
}
4547

4648
FeedFetchList Run(const std::vector<std::string>& fetch_tensors) override;
4749

paddle/fluid/framework/details/threaded_ssa_graph_executor.cc

Lines changed: 5 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run(
8383

8484
// Clean run context
8585
run_op_futures_.clear();
86-
exception_.reset();
86+
exception_holder_.Clear();
8787

8888
// Step 3. Execution
8989
while (!pending_vars.empty()) {
@@ -103,23 +103,11 @@ FeedFetchList ThreadedSSAGraphExecutor::Run(
103103
auto cur_ready_vars = ready_vars.PopAll(1, &timeout);
104104

105105
if (timeout) {
106-
std::unique_lock<std::mutex> l(exception_mu_);
107-
if (exception_) {
108-
l.unlock();
106+
if (exception_holder_.ExceptionCatched()) {
109107
for (auto &run_op_future : run_op_futures_) {
110108
run_op_future.wait();
111109
}
112-
l.lock();
113-
std::exception *exp = exception_.get();
114-
if (dynamic_cast<platform::EOFException *>(exp)) {
115-
auto e = *static_cast<platform::EOFException *>(exp);
116-
throw e;
117-
} else if (dynamic_cast<platform::EnforceNotMet *>(exp)) {
118-
auto e = *static_cast<platform::EnforceNotMet *>(exp);
119-
throw e;
120-
} else {
121-
LOG(FATAL) << "Unknown exception.";
122-
}
110+
exception_holder_.Throw();
123111
} else {
124112
continue;
125113
}
@@ -229,14 +217,9 @@ void ThreadedSSAGraphExecutor::RunOp(
229217
ready_var_q->Extend(op->Outputs());
230218
VLOG(10) << op << " " << op->Name() << "Signal posted";
231219
} catch (platform::EOFException ex) {
232-
std::lock_guard<std::mutex> l(exception_mu_);
233-
// EOFException will not cover up existing EnforceNotMet.
234-
if (exception_.get() == nullptr) {
235-
exception_.reset(new platform::EOFException(ex));
236-
}
220+
exception_holder_.Catch(ex);
237221
} catch (platform::EnforceNotMet ex) {
238-
std::lock_guard<std::mutex> l(exception_mu_);
239-
exception_.reset(new platform::EnforceNotMet(ex));
222+
exception_holder_.Catch(ex);
240223
} catch (...) {
241224
LOG(FATAL) << "Unknown exception catched";
242225
}

paddle/fluid/framework/details/threaded_ssa_graph_executor.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
#include <functional>
2525
#include "ThreadPool.h" // ThreadPool in third party
2626
#include "paddle/fluid/framework/blocking_queue.h"
27+
#include "paddle/fluid/framework/details/exception_holder.h"
2728
#include "paddle/fluid/framework/details/execution_strategy.h"
2829
#include "paddle/fluid/framework/details/fetch_op_handle.h"
2930
#include "paddle/fluid/framework/details/ssa_graph_executor.h"
@@ -42,7 +43,7 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor {
4243
const std::vector<platform::Place> &places,
4344
std::unique_ptr<ir::Graph> &&graph);
4445

45-
const ir::Graph &Graph() const { return *graph_; }
46+
const ir::Graph &Graph() const override { return *graph_; }
4647
// Run a SSAGraph by a thread pool
4748
// Use topological sort algorithm
4849
FeedFetchList Run(const std::vector<std::string> &fetch_tensors) override;
@@ -59,8 +60,7 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor {
5960
std::vector<Scope *> local_scopes_;
6061
std::vector<platform::Place> places_;
6162
platform::DeviceContextPool fetch_ctxs_;
62-
std::mutex exception_mu_;
63-
std::unique_ptr<std::exception> exception_;
63+
ExceptionHolder exception_holder_;
6464
std::atomic<int> running_ops_;
6565

6666
void InsertPendingOp(std::unordered_map<OpHandleBase *, size_t> *pending_ops,

paddle/fluid/framework/ir/graph_helper_test.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -116,8 +116,8 @@ TEST(GraphHelperTest, Basic) {
116116
for (size_t i = 0; i < sorted.size(); ++i) {
117117
node_map[sorted[i]->Name()] = i;
118118
}
119-
ASSERT_EQ(node_map.at("op1"), 0);
120-
ASSERT_EQ(node_map.at("op2"), 1);
119+
ASSERT_EQ(node_map.at("op1"), 0UL);
120+
ASSERT_EQ(node_map.at("op2"), 1UL);
121121
ASSERT_TRUE(node_map.at("op3") < node_map.at("op5"));
122122
}
123123
} // namespace ir

0 commit comments

Comments
 (0)