Commit 64464cb: Merge develop
2 parents: 79918a8 + 4a4567f

24 files changed: 145 additions, 48 deletions

paddle/fluid/framework/ir/graph.h
Lines changed: 1 addition & 3 deletions

@@ -142,8 +142,6 @@ class Graph {
     nodes_.erase(node);
   }
 
-  const ProgramDesc &program() const { return program_; }
-
  private:
   // This method takes ownership of `node`.
   ir::Node *AddNode(ir::Node *node) {
@@ -154,7 +152,7 @@ class Graph {
   }
 
   // NOTE: program_ shouldn't be exposed to user.
-  const ProgramDesc &program_;
+  const ProgramDesc program_;
   std::map<std::string, boost::any> attrs_;
   std::map<std::string, std::function<void(void)>> attr_dels_;
   std::map<ir::Node *, std::unique_ptr<ir::Node>> nodes_;
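
Why this change matters: Graph previously held a const ProgramDesc reference, which could dangle if the caller's ProgramDesc was destroyed before the graph; storing by value gives the graph its own copy, and dropping the public program() accessor keeps that copy private (per the NOTE). A minimal sketch of the ownership difference, with a hypothetical GraphLike class standing in for the real one:

#include "paddle/fluid/framework/program_desc.h"

// Hypothetical illustration only, not the real Graph class.
class GraphLike {
 public:
  // Copying at construction decouples the graph's lifetime from the
  // caller's ProgramDesc.
  explicit GraphLike(const paddle::framework::ProgramDesc &program)
      : program_(program) {}

 private:
  const paddle::framework::ProgramDesc program_;  // owned copy, not a reference
};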

paddle/fluid/framework/ir/node.h
Lines changed: 1 addition & 2 deletions

@@ -41,8 +41,7 @@ class Node {
   explicit Node(OpDesc* op_desc)
       : name_(op_desc->Type()),
         var_desc_(nullptr),
-        op_desc_(new OpDesc(*op_desc)),  // TODO(panyx0718) the pointer in the
-                                         // original OpDesc might go out.
+        op_desc_(new OpDesc(*op_desc, op_desc->Block())),
         type_(Type::kOperation) {}
 
   Type NodeType() const { return type_; }
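
This constructor change resolves the removed TODO: copying only the OpDesc left the copy holding the original's raw BlockDesc pointer, which could go stale once the source program was destroyed. A hedged sketch of the difference, assuming Paddle's OpDesc/BlockDesc API exactly as used in the diff:

// CopyOpDesc is a hypothetical helper for illustration.
void CopyOpDesc(paddle::framework::OpDesc *src) {
  using paddle::framework::OpDesc;
  // Single-argument copy: the internal BlockDesc* is copied verbatim
  // and may dangle once the source program is destroyed.
  OpDesc shallow(*src);
  // Two-argument copy (as in the diff): the copy is explicitly
  // re-bound to a live block.
  OpDesc rebound(*src, src->Block());
}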

paddle/fluid/framework/op_proto_maker.cc
Lines changed: 4 additions & 0 deletions

@@ -129,6 +129,10 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
                 "Optimized for variable")
       .SetDefault({});
 
+  AddAttr<std::vector<std::string>>(OpCreationCallstackAttrName(),
+                                    "Callstack for Op Creation.")
+      .SetDefault({});
+
   Validate();
 }
 

paddle/fluid/framework/op_proto_maker.h
Lines changed: 1 addition & 0 deletions

@@ -39,6 +39,7 @@ class OpProtoAndCheckerMaker {
  public:
   static const char *OpRoleAttrName() { return "op_role"; }
   static const char *OpRoleVarAttrName() { return "op_role_var"; }
+  static const char *OpCreationCallstackAttrName() { return "op_callstack"; }
 
   void operator()(proto::OpProto *proto, OpAttrChecker *attr_checker);
 
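
Together, the two op_proto_maker changes register an op_callstack attribute (a std::vector<std::string>, empty by default) on every operator, which the frontend can fill with the operator's Python creation stack. A short sketch of reading it back, mirroring the operator.cc change below (op is an assumed, already-constructed OperatorBase):

const auto &callstack = op.Attr<std::vector<std::string>>(
    paddle::framework::OpProtoAndCheckerMaker::OpCreationCallstackAttrName());
// Empty unless the frontend recorded the creation stack.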

paddle/fluid/framework/operator.cc
Lines changed: 46 additions & 15 deletions

@@ -11,15 +11,17 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include <gflags/gflags.h>
-#include <glog/logging.h>
-
+#include "paddle/fluid/framework/operator.h"
 #include <algorithm>
-
+#include <sstream>
+#include <string>
+#include <vector>
+#include "gflags/gflags.h"
+#include "glog/logging.h"
 #include "paddle/fluid/framework/data_transform.h"
 #include "paddle/fluid/framework/executor.h"
 #include "paddle/fluid/framework/lod_tensor.h"
-#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/framework/op_proto_maker.h"
 #include "paddle/fluid/framework/shape_inference.h"
 #include "paddle/fluid/framework/var_type.h"
 #include "paddle/fluid/platform/profiler.h"
@@ -127,19 +129,48 @@ static LoD GetLoD(const Scope& scope, const std::string& name) {
 }
 
 void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
-  VLOG(4) << place << " " << DebugStringEx(&scope);
-  if (platform::is_gpu_place(place)) {
+  try {
+    if (VLOG_IS_ON(4)) {
+      VLOG(4) << place << " " << DebugStringEx(&scope);
+    }
+    if (platform::is_gpu_place(place)) {
 #ifndef PADDLE_WITH_CUDA
-    PADDLE_THROW("Cannot run operator on place %s", place);
+      PADDLE_THROW("Cannot run operator on place %s", place);
 #else
-    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
-    platform::SetDeviceId(dev_id);
+      auto dev_id = boost::get<platform::CUDAPlace>(place).device;
+      platform::SetDeviceId(dev_id);
 #endif
+    }
+    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
+    platform::RecordEvent record_event(Type(), pool.Get(place));
+    RunImpl(scope, place);
+    if (VLOG_IS_ON(3)) {
+      VLOG(3) << place << " " << DebugStringEx(&scope);
+    }
+  } catch (platform::EnforceNotMet exception) {
+    if (Attrs().count("sub_block") != 0) {
+      throw exception;
+    }
+
+    auto& callstack = Attr<std::vector<std::string>>(
+        OpProtoAndCheckerMaker::OpCreationCallstackAttrName());
+
+    if (callstack.empty()) {
+      throw exception;
+    }
+    std::ostringstream sout;
+    sout << "Invoke operator " << Type() << " error.\n";
+    sout << "Python Callstacks: \n";
+    for (auto& line : callstack) {
+      sout << line;
+    }
+    sout << "C++ Callstacks: \n";
+    sout << exception.err_str_;
+    exception.err_str_ = sout.str();
+    throw exception;
+  } catch (...) {
+    std::rethrow_exception(std::current_exception());
   }
-  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
-  platform::RecordEvent record_event(Type(), pool.Get(place));
-  RunImpl(scope, place);
-  VLOG(3) << place << " " << DebugStringEx(&scope);
 }
 
 bool OperatorBase::HasInputs(const std::string& name) const {
@@ -167,7 +198,7 @@ const std::vector<std::string>& OperatorBase::Inputs(
 }
 
 bool OperatorBase::HasOutputs(const std::string& name) const {
-  if (outputs_.find(name) != outputs_.end()) {
+  if (outputs_.end() != outputs_.find(name)) {
     return true;
   } else {
     return false;
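
Net effect of the Run() change: the body is wrapped in try/catch, and the VLOG_IS_ON guards skip building the potentially expensive DebugStringEx strings unless verbose logging is enabled. When an operator without a sub_block attribute throws platform::EnforceNotMet and a recorded callstack is available, the exception's message is rewritten to show the Python creation site ahead of the C++ enforce trace, then rethrown. An illustrative example of the enriched message (the operator and frame contents are hypothetical):

    Invoke operator concat error.
    Python Callstacks:
      File "train.py", line 42, in <module>
        out = fluid.layers.concat(inputs, axis=1)
    C++ Callstacks:
    <original EnforceNotMet error text>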

paddle/fluid/operators/concat_op.h
Lines changed: 14 additions & 2 deletions

@@ -62,9 +62,21 @@ class ConcatGradKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const {
     auto* out_grad =
         ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
-    auto ins = ctx.MultiInput<framework::Tensor>("X");
+    auto ins = ctx.MultiInput<framework::LoDTensor>("X");
     auto out_var_names = ctx.Outputs(framework::GradVarName("X"));
-    auto outs = ctx.MultiOutput<framework::Tensor>(framework::GradVarName("X"));
+    auto outs =
+        ctx.MultiOutput<framework::LoDTensor>(framework::GradVarName("X"));
+
+    {
+      auto dx = outs;
+      auto x = ins;
+      for (size_t i = 0; i < dx.size(); ++i) {
+        if (dx[i] != nullptr) {
+          dx[i]->set_lod(x[i]->lod());
+        }
+      }
+    }
+
     int64_t axis = static_cast<int64_t>(ctx.Attr<int>("axis"));
 
     // get output tensor that the name is not kEmptyVarName
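
The new block propagates LoD (level-of-detail, i.e. sequence offset) metadata from each input to its gradient: lod()/set_lod() live on framework::LoDTensor, not on plain framework::Tensor, which is why the inputs and outputs are now fetched as LoDTensor. A small sketch of the propagation, with illustrative offsets:

#include "paddle/fluid/framework/lod_tensor.h"

paddle::framework::LoDTensor x, dx;
x.set_lod({{0, 2, 5}});  // illustrative: two sequences, of lengths 2 and 3
dx.set_lod(x.lod());     // the gradient keeps its input's sequence structure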

paddle/fluid/operators/elementwise_add_mkldnn_op.cc
Lines changed: 2 additions & 1 deletion

@@ -137,9 +137,10 @@ class EltwiseAddMKLDNNKernel : public framework::OpKernel<T> {
 };
 
 template <typename T>
-class EltwiseAddMKLDNNGradKernel : public framework::OpKernel<T> {
+class EltwiseAddMKLDNNGradKernel : public ElemwiseGradKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
+    ElemwiseGradKernel<T>::Compute(ctx);
     using Tensor = framework::Tensor;
 
     auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));

paddle/fluid/operators/elementwise_add_op.h
Lines changed: 4 additions & 1 deletion

@@ -15,6 +15,7 @@ limitations under the License. */
 #pragma once
 
 #include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/operators/elementwise_op.h"
 #include "paddle/fluid/operators/elementwise_op_function.h"
 #include "paddle/fluid/operators/math/blas.h"
 
@@ -136,9 +137,11 @@ elementwise_add_grad(const framework::ExecutionContext& ctx,
 }
 
 template <typename DeviceContext, typename T>
-class ElementwiseAddGradKernel : public framework::OpKernel<T> {
+class ElementwiseAddGradKernel : public ElemwiseGradKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
+    ElemwiseGradKernel<T>::Compute(ctx);
+
     using Tensor = framework::Tensor;
 
     auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
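
The same pattern recurs in the MKLDNN, div, and max gradient kernels in this commit: each kernel now derives from ElemwiseGradKernel<T> (declared in the newly included elementwise_op.h) and calls the base Compute() before its own work. The base class itself is not part of this diff; consistent with the concat change above, it presumably forwards LoD onto the gradient output, roughly along these lines (a hedged sketch, not the actual Paddle source):

template <typename T>
class ElemwiseGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    // Sketch: copy sequence (LoD) metadata from dOut to dX so derived
    // kernels only need to fill in the gradient values.
    auto *dx = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
    if (dx != nullptr) {
      auto &dout =
          *ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"));
      dx->set_lod(dout.lod());
    }
  }
};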

paddle/fluid/operators/elementwise_div_op.h
Lines changed: 3 additions & 2 deletions

@@ -14,8 +14,8 @@ limitations under the License. */
 
 #pragma once
 
+#include "paddle/fluid/operators/elementwise_op.h"
 #include "paddle/fluid/operators/elementwise_op_function.h"
-
 namespace paddle {
 namespace operators {
 
@@ -53,9 +53,10 @@ struct DivGradDY {
 };
 
 template <typename DeviceContext, typename T>
-class ElementwiseDivGradKernel : public framework::OpKernel<T> {
+class ElementwiseDivGradKernel : public ElemwiseGradKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
+    ElemwiseGradKernel<T>::Compute(ctx);
     using Tensor = framework::Tensor;
 
     auto* x = ctx.Input<Tensor>("X");

paddle/fluid/operators/elementwise_max_op.h
Lines changed: 3 additions & 1 deletion

@@ -14,6 +14,7 @@ limitations under the License. */
 
 #pragma once
 
+#include "paddle/fluid/operators/elementwise_op.h"
 #include "paddle/fluid/operators/elementwise_op_function.h"
 
 namespace paddle {
@@ -55,9 +56,10 @@ struct MaxGradDy {
 };
 
 template <typename DeviceContext, typename T>
-class ElementwiseMaxGradKernel : public framework::OpKernel<T> {
+class ElementwiseMaxGradKernel : public ElemwiseGradKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
+    ElemwiseGradKernel<T>::Compute(ctx);
     using Tensor = framework::Tensor;
 
     auto* x = ctx.Input<Tensor>("X");
