
Commit 133f890

Merge pull request #5499 from reyoung/feature/increase_cpu
Feature/increase cpu
2 parents: 78fb29b + 9c10db3

7 files changed: 60 additions, 128 deletions

paddle/framework/backward.cc

Lines changed: 5 additions & 0 deletions
@@ -408,6 +408,11 @@ std::vector<std::unique_ptr<OpDescBind>> MakeBlockBackward(
 
   for (const auto& desc : op_grads) {
     for (const std::string& out_name : desc->OutputArgumentNames()) {
+      if (out_name.find("@GRAD") == std::string::npos) {
+        // Not all outputs of a backward operator are gradients. Only
+        // gradients need to be summed; skip variables that are not gradients.
+        continue;
+      }
       dup_out_ops[out_name].emplace_back(grad_desc_idx);
     }
     ++grad_desc_idx;
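
The new guard only changes how duplicate outputs are grouped before gradient summation: an output whose name lacks the "@GRAD" suffix is skipped, so auxiliary outputs of backward operators are never summed as if they were gradients. A minimal standalone sketch of that grouping rule (plain C++ with made-up output names, not the Paddle code above):

// A minimal sketch (not Paddle code): collect, per output name, the indices
// of the backward ops that produce it, but only for names carrying "@GRAD".
#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  // Hypothetical outputs of two backward ops; "step_count" is not a gradient.
  const std::vector<std::vector<std::string>> op_outputs = {
      {"x@GRAD", "step_count"}, {"x@GRAD", "y@GRAD"}};

  std::map<std::string, std::vector<std::size_t>> dup_out_ops;
  for (std::size_t grad_desc_idx = 0; grad_desc_idx < op_outputs.size();
       ++grad_desc_idx) {
    for (const std::string& out_name : op_outputs[grad_desc_idx]) {
      if (out_name.find("@GRAD") == std::string::npos) {
        continue;  // non-gradient outputs are never summed
      }
      dup_out_ops[out_name].push_back(grad_desc_idx);
    }
  }

  // x@GRAD is produced twice, so it would need a sum; step_count is ignored.
  for (const auto& kv : dup_out_ops) {
    std::cout << kv.first << " produced by " << kv.second.size() << " op(s)\n";
  }
  return 0;
}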

paddle/operators/increment_op.cc

Lines changed: 47 additions & 18 deletions
@@ -12,22 +12,57 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/operators/increment_op.h"
+#include "paddle/framework/op_registry.h"
 
 namespace paddle {
 namespace operators {
 
-class IncrementOp : public framework::OperatorWithKernel {
+class IncrementInferShape : public framework::InferShapeBase {
  public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext *ctx) const override {
+  void operator()(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of IncrementOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of IncrementOp should not be null.");
+    PADDLE_ENFORCE_EQ(1, framework::product(ctx->GetInputDim("X")));
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
-    ctx->ShareLoD("X", /*->*/ "Out");
+  }
+};
+
+struct IncrementFunctor {
+  IncrementFunctor(const framework::LoDTensor &x, framework::LoDTensor *out,
+                   float value)
+      : x_(x), out_(out), value_(value) {}
+
+  template <typename T>
+  void operator()() const {
+    *out_->data<T>() = *x_.data<T>() + static_cast<T>(value_);
+  }
+
+  const framework::LoDTensor &x_;
+  framework::LoDTensor *out_;
+  float value_;
+};
+
+class IncrementOp : public framework::OperatorBase {
+ public:
+  IncrementOp(const std::string &type, const framework::VariableNameMap &inputs,
+              const framework::VariableNameMap &outputs,
+              const framework::AttributeMap &attrs)
+      : OperatorBase(type, inputs, outputs, attrs) {}
+
+  void Run(const framework::Scope &scope,
+           const platform::DeviceContext &dev_ctx) const override {
+    auto &x = scope.FindVar(Input("X"))->Get<framework::LoDTensor>();
+    auto &out =
+        *scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
+
+    PADDLE_ENFORCE(platform::is_cpu_place(x.place()));
+    out.Resize(x.dims());
+    out.mutable_data(x.place(), x.type());
+    float value = Attr<float>("step");
+    framework::VisitDataType(framework::ToDataType(out.type()),
+                             IncrementFunctor(x, &out, value));
   }
 };
 
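
The rewritten operator no longer needs per-type kernels: Run resizes the output, reads the step attribute, and lets VisitDataType choose the element type at run time, invoking the templated operator() of IncrementFunctor exactly once. Below is a minimal, self-contained sketch of that dispatch pattern; DataType, VisitDataType, and the functor here are simplified stand-ins, not the real Paddle declarations:

#include <cstdint>
#include <iostream>
#include <stdexcept>

enum class DataType { FP32, FP64, INT64 };

// Calls the visitor's templated operator() with the C++ type that matches
// the runtime tag, mirroring the role of framework::VisitDataType above.
template <typename Visitor>
void VisitDataType(DataType type, Visitor visitor) {
  switch (type) {
    case DataType::FP32:
      visitor.template operator()<float>();
      break;
    case DataType::FP64:
      visitor.template operator()<double>();
      break;
    case DataType::INT64:
      visitor.template operator()<int64_t>();
      break;
    default:
      throw std::runtime_error("unsupported data type");
  }
}

// Stand-in for IncrementFunctor: raw pointers play the role of LoDTensors.
struct IncrementFunctor {
  IncrementFunctor(const void* x, void* out, float value)
      : x_(x), out_(out), value_(value) {}

  template <typename T>
  void operator()() const {
    *static_cast<T*>(out_) =
        *static_cast<const T*>(x_) + static_cast<T>(value_);
  }

  const void* x_;
  void* out_;
  float value_;
};

int main() {
  int64_t x = 41, out = 0;
  VisitDataType(DataType::INT64, IncrementFunctor(&x, &out, 1.0f));
  std::cout << out << "\n";  // prints 42
  return 0;
}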

@@ -59,10 +94,10 @@ class IncrementGradOpMaker : public framework::SingleGradOpDescMaker {
 
   std::unique_ptr<framework::OpDescBind> Apply() const override {
     auto *grad_op = new framework::OpDescBind();
-    grad_op->SetType("scale");
-    grad_op->SetInput("X", OutputGrad("Out"));
-    grad_op->SetOutput("Out", InputGrad("X"));
-    grad_op->SetAttr("scale", 1.0f);
+    grad_op->SetType("increment");
+    grad_op->SetInput("X", Output("Out"));
+    grad_op->SetOutput("Out", Input("X"));
+    grad_op->SetAttr("step", -boost::get<float>(GetAttr("step")));
     return std::unique_ptr<framework::OpDescBind>(grad_op);
   }
 };
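
The backward rule changes in the same spirit: instead of emitting a scale op over the output gradient, the maker now emits another increment whose step is negated and wires the forward variables directly (input Out, output X), which effectively rolls the value back rather than propagating a conventional gradient. A trivial arithmetic check of the reversal, assuming the forward op computed Out = X + step:

// Minimal check (plain C++, no Paddle): incrementing by step and then by
// -step restores the original value, which is what the new grad maker encodes.
#include <cassert>

int main() {
  const float step = 2.0f;
  const float x = 3.0f;
  const float out = x + step;          // forward increment
  const float restored = out + -step;  // "gradient" op: increment by -step
  assert(restored == x);               // the forward update is undone exactly
  return 0;
}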
@@ -71,11 +106,5 @@ class IncrementGradOpMaker : public framework::SingleGradOpDescMaker {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-
-REGISTER_OPERATOR(increment, ops::IncrementOp, ops::IncrementOpMaker,
-                  ops::IncrementGradOpMaker);
-REGISTER_OP_CPU_KERNEL(
-    increment, ops::IncrementKernel<paddle::platform::CPUPlace, float>,
-    ops::IncrementKernel<paddle::platform::CPUPlace, double>,
-    ops::IncrementKernel<paddle::platform::CPUPlace, int>,
-    ops::IncrementKernel<paddle::platform::CPUPlace, int64_t>);
+REGISTER_OPERATOR(increment, ops::IncrementOp, ops::IncrementInferShape,
+                  ops::IncrementOpMaker, ops::IncrementGradOpMaker);
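
Because the op is now a plain OperatorBase with its own Run body, REGISTER_OPERATOR takes the shape-inference functor alongside the maker classes, and the separate per-type CPU/GPU kernel registrations (and the .cu/.h files below) disappear. The sketch below illustrates, with hypothetical names rather than the real REGISTER_OPERATOR machinery, the idea of a registry that stores one runner plus one shape-inference callable per op type:

// Hypothetical sketch of an operator registry; illustrative only.
#include <functional>
#include <iostream>
#include <map>
#include <string>

struct OpInfo {
  std::function<void()> run;          // stand-in for the operator body
  std::function<void()> infer_shape;  // stand-in for the InferShapeBase functor
};

std::map<std::string, OpInfo>& Registry() {
  static std::map<std::string, OpInfo> registry;
  return registry;
}

int main() {
  Registry()["increment"] = OpInfo{
      [] { std::cout << "run increment\n"; },
      [] { std::cout << "infer shape: Out has the same dims as X\n"; }};

  const OpInfo& info = Registry().at("increment");
  info.infer_shape();
  info.run();
  return 0;
}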

paddle/operators/increment_op.cu

Lines changed: 0 additions & 22 deletions
This file was deleted.

paddle/operators/increment_op.h

Lines changed: 0 additions & 40 deletions
This file was deleted.

python/paddle/v2/framework/layers.py

Lines changed: 6 additions & 3 deletions
@@ -872,7 +872,7 @@ def array_to_lod_tensor(x, table, main_program=None):
 
 
 def fill_constant(shape, dtype, value, main_program=None):
-    helper = LayerHelper("ones", **locals())
+    helper = LayerHelper("fill_constant", **locals())
     out = helper.create_tmp_variable(dtype=dtype)
     helper.append_op(
         type='fill_constant',
@@ -895,9 +895,12 @@ def zeros(shape, dtype, main_program=None):
     return fill_constant(value=0.0, **locals())
 
 
-def increment(x, value=1.0, main_program=None):
+def increment(x, value=1.0, in_place=True, main_program=None):
     helper = LayerHelper("increment", **locals())
-    out = helper.create_tmp_variable(dtype=x.data_type)
+    if in_place:
+        out = x
+    else:
+        out = helper.create_tmp_variable(dtype=x.data_type)
     helper.append_op(
         type='increment',
         inputs={'X': [x]},

python/paddle/v2/framework/tests/test_array_read_write_op.py

Lines changed: 2 additions & 4 deletions
@@ -20,21 +20,19 @@ def test_read_write(self):
             each_x.stop_gradient = False
 
         i = layers.zeros(shape=[1], dtype='int64')
+        i.stop_gradient = False
         arr = layers.array_write(x=x[0], i=i)
         i = layers.increment(x=i)
-        i.stop_gradient = True
         arr = layers.array_write(x=x[1], i=i, array=arr)
         i = layers.increment(x=i)
-        i.stop_gradient = True
         arr = layers.array_write(x=x[2], i=i, array=arr)
 
         i = layers.zeros(shape=[1], dtype='int64')
+        i.stop_gradient = False
         a0 = layers.array_read(array=arr, i=i)
         i = layers.increment(x=i)
-        i.stop_gradient = True  # index should not calculate gradient
         a1 = layers.array_read(array=arr, i=i)
         i = layers.increment(x=i)
-        i.stop_gradient = True
         a2 = layers.array_read(array=arr, i=i)
 
         mean_a0 = layers.mean(x=a0)

python/paddle/v2/framework/tests/test_increment_op.py

Lines changed: 0 additions & 41 deletions
This file was deleted.
