Commit 42e61af

polish
test=develop

1 parent 4d9feb3
3 files changed (+10, -8 lines)


paddle/fluid/imperative/layer.cc

Lines changed: 1 addition & 1 deletion
@@ -211,8 +211,8 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
 
   std::vector<framework::VariableValueMap> grad_outputs;
   if (backward_id_ > 0) {
-    grad_outputs.resize(1);
     VLOG(3) << "py_layer_grad";
+    grad_outputs.resize(1);
     grad_outputs[0][framework::GradVarName(PyLayer::kFwdOut)] =
         PyLayer::ApplyGrad(
             backward_id_,
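
For context: grad_outputs holds one VariableValueMap per backward op, and on the PyLayer path there is exactly one Python backward callable, so the vector is sized to a single slot map before index 0 is written. A rough Python sketch of that invariant (illustrative only, not Paddle code; the slot name is assumed):

backward_id = 7                  # hypothetical id of a registered PyLayer backward
grad_outputs = []
if backward_id > 0:
    print("py_layer_grad")       # the VLOG(3) trace now comes first
    grad_outputs = [{}]          # grad_outputs.resize(1)
    # GradVarName(PyLayer::kFwdOut) would yield a name like "Out@GRAD" (assumed)
    grad_outputs[0]["Out@GRAD"] = ["values returned by the Python backward"]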

paddle/fluid/imperative/layer.h

Lines changed: 5 additions & 0 deletions
@@ -199,8 +199,10 @@ class OpBase {
   // For pure python PyLayer, use `forward_id_`, otherwise, use op_desc_.
   framework::OpDesc* op_desc_;
   int forward_id_;
+
   // When has backward, one of `grad_op_descs_` or `backward_id_` is set,
   // not both.
+  // Note: each fwd op corresponds to a vector of bwd ops.
   std::vector<framework::OpDesc*> grad_op_descs_;
   int backward_id_;
 
@@ -211,8 +213,11 @@ class OpBase {
   OpBasePtrMap pre_ops_;
   std::map<std::string, std::vector<int>> pre_ops_out_idx_;
 
+  // Inputs to a vector of bwd ops.
   std::vector<framework::VariableValueMap> grad_input_vars_;
+  // Outputs to a vector of bwd ops.
   std::vector<framework::VariableValueMap> grad_output_vars_;
+
   framework::BlockDesc* block_;
 };
 
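
To make the new comments concrete: a single forward op may expand into several backward ops, so grad_op_descs_, grad_input_vars_ and grad_output_vars_ are parallel vectors indexed by backward-op position. A minimal Python sketch of that shape (illustrative names, not Paddle code):

grad_op_descs = ["mul_grad", "elementwise_add_grad"]  # hypothetical bwd op types
grad_input_vars = [                                   # inputs to the i-th bwd op
    {"Out@GRAD": ["dout"], "X": ["x"], "Y": ["y"]},
    {"Out@GRAD": ["dout"]},
]
grad_output_vars = [                                  # outputs of the i-th bwd op
    {"X@GRAD": ["dx"], "Y@GRAD": ["dy"]},
    {"Y@GRAD": ["dbias"]},
]
assert len(grad_op_descs) == len(grad_input_vars) == len(grad_output_vars)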

python/paddle/fluid/tests/unittests/test_imperative.py

Lines changed: 4 additions & 7 deletions
@@ -68,19 +68,16 @@ def forward(self, inputs):
 
 class TestImperative(unittest.TestCase):
     def test_sum_op(self):
+        x = np.ones([2, 2], np.float32)
         with fluid.imperative.guard():
             inputs = []
             for _ in range(10):
-                inputs.append(
-                    fluid.imperative.base.to_variable(
-                        np.ones([2, 2], np.float32)))
-            sys.stderr.write('%s\n' % inputs[0].dtype)
+                inputs.append(fluid.imperative.base.to_variable(x))
             ret = fluid.layers.sums(inputs)
-            sys.stderr.write('%s\n' % ret.dtype)
             loss = fluid.layers.reduce_sum(ret)
-            sys.stderr.write('%s\n' % loss.dtype)
             loss._backward()
-            sys.stderr.write('%s %s\n' % (ret._numpy(), inputs[0]._gradient()))
+            self.assertTrue(np.allclose(ret._numpy(), x * 10))
+            self.assertTrue(np.allclose(inputs[0]._gradient(), x))
 
     def test_layer(self):
         with fluid.imperative.guard():
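
The new assertions encode the expected math directly: summing ten copies of x gives 10 * x, and because loss = reduce_sum of that sum, the gradient reaching each element of inputs[0] is 1, i.e. a matrix equal to x here. A standalone numpy check of those expectations (a sketch of the arithmetic, not the Paddle test):

import numpy as np

x = np.ones([2, 2], np.float32)
ret = sum(x for _ in range(10))   # what fluid.layers.sums computes element-wise
assert np.allclose(ret, x * 10)
grad = np.ones_like(x)            # d(reduce_sum)/d(each element of inputs[0])
assert np.allclose(grad, x)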

0 commit comments