Skip to content

Commit 6094a72

Browse files
authored
Fix bug in reduce_op caused by PR #13534 (#13748)
* Fix bug in reduce_op caused by PR #13534
* Fix output shape and enhance unit test. test=develop
1 parent 59c306e commit 6094a72

File tree

2 files changed

+89
-1
lines changed

2 files changed

+89
-1
lines changed

paddle/fluid/operators/cub_reduce.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222

2323
#include <cub/cub.cuh> // NOLINT
2424
#include "paddle/fluid/framework/tensor.h"
25+
#include "paddle/fluid/framework/tensor_util.h"
2526

2627
namespace paddle {
2728
namespace operators {
@@ -293,7 +294,12 @@ void TensorReduce(const framework::Tensor& x, framework::Tensor* y,
293294
}
294295
auto x_data = x.data<Tx>();
295296
auto y_data = y->mutable_data<Ty>(x.place());
296-
if (reduce_num == 1) return;
297+
if (reduce_num == 1) {
298+
auto out_dims = y->dims();
299+
framework::TensorCopy(x, y->place(), y);
300+
y->Resize(out_dims);
301+
return;
302+
}
297303

298304
#define CUB_BLOCK_DIM_CASE(block_dim) \
299305
case block_dim: { \

python/paddle/fluid/tests/unittests/test_reduce_op.py

Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -243,5 +243,87 @@ def test_check_grad(self):
243243
self.check_grad(['X'], 'Out')
244244

245245

246+
class TestReduceSumWithDimOne(OpTest):
247+
def setUp(self):
248+
self.op_type = "reduce_sum"
249+
self.inputs = {'X': np.random.random((10, 1, 1)).astype("float64")}
250+
self.attrs = {'dim': [1, 2], 'keep_dim': True}
251+
self.outputs = {
252+
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
253+
keepdims=True)
254+
}
255+
256+
def test_check_output(self):
257+
self.check_output()
258+
259+
def test_check_grad(self):
260+
self.check_grad(['X'], 'Out')
261+
262+
263+
class TestReduceSumWithNumelOne(OpTest):
264+
def setUp(self):
265+
self.op_type = "reduce_sum"
266+
self.inputs = {'X': np.random.random((1, 1)).astype("float64")}
267+
self.attrs = {'dim': [1], 'keep_dim': False}
268+
self.outputs = {
269+
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
270+
keepdims=False)
271+
}
272+
273+
def test_check_output(self):
274+
self.check_output()
275+
276+
def test_check_grad(self):
277+
self.check_grad(['X'], 'Out')
278+
279+
280+
class TestReduceMeanWithDimOne(OpTest):
281+
def setUp(self):
282+
self.op_type = "reduce_mean"
283+
self.inputs = {'X': np.random.random((10, 1, 1)).astype("float64")}
284+
self.attrs = {'dim': [1], 'keep_dim': False}
285+
self.outputs = {
286+
'Out': self.inputs['X'].mean(
287+
axis=tuple(self.attrs['dim']), keepdims=False)
288+
}
289+
290+
def test_check_output(self):
291+
self.check_output()
292+
293+
def test_check_grad(self):
294+
self.check_grad(['X'], 'Out')
295+
296+
297+
class TestReduceMeanWithNumelOne(OpTest):
298+
def setUp(self):
299+
self.op_type = "reduce_mean"
300+
self.inputs = {'X': np.random.random((1, 1)).astype("float64")}
301+
self.attrs = {'dim': [1], 'keep_dim': True}
302+
self.outputs = {
303+
'Out': self.inputs['X'].mean(
304+
axis=tuple(self.attrs['dim']), keepdims=True)
305+
}
306+
307+
def test_check_output(self):
308+
self.check_output()
309+
310+
def test_check_grad(self):
311+
self.check_grad(['X'], 'Out')
312+
313+
314+
class TestReduceAll(OpTest):
315+
def setUp(self):
316+
self.op_type = "reduce_sum"
317+
self.inputs = {'X': np.random.random((1, 1, 1)).astype("float64")}
318+
self.attrs = {'reduce_all': True, 'keep_dim': False}
319+
self.outputs = {'Out': self.inputs['X'].sum()}
320+
321+
def test_check_output(self):
322+
self.check_output()
323+
324+
def test_check_grad(self):
325+
self.check_grad(['X'], 'Out')
326+
327+
246328
if __name__ == '__main__':
247329
unittest.main()

0 commit comments

Comments (0)