Skip to content

Commit 22fd52c

Browse files
authored
[0-size Tensor No.41、125] Add 0-size Tensor support for cumsum (PaddlePaddle#72799)
1 parent e7ab115 commit 22fd52c

File tree

6 files changed

+85
-0
lines changed

6 files changed

+85
-0
lines changed

paddle/phi/kernels/cpu/cum_kernel.cc

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,10 @@ void ScanKernel(const Context& dev_ctx,
5757
bool reverse,
5858
Reducer reducer,
5959
DenseTensor* out) {
60+
if (out && out->numel() == 0) {
61+
dev_ctx.template Alloc<T>(out);
62+
return;
63+
}
6064
dev_ctx.template Alloc<T>(out);
6165

6266
if (x.numel() == 1) {

paddle/phi/kernels/gpu/cum_kernel.cu

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -274,6 +274,10 @@ void ScanKernel(const Context& dev_ctx,
274274
bool reverse,
275275
Op op,
276276
DenseTensor* out) {
277+
if (out && out->numel() == 0) {
278+
dev_ctx.template Alloc<T>(out);
279+
return;
280+
}
277281
T* out_data = dev_ctx.template Alloc<T>(out);
278282

279283
// For 0D Tensor

paddle/phi/kernels/impl/logcumsumexp_grad_impl.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,10 @@ void LogcumsumexpGradKernel(const Context& dev_ctx,
5050
bool exclusive,
5151
bool reverse,
5252
DenseTensor* d_x) {
53+
if (d_x && d_x->numel() == 0) {
54+
dev_ctx.template Alloc<T>(d_x);
55+
return;
56+
}
5357
reverse = !reverse;
5458
dev_ctx.template Alloc<T>(d_x);
5559

paddle/phi/kernels/xpu/cum_kernel.cc

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,10 @@ void CumsumKernel(const Context& dev_ctx,
2828
bool reverse,
2929
DenseTensor* out) {
3030
using XPUType = typename XPUTypeTrait<T>::Type;
31+
if (out && out->numel() == 0) {
32+
dev_ctx.template Alloc<T>(out);
33+
return;
34+
}
3135
dev_ctx.template Alloc<T>(out);
3236

3337
if (x.numel() == 1) {

test/legacy_test/test_cumsum_op.py

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -747,5 +747,39 @@ def test_fp16(self):
747747
paddle.disable_static()
748748

749749

750+
def create_test_class(op_type, dtype, shape, axis):
751+
class Cls(unittest.TestCase):
752+
def test_zero_size(self):
753+
paddle.disable_static()
754+
numpy_tensor_1 = np.random.rand(*shape).astype(dtype)
755+
paddle_x = paddle.to_tensor(numpy_tensor_1)
756+
paddle_x.stop_gradient = False
757+
758+
paddle_api = eval(f"paddle.{op_type}")
759+
paddle_out = paddle_api(paddle_x, axis=axis)
760+
numpy_api = eval(f"np.{op_type}")
761+
numpy_out = numpy_api(numpy_tensor_1, axis=axis)
762+
763+
np.testing.assert_allclose(
764+
paddle_out.numpy(),
765+
numpy_out,
766+
1e-2,
767+
1e-2,
768+
)
769+
np.testing.assert_allclose(
770+
paddle_out.shape,
771+
numpy_out.shape,
772+
)
773+
774+
cls_name = f"{op_type}{dtype}_0SizeTest"
775+
Cls.__name__ = cls_name
776+
globals()[cls_name] = Cls
777+
778+
779+
create_test_class("cumsum", "float32", [3, 4, 0], 0)
780+
create_test_class("cumsum", "float64", [3, 4, 0, 3, 4], -2)
781+
create_test_class("cumsum", "int32", [3, 4, 0], 0)
782+
create_test_class("cumsum", "int64", [3, 4, 0, 3, 4], -1)
783+
750784
if __name__ == '__main__':
751785
unittest.main()

test/legacy_test/test_logcumsumexp_op.py

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -375,5 +375,40 @@ def test_check_grad(self):
375375
)
376376

377377

378+
def create_test_class(op_type, dtype, shape, axis):
379+
class Cls(unittest.TestCase):
380+
def test_zero_size(self):
381+
paddle.disable_static()
382+
numpy_tensor_1 = np.random.rand(*shape).astype(dtype)
383+
paddle_x = paddle.to_tensor(numpy_tensor_1)
384+
paddle_x.stop_gradient = False
385+
386+
paddle_api = eval(f"paddle.{op_type}")
387+
paddle_out = paddle_api(paddle_x, axis=axis)
388+
numpy_out = np.log(
389+
np.cumsum(np.exp(numpy_tensor_1), axis=axis)
390+
) # Numpy does not have logcumsumexp
391+
392+
np.testing.assert_allclose(
393+
paddle_out.numpy(),
394+
numpy_out,
395+
1e-2,
396+
1e-2,
397+
)
398+
np.testing.assert_allclose(
399+
paddle_out.shape,
400+
numpy_out.shape,
401+
)
402+
403+
cls_name = f"{op_type}{dtype}_0SizeTest"
404+
Cls.__name__ = cls_name
405+
globals()[cls_name] = Cls
406+
407+
408+
create_test_class("logcumsumexp", "float32", [3, 4, 0], 0)
409+
create_test_class("logcumsumexp", "float64", [3, 4, 0, 3, 4], -2)
410+
create_test_class("logcumsumexp", "int32", [3, 4, 0], 0)
411+
create_test_class("logcumsumexp", "int64", [3, 4, 0, 3, 4], -1)
412+
378413
if __name__ == '__main__':
379414
unittest.main()

0 commit comments

Comments (0)