Skip to content

Commit 9863408

Browse files
authored
Revert "[PHI]fix 0 size error (#71485)" (#71568) (#71576)
This reverts commit e134de7.
1 parent 9b805c4 commit 9863408

File tree

6 files changed

+23
-51
lines changed

6 files changed

+23
-51
lines changed

paddle/phi/infermeta/binary.cc

Lines changed: 18 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -2434,19 +2434,16 @@ void IndexSelectInferMeta(const MetaTensor& x,
24342434
"the dimension of Input(Index) is [%d].",
24352435
index_dim,
24362436
index_dim.size()));
2437-
if (dim < 0) {
2438-
dim += input_dim.size();
2439-
}
24402437

2441-
if (input_dim[dim] != 0) {
2442-
PADDLE_ENFORCE_EQ(index_dim[0] != 0,
2443-
true,
2444-
common::errors::InvalidArgument(
2445-
"The length of Input(Index) can't be 0."));
2446-
}
2438+
PADDLE_ENFORCE_EQ(index_dim[0] != 0,
2439+
true,
2440+
common::errors::InvalidArgument(
2441+
"The length of Input(Index) can't be 0."));
24472442

24482443
auto output_dim = common::vectorize(input_dim);
2449-
2444+
if (dim < 0) {
2445+
dim += input_dim.size();
2446+
}
24502447
output_dim[dim] = index_dim[0];
24512448
output->set_dims(common::make_ddim(output_dim));
24522449
output->set_dtype(x.dtype());
@@ -3671,23 +3668,18 @@ void RepeatInterleaveWithTensorIndexInferMeta(const MetaTensor& x,
36713668
repeats_dim,
36723669
repeats_dim.size()));
36733670

3674-
if (input_dim.size() == 1 && input_dim[0] == 0) {
3675-
output_dim[0] = 0;
3676-
} else {
3677-
PADDLE_ENFORCE_EQ(repeats_dim[0] != 0,
3678-
true,
3679-
common::errors::InvalidArgument(
3680-
"The length of Input(RepeatsTensor) can't be 0."));
3681-
PADDLE_ENFORCE_NE(
3682-
out,
3683-
nullptr,
3684-
common::errors::InvalidArgument(
3685-
"repeat_interleave's output tensor can't be nullptr"));
3686-
if (dim < 0) {
3687-
dim += input_dim.size();
3688-
}
3689-
output_dim[dim] = -1;
3671+
PADDLE_ENFORCE_EQ(repeats_dim[0] != 0,
3672+
true,
3673+
common::errors::InvalidArgument(
3674+
"The length of Input(RepeatsTensor) can't be 0."));
3675+
PADDLE_ENFORCE_NE(out,
3676+
nullptr,
3677+
common::errors::InvalidArgument(
3678+
"repeat_interleave's output tensor can't be nullptr"));
3679+
if (dim < 0) {
3680+
dim += input_dim.size();
36903681
}
3682+
output_dim[dim] = -1;
36913683

36923684
out->set_dims(common::make_ddim(output_dim));
36933685
out->share_lod(x);

paddle/phi/kernels/gpu/concat_kernel.cu

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -45,10 +45,6 @@ void ConcatKernel(const Context& dev_ctx,
4545
out->Resize(out_dims);
4646
dev_ctx.template Alloc<T>(out);
4747

48-
if (out->numel() == 0) {
49-
return;
50-
}
51-
5248
// If axis is 0, the lod of the output is not the same as inputs.
5349
if (axis == 0 && x[0]->lod().size() > 0) {
5450
size_t lod_size_0 = x[0]->lod().size();

paddle/phi/kernels/gpu/masked_select_kernel.cu

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -53,13 +53,6 @@ void MaskedSelectKernel(const Context& dev_ctx,
5353
DenseTensor mask_expand;
5454
DenseTensor x_expand;
5555

56-
if (x.numel() == 0 || mask.numel() == 0) {
57-
out->Resize({0});
58-
dev_ctx.template Alloc<T>(out);
59-
60-
return;
61-
}
62-
6356
auto expanded_size = funcs::MatrixGetBroadcastBatchPortion(
6457
common::vectorize(x.dims()), common::vectorize(mask.dims()));
6558

paddle/phi/kernels/impl/repeat_interleave_kernel_impl.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -120,10 +120,6 @@ void RepeatInterleaveWithTensorIndexKernel(const Context& ctx,
120120
const DenseTensor& repeats_tensor,
121121
int dim,
122122
DenseTensor* out) {
123-
if (x.numel() == 0) {
124-
ctx.template Alloc<T>(out);
125-
return;
126-
}
127123
auto place = ctx.GetPlace();
128124
auto cpu_place = phi::CPUPlace();
129125

paddle/phi/kernels/kps/reduce_kernel.cu

Lines changed: 3 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -249,16 +249,9 @@ void SumRawKernel(const Context& dev_ctx,
249249
}
250250
}
251251
out->Resize(phi::make_ddim(out_dims));
252-
if (x.dtype() == phi::DataType::BOOL || x.dtype() == phi::DataType::INT32) {
253-
dev_ctx.template Alloc<int64_t>(out);
254-
FullKernel<int64_t, Context>(
255-
dev_ctx, out_dims, 0, phi::CppTypeToDataType<int64_t>::Type(), out);
256-
} else {
257-
dev_ctx.template Alloc<T>(out);
258-
FullKernel<T, Context>(
259-
dev_ctx, out_dims, 0, phi::CppTypeToDataType<T>::Type(), out);
260-
}
261-
252+
dev_ctx.template Alloc<T>(out);
253+
FullKernel<T, Context>(
254+
dev_ctx, out_dims, 0, phi::CppTypeToDataType<T>::Type(), out);
262255
return;
263256
}
264257
if (x.numel() > std::numeric_limits<int32_t>::max()) {

python/paddle/tensor/manipulation.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1409,6 +1409,8 @@ def concat(
14091409
if in_dynamic_mode():
14101410
if isinstance(axis, Variable):
14111411
axis = axis.item(0)
1412+
if not isinstance(input, (Variable, paddle.pir.Value)):
1413+
input = [t for t in input if t.shape.count(0) == 0]
14121414
return _C_ops.concat(input, axis)
14131415
elif in_pir_mode():
14141416

0 commit comments

Comments (0)