Skip to content

Commit 497f59c

Browse files
Fix reduction over dim list for empty input
Differential Revision: D81383049. Pull Request resolved: #13833.
1 parent e289f6c commit 497f59c

File tree

8 files changed

+126
-9
lines changed

8 files changed

+126
-9
lines changed

kernels/portable/cpu/op_any.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ Tensor& any_dims_out(
105105
in, dim_list, out, [&](const auto begin, const auto end) {
106106
for (const auto out_ix : c10::irange(begin, end)) {
107107
bool any = false;
108-
if (in_not_empty) {
108+
if (plan.has_value()) {
109109
any = plan->execute<CTYPE_IN, bool>(
110110
[](CTYPE_IN v) { return static_cast<bool>(v); },
111111
[](bool outv, bool acc) { return acc || outv; },

kernels/portable/cpu/op_mean.cpp

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,10 @@ Tensor& mean_dim_out(
4545
InvalidArgument,
4646
out);
4747

48-
MapReduceOverDimListPlan plan(in, dim_list);
48+
std::optional<MapReduceOverDimListPlan> plan;
49+
if (in.numel() > 0) {
50+
plan.emplace(in, dim_list);
51+
}
4952
// @lint-ignore CLANGTIDY facebook-hte-CArray
5053
static constexpr const char op_name[] = "mean.out";
5154
ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE_IN, [&] {
@@ -56,8 +59,8 @@ Tensor& mean_dim_out(
5659
in, dim_list, out, [&](const auto begin, const auto end) {
5760
for (const auto out_ix : c10::irange(begin, end)) {
5861
CTYPE_OUT sum = 0;
59-
if (in.numel() > 0) {
60-
sum = plan.execute<CTYPE_IN, CTYPE_OUT>(
62+
if (plan.has_value()) {
63+
sum = plan->execute<CTYPE_IN, CTYPE_OUT>(
6164
[](CTYPE_IN v) { return static_cast<CTYPE_OUT>(v); },
6265
[](CTYPE_OUT outv, CTYPE_OUT acc) { return acc + outv; },
6366
out_ix);

kernels/portable/cpu/op_var.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ void compute_variance(
3232
for (const auto out_ix : c10::irange(out.numel())) {
3333
out_data[out_ix] = NAN;
3434
}
35-
} else {
35+
} else if (in.numel() > 0) {
3636
MapReduceOverDimListPlan plan(in, dim_list);
3737
const bool success = parallel_for_each_reduce_over_dim_list_output_index(
3838
in, dim_list, out, [&](const auto begin, const auto end) {

kernels/portable/cpu/util/reduce_util.h

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -543,6 +543,9 @@ class MapReduceOverDimListPlan {
543543
const MapOp& map_fun,
544544
const ReduceOp& reduce_fun,
545545
const size_t out_ix) const {
546+
ET_CHECK_MSG(
547+
plan_.get_input_tensor().numel() > 0, "Input tensor must be nonempty");
548+
546549
const size_t init_index =
547550
get_init_index(plan_.get_input_tensor(), plan_.get_dim_list(), out_ix);
548551

@@ -834,10 +837,12 @@ template <typename Func>
834837
const Func& func) {
835838
#ifdef ET_USE_THREADPOOL
836839
const ssize_t reduction_size = get_reduced_dim_product(in, dim_list);
837-
const auto grain_size = std::max(
838-
static_cast<ssize_t>(1),
839-
static_cast<ssize_t>(executorch::extension::internal::GRAIN_SIZE) /
840-
reduction_size);
840+
const auto grain_size = reduction_size == 0
841+
? 1
842+
: std::max(
843+
static_cast<ssize_t>(1),
844+
static_cast<ssize_t>(executorch::extension::internal::GRAIN_SIZE) /
845+
reduction_size);
841846
#else // ET_USE_THREADPOOL
842847
const auto grain_size = 1;
843848
#endif // ET_USE_THREADPOOL

kernels/test/op_any_test.cpp

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -148,3 +148,31 @@ TEST_F(OpAnyOutTest, SmokeTest) {
148148
op_any_out(self, dim, keepdim, out);
149149
EXPECT_TENSOR_CLOSE(out, out_expected);
150150
}
151+
152+
TEST_F(OpAnyOutTest, EmptyInput) {
153+
TensorFactory<ScalarType::Float> tf;
154+
TensorFactory<ScalarType::Bool> tfBool;
155+
156+
Tensor x = tf.make({2, 0, 3}, {});
157+
optional<ArrayRef<int64_t>> dim_list = ArrayRef<int64_t>{};
158+
Tensor out = tfBool.make({2, 0, 3}, {});
159+
160+
op_any_dims_out(x, dim_list, /*keepdim=*/true, out);
161+
EXPECT_TENSOR_CLOSE(out, tfBool.zeros({2, 0, 3}));
162+
163+
out = tfBool.ones({2, 0, 3});
164+
op_any_dims_out(x, dim_list, /*keepdim=*/false, out);
165+
EXPECT_TENSOR_CLOSE(out, tfBool.zeros({2, 0, 3}));
166+
167+
int64_t dims1[1] = {1};
168+
dim_list = ArrayRef<int64_t>{dims1, 1};
169+
out = tfBool.ones({2, 3});
170+
op_any_dims_out(x, dim_list, /*keepdim=*/false, out);
171+
EXPECT_TENSOR_CLOSE(out, tfBool.zeros({2, 3}));
172+
173+
int64_t dims2[1] = {2};
174+
dim_list = ArrayRef<int64_t>{dims2, 1};
175+
out = tfBool.make({2, 0, 1}, {});
176+
op_any_dims_out(x, dim_list, /*keepdim=*/true, out);
177+
EXPECT_TENSOR_CLOSE(out, tfBool.make({2, 0, 1}, {}));
178+
}

kernels/test/op_mean_test.cpp

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -551,3 +551,30 @@ TEST_F(OpMeanOutTest, DTypeOutFloatNAN) {
551551
Tensor ret = op_mean_dtype_out(x, ScalarType::Float, out);
552552
EXPECT_TENSOR_CLOSE(out, expected_result);
553553
}
554+
555+
TEST_F(OpMeanOutTest, EmptyInput) {
556+
TensorFactory<ScalarType::Float> tf;
557+
558+
Tensor x = tf.make({2, 0, 3}, {});
559+
optional<ScalarType> dtype = ScalarType::Float;
560+
optional<ArrayRef<int64_t>> dim_list = ArrayRef<int64_t>{};
561+
Tensor out = tf.zeros({1, 1, 1});
562+
op_mean_out(x, dim_list, /*keepdim=*/true, dtype, out);
563+
EXPECT_TENSOR_CLOSE(out, tf.make({1, 1, 1}, {NAN}));
564+
565+
out = tf.zeros({});
566+
op_mean_out(x, dim_list, /*keepdim=*/false, dtype, out);
567+
EXPECT_TENSOR_CLOSE(out, tf.make({}, {NAN}));
568+
569+
int64_t dims1[1] = {1};
570+
dim_list = ArrayRef<int64_t>{dims1, 1};
571+
out = tf.zeros({2, 3});
572+
op_mean_out(x, dim_list, /*keepdim=*/false, dtype, out);
573+
EXPECT_TENSOR_CLOSE(out, tf.make({2, 3}, {NAN, NAN, NAN, NAN, NAN, NAN}));
574+
575+
int64_t dims2[1] = {2};
576+
dim_list = ArrayRef<int64_t>{dims2, 1};
577+
out = tf.make({2, 0, 1}, {});
578+
op_mean_out(x, dim_list, /*keepdim=*/true, dtype, out);
579+
EXPECT_TENSOR_CLOSE(out, tf.make({2, 0, 1}, {}));
580+
}

kernels/test/op_sum_test.cpp

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -490,3 +490,30 @@ TEST_F(OpSumOutTest, InfinityAndNANTest) {
490490
}));
491491
// clang-format on
492492
}
493+
494+
TEST_F(OpSumOutTest, EmptyInput) {
495+
TensorFactory<ScalarType::Float> tf;
496+
497+
Tensor x = tf.make({2, 0, 3}, {});
498+
optional<ScalarType> dtype = ScalarType::Float;
499+
optional<ArrayRef<int64_t>> dim_list = ArrayRef<int64_t>{};
500+
Tensor out = tf.ones({1, 1, 1});
501+
op_sum_intlist_out(x, dim_list, /*keepdim=*/true, dtype, out);
502+
EXPECT_TENSOR_CLOSE(out, tf.zeros({1, 1, 1}));
503+
504+
out = tf.ones({});
505+
op_sum_intlist_out(x, dim_list, /*keepdim=*/false, dtype, out);
506+
EXPECT_TENSOR_CLOSE(out, tf.zeros({}));
507+
508+
int64_t dims1[1] = {1};
509+
dim_list = ArrayRef<int64_t>{dims1, 1};
510+
out = tf.ones({2, 3});
511+
op_sum_intlist_out(x, dim_list, /*keepdim=*/false, dtype, out);
512+
EXPECT_TENSOR_CLOSE(out, tf.zeros({2, 3}));
513+
514+
int64_t dims2[1] = {2};
515+
dim_list = ArrayRef<int64_t>{dims2, 1};
516+
out = tf.make({2, 0, 1}, {});
517+
op_sum_intlist_out(x, dim_list, /*keepdim=*/true, dtype, out);
518+
EXPECT_TENSOR_CLOSE(out, tf.make({2, 0, 1}, {}));
519+
}

kernels/test/op_var_test.cpp

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -468,3 +468,30 @@ TEST_F(OpVarCorrectionOutTest, SmokeTest) {
468468
ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
469469
#undef TEST_ENTRY
470470
}
471+
472+
TEST_F(OpVarOutTest, EmptyInput) {
473+
TensorFactory<ScalarType::Float> tf;
474+
475+
Tensor x = tf.make({2, 0, 3}, {});
476+
bool unbiased = true;
477+
optional<ArrayRef<int64_t>> dim_list = ArrayRef<int64_t>{};
478+
Tensor out = tf.zeros({1, 1, 1});
479+
op_var_out(x, dim_list, unbiased, /*keepdim=*/true, out);
480+
EXPECT_TENSOR_CLOSE(out, tf.make({1, 1, 1}, {NAN}));
481+
482+
out = tf.zeros({});
483+
op_var_out(x, dim_list, unbiased, /*keepdim=*/false, out);
484+
EXPECT_TENSOR_CLOSE(out, tf.make({}, {NAN}));
485+
486+
int64_t dims1[1] = {1};
487+
dim_list = ArrayRef<int64_t>{dims1, 1};
488+
out = tf.zeros({2, 3});
489+
op_var_out(x, dim_list, unbiased, /*keepdim=*/false, out);
490+
EXPECT_TENSOR_CLOSE(out, tf.make({2, 3}, {NAN, NAN, NAN, NAN, NAN, NAN}));
491+
492+
int64_t dims2[1] = {2};
493+
dim_list = ArrayRef<int64_t>{dims2, 1};
494+
out = tf.make({2, 0, 1}, {});
495+
op_var_out(x, dim_list, unbiased, /*keepdim=*/true, out);
496+
EXPECT_TENSOR_CLOSE(out, tf.make({2, 0, 1}, {}));
497+
}

0 commit comments

Comments (0)