
Commit a48dbfc

BujSetagrima1304 authored and committed
Refactoring Portable Operators to Standardize op_name Format (pytorch#12941)
### Summary

Minor refactor to standardize how operator names are printed when debugging. This is especially useful when hitting a missing-operator error.
1 parent 6627cbc commit a48dbfc

21 files changed: +153 additions, -105 deletions
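
Every file below applies the same mechanical change: the operator-name string, previously repeated as a literal at each `ET_SWITCH_*` call site (or held in a local `constexpr auto name`), is hoisted into one `static constexpr const char op_name[]` per kernel, so the name printed on a dtype-dispatch failure is declared exactly once. A rough standalone illustration of the pattern follows; `switch_real_types` and `my_op.out` are hypothetical stand-ins, not the real `ET_SWITCH_*` macros:

```cpp
#include <cstdio>

// Hypothetical stand-in for an ET_SWITCH_* macro: dispatch on a runtime
// dtype tag, and report the operator name when no case matches.
template <typename Fn>
void switch_real_types(int dtype, const char* op_name, Fn&& fn) {
  if (dtype == 0) {  // e.g. Float
    fn();
  } else {
    // This is the error path the standardized op_name feeds into.
    std::printf("Unsupported dtype in %s\n", op_name);
  }
}

int main() {
  // The pattern this commit standardizes on: one compile-time constant
  // per kernel, reused by every dispatch site in the function.
  static constexpr const char op_name[] = "my_op.out";  // hypothetical name
  switch_real_types(/*dtype=*/1, op_name, [] { /* kernel body */ });
  return 0;
}
```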

kernels/portable/cpu/op__to_dim_order_copy.cpp
Lines changed: 9 additions & 13 deletions

@@ -54,19 +54,15 @@ Tensor& _to_dim_order_copy_out(
     return out;
   }
 
-  ET_SWITCH_REALHBBF16_TYPES(
-      self.scalar_type(),
-      ctx,
-      "dim_order_ops::_to_dim_order_copy.out",
-      CTYPE_IN,
-      [&] {
-        ET_SWITCH_REALHBBF16_TYPES(
-            out.scalar_type(),
-            ctx,
-            "dim_order_ops::_to_dim_order_copy.out",
-            CTYPE_OUT,
-            [&] { _to_dim_order_copy_impl<CTYPE_IN, CTYPE_OUT>(self, out); });
-      });
+  // @lint-ignore CLANGTIDY facebook-hte-CArray
+  static constexpr const char op_name[] =
+      "dim_order_ops::_to_dim_order_copy.out";
+
+  ET_SWITCH_REALHBBF16_TYPES(self.scalar_type(), ctx, op_name, CTYPE_IN, [&] {
+    ET_SWITCH_REALHBBF16_TYPES(out.scalar_type(), ctx, op_name, CTYPE_OUT, [&] {
+      _to_dim_order_copy_impl<CTYPE_IN, CTYPE_OUT>(self, out);
+    });
+  });
 
   return out;
 }

kernels/portable/cpu/op_abs.cpp
Lines changed: 6 additions & 3 deletions

@@ -37,13 +37,16 @@ Tensor& abs_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
   ET_KERNEL_CHECK(
       ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
 
+  // @lint-ignore CLANGTIDY facebook-hte-CArray
+  static constexpr const char op_name[] = "abs.out";
+
   if (in_is_complex) {
     // NOTE: Elected not to add COMPLEXH to dtype_util.h for now
     // because I am not planning wide rollout of complex support; if
     // we do add SupportedTensorDtypes::COMPLEXH support, then we
     // should use it here.
-    ET_SWITCH_COMPLEXH_TYPES(in.scalar_type(), ctx, "abs.out", CTYPE_IN, [&] {
-      ET_SWITCH_FLOATH_TYPES(out.scalar_type(), ctx, "abs.out", CTYPE_OUT, [&] {
+    ET_SWITCH_COMPLEXH_TYPES(in.scalar_type(), ctx, op_name, CTYPE_IN, [&] {
+      ET_SWITCH_FLOATH_TYPES(out.scalar_type(), ctx, op_name, CTYPE_OUT, [&] {
         apply_unary_map_fn<CTYPE_IN, CTYPE_OUT>(
             [](const CTYPE_IN val_in) -> CTYPE_OUT {
               return sqrt(

@@ -55,7 +58,7 @@ Tensor& abs_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
       });
     });
   } else {
-    ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "abs.out", CTYPE, [&] {
+    ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&] {
       apply_unary_map_fn(
           [](const CTYPE val_in) {
             if (val_in < 0) {

kernels/portable/cpu/op_amax.cpp
Lines changed: 5 additions & 1 deletion

@@ -44,7 +44,11 @@ Tensor& amax_out(
       ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
 
   ReduceOverDimListPlan plan(in, dim_list);
-  ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "amax.out", CTYPE, [&]() {
+
+  // @lint-ignore CLANGTIDY facebook-hte-CArray
+  static constexpr const char op_name[] = "amax.out";
+
+  ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&]() {
     CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
     const bool success = parallel_for_each_reduce_over_dim_list_output_index(
         in, dim_list, out, [&](const auto begin, const auto end) {

kernels/portable/cpu/op_amin.cpp
Lines changed: 5 additions & 1 deletion

@@ -43,7 +43,11 @@ Tensor& amin_out(
       ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
 
   ReduceOverDimListPlan plan(in, dim_list);
-  ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "amin.out", CTYPE, [&]() {
+
+  // @lint-ignore CLANGTIDY facebook-hte-CArray
+  static constexpr const char op_name[] = "amin.out";
+
+  ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&]() {
     CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
     const bool success = parallel_for_each_reduce_over_dim_list_output_index(
         in, dim_list, out, [&](const auto begin, const auto end) {

kernels/portable/cpu/op_any.cpp
Lines changed: 15 additions & 9 deletions

@@ -30,10 +30,12 @@ Tensor& any_all_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
 
   ScalarType in_type = in.scalar_type();
   ScalarType out_type = out.scalar_type();
-  constexpr auto name = "any.all_out";
 
-  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
-    ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, name, CTYPE_OUT, [&] {
+  // @lint-ignore CLANGTIDY facebook-hte-CArray
+  static constexpr const char op_name[] = "any.all_out";
+
+  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, op_name, CTYPE_IN, [&] {
+    ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, op_name, CTYPE_OUT, [&] {
       const auto data_in = in.const_data_ptr<CTYPE_IN>();
       auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
       data_out[0] = static_cast<CTYPE_OUT>(false);

@@ -79,15 +81,17 @@ Tensor& any_dims_out(
 
   ScalarType in_type = in.scalar_type();
   ScalarType out_type = out.scalar_type();
-  constexpr auto name = "any.dims_out";
+
+  // @lint-ignore CLANGTIDY facebook-hte-CArray
+  static constexpr const char op_name[] = "any.dims_out";
 
   const bool in_not_empty = in.numel() > 0;
   std::optional<MapReduceOverDimListPlan> plan;
   if ((!dim_list.has_value() || !dim_list.value().empty()) && in_not_empty) {
     plan.emplace(in, dim_list);
   }
-  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
-    ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, name, CTYPE_OUT, [&] {
+  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, op_name, CTYPE_IN, [&] {
+    ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, op_name, CTYPE_OUT, [&] {
       CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
       if (dim_list.has_value() && dim_list.value().empty()) {
         const CTYPE_IN* in_data = in.const_data_ptr<CTYPE_IN>();

@@ -144,10 +148,12 @@ Tensor& any_out(
 
   ScalarType in_type = in.scalar_type();
   ScalarType out_type = out.scalar_type();
-  constexpr auto name = "any.out";
 
-  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
-    ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, name, CTYPE_OUT, [&] {
+  // @lint-ignore CLANGTIDY facebook-hte-CArray
+  static constexpr const char op_name[] = "any.out";
+
+  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, op_name, CTYPE_IN, [&] {
+    ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, op_name, CTYPE_OUT, [&] {
       CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
       const bool success = parallel_for_each_reduce_over_dim_output_index(
           in, dim, out, [&](const auto begin, const auto end) {

kernels/portable/cpu/op_argmax.cpp
Lines changed: 4 additions & 1 deletion

@@ -44,7 +44,10 @@ Tensor& argmax_out(
   ET_KERNEL_CHECK(
       ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
 
-  ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "argmax.out", CTYPE, [&] {
+  // @lint-ignore CLANGTIDY facebook-hte-CArray
+  static constexpr const char op_name[] = "argmax.out";
+
+  ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&] {
     long* out_data = out.mutable_data_ptr<long>();
 
     const bool success = parallel_for_each_reduce_over_dim_output_index(

kernels/portable/cpu/op_argmin.cpp
Lines changed: 4 additions & 1 deletion

@@ -44,7 +44,10 @@ Tensor& argmin_out(
   ET_KERNEL_CHECK(
       ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
 
-  ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "argmin.out", CTYPE, [&] {
+  // @lint-ignore CLANGTIDY facebook-hte-CArray
+  static constexpr const char op_name[] = "argmin.out";
+
+  ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&] {
     long* out_data = out.mutable_data_ptr<long>();
 
     const bool success = parallel_for_each_reduce_over_dim_output_index(

kernels/portable/cpu/op_avg_pool2d.cpp
Lines changed: 50 additions & 47 deletions

@@ -67,53 +67,56 @@ Tensor& avg_pool2d_out(
       out);
 
   ScalarType in_type = in.scalar_type();
-  ET_SWITCH_FLOATHBF16_TYPES_AND(
-      Long, in_type, ctx, "avg_pool2d.out", CTYPE, [&]() {
-        if (divisor_override.has_value()) {
-          int64_t divisor = divisor_override.value();
-          // If divisor_override is specified, then we don't need to use `count`
-          // in the calculation. Simply sum x / divisor to get the output.
-          apply_kernel_2d_reduce_then_map_fn<CTYPE>(
-              [](const CTYPE in_val,
-                 int64_t in_idx,
-                 CTYPE accum,
-                 int64_t accum_idx) {
-                // Average pooling does not track indexes, so return 0 for
-                // accum_idx
-                return std::tuple<CTYPE, int64_t>(in_val + accum, 0);
-              },
-              [divisor](const int64_t count, const CTYPE accum) {
-                return accum / static_cast<CTYPE>(divisor);
-              },
-              count_include_pad,
-              in,
-              kernel_size,
-              stride,
-              padding,
-              {},
-              out);
-        } else {
-          apply_kernel_2d_reduce_then_map_fn<CTYPE>(
-              [](const CTYPE in_val,
-                 int64_t in_idx,
-                 CTYPE accum,
-                 int64_t accum_idx) {
-                // Average pooling does not track indexes, so return 0 for
-                // accum_idx
-                return std::tuple<CTYPE, int64_t>(in_val + accum, 0);
-              },
-              [](const int64_t count, const CTYPE accum) {
-                return accum / static_cast<CTYPE>(count);
-              },
-              count_include_pad,
-              in,
-              kernel_size,
-              stride,
-              padding,
-              {},
-              out);
-        }
-      });
+
+  // @lint-ignore CLANGTIDY facebook-hte-CArray
+  static constexpr const char op_name[] = "avg_pool2d.out";
+
+  ET_SWITCH_FLOATHBF16_TYPES_AND(Long, in_type, ctx, op_name, CTYPE, [&]() {
+    if (divisor_override.has_value()) {
+      int64_t divisor = divisor_override.value();
+      // If divisor_override is specified, then we don't need to use `count`
+      // in the calculation. Simply sum x / divisor to get the output.
+      apply_kernel_2d_reduce_then_map_fn<CTYPE>(
+          [](const CTYPE in_val,
+             int64_t in_idx,
+             CTYPE accum,
+             int64_t accum_idx) {
+            // Average pooling does not track indexes, so return 0 for
+            // accum_idx
+            return std::tuple<CTYPE, int64_t>(in_val + accum, 0);
+          },
+          [divisor](const int64_t count, const CTYPE accum) {
+            return accum / static_cast<CTYPE>(divisor);
+          },
+          count_include_pad,
+          in,
+          kernel_size,
+          stride,
+          padding,
+          {},
+          out);
+    } else {
+      apply_kernel_2d_reduce_then_map_fn<CTYPE>(
+          [](const CTYPE in_val,
+             int64_t in_idx,
+             CTYPE accum,
+             int64_t accum_idx) {
+            // Average pooling does not track indexes, so return 0 for
+            // accum_idx
+            return std::tuple<CTYPE, int64_t>(in_val + accum, 0);
+          },
+          [](const int64_t count, const CTYPE accum) {
+            return accum / static_cast<CTYPE>(count);
+          },
+          count_include_pad,
+          in,
+          kernel_size,
+          stride,
+          padding,
+          {},
+          out);
+    }
+  });
 
   return out;
 }

kernels/portable/cpu/op_bitwise_not.cpp
Lines changed: 3 additions & 1 deletion

@@ -37,14 +37,16 @@ bitwise_not_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
   ET_KERNEL_CHECK(
       ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
 
+  // @lint-ignore CLANGTIDY facebook-hte-CArray
+  static constexpr const char op_name[] = "bitwise_not.out";
   if (in.scalar_type() == executorch::aten::ScalarType::Bool) {
     apply_unary_map_fn(
         [](const bool val_in) { return !val_in; },
         in.const_data_ptr<bool>(),
         out.mutable_data_ptr<bool>(),
         in.numel());
   } else if (isIntegralType(in.scalar_type(), /*includeBool=*/false)) {
-    ET_SWITCH_INT_TYPES(in.scalar_type(), ctx, "bitwise_not.out", CTYPE, [&] {
+    ET_SWITCH_INT_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&] {
       apply_unary_map_fn(
           [](const CTYPE val_in) { return ~val_in; },
           in.const_data_ptr<CTYPE>(),

kernels/portable/cpu/op_bmm.cpp
Lines changed: 4 additions & 3 deletions

@@ -36,16 +36,17 @@ Tensor& bmm_out(
       InvalidArgument,
       out);
 
-  constexpr auto name = "bmm.out";
+  // @lint-ignore CLANGTIDY facebook-hte-CArray
+  static constexpr const char op_name[] = "bmm.out";
 
   auto in_type = in.scalar_type();
 
   if (executorch::runtime::isComplexType(in_type)) {
-    ET_SWITCH_COMPLEXH_TYPES(in_type, ctx, name, CTYPE, [&]() {
+    ET_SWITCH_COMPLEXH_TYPES(in_type, ctx, op_name, CTYPE, [&]() {
       internal::bmm_out_impl<CTYPE>(in, mat2, out);
     });
   } else {
-    ET_SWITCH_REALH_TYPES(in_type, ctx, name, CTYPE, [&]() {
+    ET_SWITCH_REALH_TYPES(in_type, ctx, op_name, CTYPE, [&]() {
       internal::bmm_out_impl<CTYPE>(in, mat2, out);
     });
   }
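
A side note on the array form (my reading; the commit message does not state a motivation): the diffs consistently replace `constexpr auto name = "...";`, which deduces a `const char*` pointing at an unnamed string literal, with a named `static constexpr const char op_name[]` array, which is also why the `facebook-hte-CArray` lint has to be suppressed. One concrete difference between the two forms, sketched standalone below: a named constexpr array has static storage duration, so its address is usable as a non-type template argument in C++17, while a string literal's address is not.

```cpp
#include <cstdio>

// A name carried as a template argument: C++17 allows this only for
// pointers to objects with static storage duration.
template <const char* Name>
void report_missing_dtype() {
  std::printf("Unsupported dtype in %s\n", Name);
}

// OK: a named constexpr array is an object with static storage duration.
static constexpr const char op_name[] = "bmm.out";

// Not OK: `constexpr auto name = "bmm.out";` points at an unnamed string
// literal, whose address is not a valid template argument.

int main() {
  report_missing_dtype<op_name>();  // prints "Unsupported dtype in bmm.out"
  return 0;
}
```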
