Commit bc71d1c

msvc really hates constexpr variables I guess 2
1 parent 7857821 commit bc71d1c

22 files changed: +34, -74 lines
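
Every hunk below makes the same change: a constexpr local of the form constexpr auto name = "op.overload";, referenced only inside the lambda handed to an ET_SWITCH_* macro, is deleted and the string literal is written inline at each use site. The commit title points at the motivation: the pattern is valid C++, but some MSVC versions reportedly mishandle constexpr locals referenced inside a lambda body. Below is a minimal before/after sketch of the pattern; dispatch() is a hypothetical stand-in for the ET_SWITCH_* macros (which really expand to generated switch code), not ExecuTorch API.

// Minimal before/after sketch (hypothetical; dispatch() stands in for the
// ET_SWITCH_* macros and is not ExecuTorch API).
#include <cstdio>

template <typename Fn>
void dispatch(const char* op_name, Fn&& body) {
  // The real macros splice op_name into generated diagnostics; here we
  // just print it and run the body.
  std::printf("dispatching %s\n", op_name);
  body();
}

void before() {
  // Valid C++: a constexpr local used inside the lambda body without an
  // explicit capture -- the pattern this commit suggests MSVC mishandles.
  constexpr auto name = "gather.out";
  dispatch(name, [&] { std::printf("running %s\n", name); });
}

void after() {
  // The workaround applied in this commit: drop the variable and inline
  // the literal at each use site.
  dispatch("gather.out", [&] { std::printf("running %s\n", "gather.out"); });
}

int main() {
  before();
  after();
  return 0;
}

The cost of the workaround is repeating each literal at every use site; since nothing else referenced the variable, behavior is unchanged.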

kernels/optimized/cpu/op_bmm.cpp

Lines changed: 1 addition & 0 deletions

@@ -150,6 +150,7 @@ Tensor& opt_bmm_out(
   ET_KERNEL_CHECK(
       ctx, check_bmm_out_args(self, mat2, out), InvalidArgument, out);
 
+
   auto self_type = self.scalar_type();
 
   if (executorch::runtime::isComplexType(self_type)) {

kernels/portable/cpu/op_convolution_backward.cpp

Lines changed: 1 addition & 3 deletions

@@ -305,9 +305,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> convolution_backward_out(
         ret_val);
   }
 
-  constexpr auto name = "convolution_backward.out";
-
-  ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
+  ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, "convolution_backward.out", CTYPE, [&]() {
     conv2d_backward_impl<CTYPE>(
         grad_output,
         input,

kernels/portable/cpu/op_gather.cpp

Lines changed: 1 addition & 3 deletions

@@ -85,9 +85,7 @@ Tensor& gather_out(
       InvalidArgument,
       out);
 
-  constexpr auto name = "gather.out";
-
-  ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&]() {
+  ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "gather.out", CTYPE, [&]() {
     gather_helper<CTYPE>(in, index, out, dim);
   });

kernels/portable/cpu/op_max.cpp

Lines changed: 2 additions & 4 deletions

@@ -124,10 +124,8 @@ max_unary_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
 
   ET_KERNEL_CHECK(ctx, canCast(in_type, out_type), InvalidArgument, out);
 
-  constexpr auto name = "max.unary_out";
-
-  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
-    ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
+  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, "max.unary_out", CTYPE_IN, [&] {
+    ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, "max.unary_out", CTYPE_OUT, [&] {
       const auto data_in = in.const_data_ptr<CTYPE_IN>();
       auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
       data_out[0] = lower_bound<CTYPE_OUT>();

kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp

Lines changed: 1 addition & 3 deletions

@@ -169,9 +169,7 @@ Tensor& max_pool2d_with_indices_backward_out(
       InvalidArgument,
       grad_input);
 
-  constexpr auto name = "max_pool2d_with_indices_backward.grad_input";
-
-  ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
+  ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, "max_pool2d_with_indices_backward.grad_input", CTYPE, [&]() {
     max_pool_backward_impl<CTYPE, false>(grad_input, grad_output, indices);
   });

kernels/portable/cpu/op_min.cpp

Lines changed: 2 additions & 4 deletions

@@ -124,10 +124,8 @@ min_unary_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
 
   ET_KERNEL_CHECK(ctx, canCast(in_type, out_type), InvalidArgument, out);
 
-  constexpr auto name = "min.unary_out";
-
-  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
-    ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
+  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, "min.unary_out", CTYPE_IN, [&] {
+    ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, "min.unary_out", CTYPE_OUT, [&] {
      const auto data_in = in.const_data_ptr<CTYPE_IN>();
      auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
      data_out[0] = upper_bound<CTYPE_OUT>();

kernels/portable/cpu/op_native_batch_norm.cpp

Lines changed: 2 additions & 6 deletions

@@ -102,9 +102,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_training_out(
   size_t outer = getLeadingDims(in, C_dim);
   size_t inner = getTrailingDims(in, C_dim);
 
-  constexpr auto name = "native_batch_norm_legit_no_training.out";
-
-  ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] {
+  ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, "native_batch_norm_legit_no_training.out", CTYPE, [&] {
     const CTYPE* in_data = in.const_data_ptr<CTYPE>();
     CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
 
@@ -259,9 +257,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_stats_out(
       InvalidArgument,
       ret_val);
 
-  constexpr auto name = "_native_batch_norm_legit.no_stats_out";
-
-  ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] {
+  ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, "_native_batch_norm_legit.no_stats_out", CTYPE, [&] {
     const CTYPE* in_data = in.const_data_ptr<CTYPE>();
     CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
     CTYPE* mean_data = mean_out.mutable_data_ptr<CTYPE>();

kernels/portable/cpu/op_native_group_norm.cpp

Lines changed: 1 addition & 3 deletions

@@ -190,9 +190,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_group_norm_out(
         ret_val);
   }
 
-  constexpr auto name = "native_group_norm.out";
-
-  ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
+  ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, "native_group_norm.out", CTYPE, [&]() {
     group_norm<CTYPE>(
         input, weight, bias, N, C, HxW, group, eps, out, mean_out, rstd_out);
   });

kernels/portable/cpu/op_pdist_forward.cpp

Lines changed: 1 addition & 2 deletions

@@ -40,10 +40,9 @@ Tensor& _pdist_forward_out(
       out);
 
   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "_pdist_forward.out";
 
   ET_SWITCH_FLOATHBF16_TYPES(
-      in_type, ctx, name, CTYPE, [&] { pdist<CTYPE>(in, out, p); });
+      in_type, ctx, "_pdist_forward.out", CTYPE, [&] { pdist<CTYPE>(in, out, p); });
 
   return out;
 }

kernels/portable/cpu/op_prod.cpp

Lines changed: 4 additions & 6 deletions

@@ -32,10 +32,9 @@ Tensor& prod_out(
 
   ScalarType in_type = in.scalar_type();
   ScalarType out_type = out.scalar_type();
-  constexpr auto name = "prod.int_out";
 
-  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
-    ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
+  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, "prod.int_out", CTYPE_IN, [&] {
+    ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, "prod.int_out", CTYPE_OUT, [&] {
       const auto data_in = in.const_data_ptr<CTYPE_IN>();
       auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
       data_out[0] = static_cast<CTYPE_OUT>(1);
@@ -72,10 +71,9 @@ Tensor& prod_int_out(
 
   ScalarType in_type = in.scalar_type();
   ScalarType out_type = out.scalar_type();
-  constexpr auto name = "prod.int_out";
 
-  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
-    ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
+  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, "prod.int_out", CTYPE_IN, [&] {
+    ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, "prod.int_out", CTYPE_OUT, [&] {
      CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
      const bool success = parallel_for_each_reduce_over_dim_output_index(
          in, dim, out, [&](const auto begin, const auto end) {
