Commit 8be08b9

MSVC ops changes (pytorch#15226)
### Summary

MSVC really doesn't like the constexpr variables that I've removed in this PR: it was complaining about them not actually being static at compile time. Searching around suggests that it's the act of putting the value in a variable that breaks it, and that placing the value directly in all the use sites should be fine. I tried that and it works. The other changes fix up some compiler flags, add a missing include, fix an interaction between lambdas, the ternary operator, and auto, and remove unneeded statement expressions.

### Test plan

Current CI to verify no regression on Linux/Mac. Manual testing of MSVC; will add CI soon.
1 parent 7ce673a commit 8be08b9

28 files changed, +73 −62 lines changed
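To make the MSVC complaint above concrete, here is a minimal sketch of the pattern these kernels hit and the fix applied in the hunks below. `switch_over_types` is a hypothetical stand-in for the `ET_SWITCH_*` macros (which invoke a per-dtype lambda); the real kernels differ, but the constexpr-local-used-in-a-lambda shape is the same:

```cpp
#include <cstdio>

// Hypothetical stand-in for the ET_SWITCH_* macros: like the real macros,
// it simply invokes the per-dtype functor it is given.
template <typename Fn>
void switch_over_types(Fn&& fn) {
  fn();
}

void before_fix() {
  // The shape MSVC objected to in these kernels: a plain function-local
  // constexpr variable that is then referenced inside a lambda body.
  constexpr auto name = "bmm.out";
  switch_over_types([&]() { std::printf("running %s\n", name); });
}

void after_fix() {
  // The fix used throughout this commit: mark the variable static constexpr
  // (elsewhere in the PR the variable is dropped and the literal is repeated
  // at each use site instead).
  static constexpr auto name = "bmm.out";
  switch_over_types([&]() { std::printf("running %s\n", name); });
}

int main() {
  before_fix();
  after_fix();
  return 0;
}
```

The lambda/ternary/auto interaction mentioned in the summary is not visible in the hunks below, so the sketch here shows only one common form of that problem, with made-up names rather than the actual kernel code: each lambda expression has its own distinct closure type, so a conditional expression cannot deduce a single type from two different lambdas, and `auto` has nothing to deduce from.

```cpp
#include <cstdint>
#include <cstdio>

int64_t apply(int64_t x, bool negate) {
  // Fails to compile: the two lambdas have unrelated closure types, so the
  // conditional expression has no common type and `auto` cannot deduce one.
  //
  //   auto op = negate ? [](int64_t v) { return -v; }
  //                    : [](int64_t v) { return v; };
  //
  // Selecting between already-named lambdas (or a plain if/else) avoids the
  // deduction problem entirely.
  auto neg = [](int64_t v) { return -v; };
  auto id = [](int64_t v) { return v; };
  return negate ? neg(x) : id(x);
}

int main() {
  std::printf("%lld\n", static_cast<long long>(apply(3, true)));  // prints -3
  return 0;
}
```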

kernels/optimized/CMakeLists.txt

Lines changed: 4 additions & 1 deletion
@@ -21,7 +21,10 @@ if(NOT EXECUTORCH_ROOT)
   set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../..)
 endif()
 
-set(_common_compile_options -Wno-deprecated-declarations)
+set(_common_compile_options
+    $<$<CXX_COMPILER_ID:MSVC>:/wd4996>
+    $<$<NOT:$<CXX_COMPILER_ID:MSVC>>:-Wno-deprecated-declarations>
+)
 
 # Note for apple platform we can rely on Accelerate framework Will come back to
 # this

kernels/optimized/cpu/op_bmm.cpp

Lines changed: 2 additions & 1 deletion
@@ -150,7 +150,8 @@ Tensor& opt_bmm_out(
   ET_KERNEL_CHECK(
       ctx, check_bmm_out_args(self, mat2, out), InvalidArgument, out);
 
-  constexpr auto name = "bmm.out";
+  static constexpr auto name = "bmm.out";
+
   auto self_type = self.scalar_type();
 
   if (executorch::runtime::isComplexType(self_type)) {

kernels/portable/cpu/op_convolution_backward.cpp

Lines changed: 1 addition & 1 deletion
@@ -305,7 +305,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> convolution_backward_out(
         ret_val);
   }
 
-  constexpr auto name = "convolution_backward.out";
+  static constexpr auto name = "convolution_backward.out";
 
   ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
     conv2d_backward_impl<CTYPE>(

kernels/portable/cpu/op_gather.cpp

Lines changed: 1 addition & 1 deletion
@@ -85,7 +85,7 @@ Tensor& gather_out(
       InvalidArgument,
       out);
 
-  constexpr auto name = "gather.out";
+  static constexpr auto name = "gather.out";
 
   ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&]() {
     gather_helper<CTYPE>(in, index, out, dim);

kernels/portable/cpu/op_masked_scatter.cpp

Lines changed: 3 additions & 3 deletions
@@ -41,13 +41,13 @@ Tensor& masked_scatter_out(
       InvalidArgument,
       out);
 
-  constexpr auto op_name = "masked_scatter.out";
-
   int64_t idx = 0;
   int64_t src_numel = src.numel();
   bool src_numel_check = true;
 
-  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, op_name, CTYPE, [&]() {
+  static constexpr auto name = "masked_scatter.out";
+
+  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE, [&]() {
     const CTYPE* const src_data = src.const_data_ptr<CTYPE>();
     apply_binary_elementwise_fn<CTYPE, bool, CTYPE>(
         [src_data, &idx, &src_numel, &src_numel_check](

kernels/portable/cpu/op_max.cpp

Lines changed: 1 addition & 1 deletion
@@ -124,7 +124,7 @@ max_unary_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
 
   ET_KERNEL_CHECK(ctx, canCast(in_type, out_type), InvalidArgument, out);
 
-  constexpr auto name = "max.unary_out";
+  static constexpr auto name = "max.unary_out";
 
   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {

kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp

Lines changed: 1 addition & 1 deletion
@@ -169,7 +169,7 @@ Tensor& max_pool2d_with_indices_backward_out(
       InvalidArgument,
       grad_input);
 
-  constexpr auto name = "max_pool2d_with_indices_backward.grad_input";
+  static constexpr auto name = "max_pool2d_with_indices_backward.grad_input";
 
   ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
     max_pool_backward_impl<CTYPE, false>(grad_input, grad_output, indices);

kernels/portable/cpu/op_min.cpp

Lines changed: 1 addition & 1 deletion
@@ -124,7 +124,7 @@ min_unary_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
 
   ET_KERNEL_CHECK(ctx, canCast(in_type, out_type), InvalidArgument, out);
 
-  constexpr auto name = "min.unary_out";
+  static constexpr auto name = "min.unary_out";
 
   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {

kernels/portable/cpu/op_native_batch_norm.cpp

Lines changed: 2 additions & 2 deletions
@@ -102,7 +102,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_training_out(
   size_t outer = getLeadingDims(in, C_dim);
   size_t inner = getTrailingDims(in, C_dim);
 
-  constexpr auto name = "native_batch_norm_legit_no_training.out";
+  static constexpr auto name = "native_batch_norm_legit_no_training.out";
 
   ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] {
     const CTYPE* in_data = in.const_data_ptr<CTYPE>();
@@ -259,7 +259,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_stats_out(
       InvalidArgument,
       ret_val);
 
-  constexpr auto name = "_native_batch_norm_legit.no_stats_out";
+  static constexpr auto name = "_native_batch_norm_legit.no_stats_out";
 
   ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] {
     const CTYPE* in_data = in.const_data_ptr<CTYPE>();

kernels/portable/cpu/op_native_group_norm.cpp

Lines changed: 1 addition & 1 deletion
@@ -190,7 +190,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_group_norm_out(
       ret_val);
   }
 
-  constexpr auto name = "native_group_norm.out";
+  static constexpr auto name = "native_group_norm.out";
 
   ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
     group_norm<CTYPE>(
