Skip to content

Commit 9bc4b60

Browse files
committed
Fix warning when building kernel libraries
Getting warnings like the following: ``` /home/larryliu/executorch/kernels/portable/cpu/op_maximum.cpp: In lambda function: /home/larryliu/executorch/kernels/portable/cpu/op_maximum.cpp:52:9: note: the ABI for passing parameters with 32-byte alignment has changed in GCC 4.6 52 | [](const auto val_a, const auto val_b) { | ^ /home/larryliu/executorch/../executorch/runtime/core/exec_aten/util/scalar_type_util.h:919:7: note: in definition of macro 'ET_INTERNAL_SWITCH' 919 | __VA_ARGS__ \ | ^~~~~~~~~~~ /home/larryliu/executorch/../executorch/runtime/core/exec_aten/util/scalar_type_util.h:931:3: note: in expansion of macro 'ET_INTERNAL_SWITCH_CASE' 931 | ET_INTERNAL_SWITCH_CASE( \ | ^~~~~~~~~~~~~~~~~~~~~~~ /home/larryliu/executorch/../executorch/runtime/core/exec_aten/util/scalar_type_util.h:957:3: note: in expansion of macro 'ET_INTERNAL_SWITCH_CASE_INT_TYPES' 957 | ET_INTERNAL_SWITCH_CASE_INT_TYPES(CTYPE_ALIAS, __VA_ARGS__) \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /home/larryliu/executorch/../executorch/runtime/core/exec_aten/util/scalar_type_util.h:1008:3: note: in expansion of macro 'ET_INTERNAL_SWITCH_CASE_REAL_TYPES' 1008 | ET_INTERNAL_SWITCH_CASE_REAL_TYPES(CTYPE_ALIAS, __VA_ARGS__) \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /home/larryliu/executorch/../executorch/runtime/core/exec_aten/util/scalar_type_util.h:1136:7: note: in expansion of macro 'ET_INTERNAL_SWITCH_CASE_REAL_TYPES_AND' 1136 | ET_INTERNAL_SWITCH_CASE_REAL_TYPES_AND( \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /home/larryliu/executorch/../executorch/runtime/core/exec_aten/util/scalar_type_util.h:1172:3: note: in expansion of macro 'ET_SWITCH_REAL_TYPES_AND' 1172 | ET_SWITCH_REAL_TYPES_AND(Bool, TYPE, CONTEXT, NAME, CTYPE_ALIAS, __VA_ARGS__) | ^~~~~~~~~~~~~~~~~~~~~~~~ /home/larryliu/executorch/kernels/portable/cpu/op_maximum.cpp:47:3: note: in expansion of macro 'ET_SWITCH_REALB_TYPES' 47 | ET_SWITCH_REALB_TYPES(compute_type, ctx, op_name, CTYPE_COMPUTE, [&]() { | ^~~~~~~~~~~~~~~~~~~~~ ``` Fixing them in 
this PR.
1 parent 72b1fa1 commit 9bc4b60

23 files changed

+49
-28
lines changed

kernels/optimized/cpu/op_add.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ Tensor& opt_add_out(
6868

6969
using Vec = at::vec::Vectorized<CTYPE>;
7070
at::vec::map<CTYPE>(
71-
[alpha_val, b_val](Vec x) { return x + Vec(alpha_val * b_val); },
71+
[alpha_val, b_val](Vec& x) { return x + Vec(alpha_val * b_val); },
7272
out.mutable_data_ptr<CTYPE>(),
7373
a.const_data_ptr<CTYPE>(),
7474
out.numel());

kernels/optimized/cpu/op_div.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -87,14 +87,14 @@ Tensor& opt_div_out(
8787
using Vec = at::vec::Vectorized<CTYPE>;
8888
if (a.numel() == 1) {
8989
at::vec::map<CTYPE>(
90-
[scalar_casted](Vec x) { return Vec(scalar_casted) / x; },
90+
[scalar_casted](Vec& x) { return Vec(scalar_casted) / x; },
9191
out.mutable_data_ptr<CTYPE>(),
9292
tensor->const_data_ptr<CTYPE>(),
9393
out.numel());
9494
} else {
9595
Vec inv_scalar_casted_vec(CTYPE(1) / scalar_casted);
9696
at::vec::map<CTYPE>(
97-
[inv_scalar_casted_vec](Vec x) {
97+
[inv_scalar_casted_vec](Vec& x) {
9898
return x * inv_scalar_casted_vec;
9999
},
100100
out.mutable_data_ptr<CTYPE>(),

kernels/optimized/cpu/op_exp.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ void exp_data(
3636
CTYPE_OUT* out_data) {
3737
using Vec = at::vec::Vectorized<CTYPE_IN>;
3838
at::vec::map<CTYPE_IN>(
39-
[](Vec x) { return x.exp(); }, out_data, in_data, numel);
39+
[](Vec& x) { return x.exp(); }, out_data, in_data, numel);
4040
}
4141

4242
/**

kernels/optimized/cpu/op_le.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ Tensor& opt_le_tensor_out(
4848
ET_SWITCH_REALB_TYPES(a_type, ctx, op_name, CTYPE, [&]() {
4949
using Vec = at::vec::Vectorized<CTYPE>;
5050
at::vec::map2<CTYPE>(
51-
[](Vec x, Vec y) { return x.le(y); },
51+
[](Vec& x, Vec& y) { return x.le(y); },
5252
out.mutable_data_ptr<CTYPE>(),
5353
a.const_data_ptr<CTYPE>(),
5454
b.const_data_ptr<CTYPE>(),

kernels/optimized/cpu/op_log_softmax.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ void log_softmax_kernel(const Tensor& input, int64_t dim, Tensor& out) {
5555
0,
5656
outer_size,
5757
::executorch::extension::internal::GRAIN_SIZE,
58-
[&](const auto begin, const auto end) {
58+
[&](const auto& begin, const auto& end) {
5959
at::native::serial_vec_log_softmax_lastdim_range(
6060
input_data_base,
6161
output_data_base,

kernels/optimized/cpu/op_mul.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ Tensor& opt_mul_out(
5656

5757
using Vec = at::vec::Vectorized<CTYPE>;
5858
at::vec::map<CTYPE>(
59-
[b_casted](Vec x) { return x * Vec(b_casted); },
59+
[b_casted](Vec& x) { return x * Vec(b_casted); },
6060
out.mutable_data_ptr<CTYPE>(),
6161
a.const_data_ptr<CTYPE>(),
6262
out.numel());

kernels/optimized/cpu/op_native_layer_norm.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ void layer_norm(
9292
}
9393
} else {
9494
at::vec::map3<CTYPE>(
95-
[scale, offset](auto x, auto gamma, auto beta) {
95+
[scale, offset](auto& x, auto& gamma, auto& beta) {
9696
using Vec = decltype(x);
9797
return (x * Vec(scale) + Vec(offset)) * gamma + beta;
9898
},

kernels/optimized/cpu/op_sub.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -86,15 +86,15 @@ Tensor& opt_sub_out(
8686
using Vec = at::vec::Vectorized<CTYPE>;
8787
if (a.numel() == 1) {
8888
at::vec::map<CTYPE>(
89-
[alpha_val, scalar_casted](Vec x) {
89+
[alpha_val, scalar_casted](Vec& x) {
9090
return Vec(scalar_casted) - Vec(alpha_val) * x;
9191
},
9292
out.mutable_data_ptr<CTYPE>(),
9393
tensor->const_data_ptr<CTYPE>(),
9494
out.numel());
9595
} else {
9696
at::vec::map<CTYPE>(
97-
[alpha_val, scalar_casted](Vec x) {
97+
[alpha_val, scalar_casted](Vec& x) {
9898
return x - Vec(alpha_val * scalar_casted);
9999
},
100100
out.mutable_data_ptr<CTYPE>(),

kernels/portable/cpu/op_add.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ Tensor& add_out(
8080
CTYPE_COMPUTE,
8181
op_name,
8282
utils::SupportedTensorDtypes::REALHBBF16>(
83-
[val_alpha](const auto val_a, const auto val_b) {
83+
[val_alpha](const auto& val_a, const auto& val_b) {
8484
return val_a + val_alpha * val_b;
8585
},
8686
ctx,
@@ -136,7 +136,7 @@ Tensor& add_scalar_out(
136136
CTYPE_COMPUTE,
137137
op_name,
138138
utils::SupportedTensorDtypes::SAME_AS_COMMON>(
139-
[val_alpha_times_b](const auto val_a) {
139+
[val_alpha_times_b](const auto& val_a) {
140140
// Cast here supports vectorization; either it does nothing
141141
// or it casts from CTYPE_COMPUTE to
142142
// Vectorized<CTYPE_COMPUTE>.

kernels/portable/cpu/op_addmm.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ Tensor& addmm_out(
9292
CTYPE,
9393
op_name,
9494
utils::SupportedTensorDtypes::REALHBF16>(
95-
[alpha_val, beta_val](const auto val_a, const auto val_b) {
95+
[alpha_val, beta_val](const auto& val_a, const auto& val_b) {
9696
return val_a * alpha_val + val_b * beta_val;
9797
},
9898
ctx,

0 commit comments

Comments
 (0)