diff --git a/kernels/portable/cpu/op__to_dim_order_copy.cpp b/kernels/portable/cpu/op__to_dim_order_copy.cpp index b6e35f90cdb..eb208908395 100644 --- a/kernels/portable/cpu/op__to_dim_order_copy.cpp +++ b/kernels/portable/cpu/op__to_dim_order_copy.cpp @@ -54,19 +54,15 @@ Tensor& _to_dim_order_copy_out( return out; } - ET_SWITCH_REALHBBF16_TYPES( - self.scalar_type(), - ctx, - "dim_order_ops::_to_dim_order_copy.out", - CTYPE_IN, - [&] { - ET_SWITCH_REALHBBF16_TYPES( - out.scalar_type(), - ctx, - "dim_order_ops::_to_dim_order_copy.out", - CTYPE_OUT, - [&] { _to_dim_order_copy_impl(self, out); }); - }); + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = + "dim_order_ops::_to_dim_order_copy.out"; + + ET_SWITCH_REALHBBF16_TYPES(self.scalar_type(), ctx, op_name, CTYPE_IN, [&] { + ET_SWITCH_REALHBBF16_TYPES(out.scalar_type(), ctx, op_name, CTYPE_OUT, [&] { + _to_dim_order_copy_impl(self, out); + }); + }); return out; } diff --git a/kernels/portable/cpu/op_abs.cpp b/kernels/portable/cpu/op_abs.cpp index 2f45037bce0..42072351a66 100644 --- a/kernels/portable/cpu/op_abs.cpp +++ b/kernels/portable/cpu/op_abs.cpp @@ -37,13 +37,16 @@ Tensor& abs_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) { ET_KERNEL_CHECK( ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out); + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "abs.out"; + if (in_is_complex) { // NOTE: Elected not to add COMPLEXH to dtype_util.h for now // because I am not planning wide rollout of complex support; if // we do add SupportedTensorDtypes::COMPLEXH support, then we // should use it here. 
- ET_SWITCH_COMPLEXH_TYPES(in.scalar_type(), ctx, "abs.out", CTYPE_IN, [&] { - ET_SWITCH_FLOATH_TYPES(out.scalar_type(), ctx, "abs.out", CTYPE_OUT, [&] { + ET_SWITCH_COMPLEXH_TYPES(in.scalar_type(), ctx, op_name, CTYPE_IN, [&] { + ET_SWITCH_FLOATH_TYPES(out.scalar_type(), ctx, op_name, CTYPE_OUT, [&] { apply_unary_map_fn( [](const CTYPE_IN val_in) -> CTYPE_OUT { return sqrt( @@ -55,7 +58,7 @@ Tensor& abs_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) { }); }); } else { - ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "abs.out", CTYPE, [&] { + ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&] { apply_unary_map_fn( [](const CTYPE val_in) { if (val_in < 0) { diff --git a/kernels/portable/cpu/op_amax.cpp b/kernels/portable/cpu/op_amax.cpp index 4ad409d4820..192fad5c908 100644 --- a/kernels/portable/cpu/op_amax.cpp +++ b/kernels/portable/cpu/op_amax.cpp @@ -44,7 +44,11 @@ Tensor& amax_out( ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out); ReduceOverDimListPlan plan(in, dim_list); - ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "amax.out", CTYPE, [&]() { + + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "amax.out"; + + ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&]() { CTYPE* out_data = out.mutable_data_ptr(); const bool success = parallel_for_each_reduce_over_dim_list_output_index( in, dim_list, out, [&](const auto begin, const auto end) { diff --git a/kernels/portable/cpu/op_amin.cpp b/kernels/portable/cpu/op_amin.cpp index 396cb6c016d..d4e9be4f4e0 100644 --- a/kernels/portable/cpu/op_amin.cpp +++ b/kernels/portable/cpu/op_amin.cpp @@ -43,7 +43,11 @@ Tensor& amin_out( ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out); ReduceOverDimListPlan plan(in, dim_list); - ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "amin.out", CTYPE, [&]() { + + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = 
"amin.out"; + + ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&]() { CTYPE* out_data = out.mutable_data_ptr(); const bool success = parallel_for_each_reduce_over_dim_list_output_index( in, dim_list, out, [&](const auto begin, const auto end) { diff --git a/kernels/portable/cpu/op_any.cpp b/kernels/portable/cpu/op_any.cpp index ee9e54fc0c3..8be0993767d 100644 --- a/kernels/portable/cpu/op_any.cpp +++ b/kernels/portable/cpu/op_any.cpp @@ -30,10 +30,12 @@ Tensor& any_all_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) { ScalarType in_type = in.scalar_type(); ScalarType out_type = out.scalar_type(); - constexpr auto name = "any.all_out"; - ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] { - ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, name, CTYPE_OUT, [&] { + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "any.all_out"; + + ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, op_name, CTYPE_IN, [&] { + ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, op_name, CTYPE_OUT, [&] { const auto data_in = in.const_data_ptr(); auto data_out = out.mutable_data_ptr(); data_out[0] = static_cast(false); @@ -79,15 +81,17 @@ Tensor& any_dims_out( ScalarType in_type = in.scalar_type(); ScalarType out_type = out.scalar_type(); - constexpr auto name = "any.dims_out"; + + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "any.dims_out"; const bool in_not_empty = in.numel() > 0; std::optional plan; if ((!dim_list.has_value() || !dim_list.value().empty()) && in_not_empty) { plan.emplace(in, dim_list); } - ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] { - ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, name, CTYPE_OUT, [&] { + ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, op_name, CTYPE_IN, [&] { + ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, op_name, CTYPE_OUT, [&] { CTYPE_OUT* out_data = out.mutable_data_ptr(); if (dim_list.has_value() && dim_list.value().empty()) { 
const CTYPE_IN* in_data = in.const_data_ptr(); @@ -144,10 +148,12 @@ Tensor& any_out( ScalarType in_type = in.scalar_type(); ScalarType out_type = out.scalar_type(); - constexpr auto name = "any.out"; - ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] { - ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, name, CTYPE_OUT, [&] { + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "any.out"; + + ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, op_name, CTYPE_IN, [&] { + ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, op_name, CTYPE_OUT, [&] { CTYPE_OUT* out_data = out.mutable_data_ptr(); const bool success = parallel_for_each_reduce_over_dim_output_index( in, dim, out, [&](const auto begin, const auto end) { diff --git a/kernels/portable/cpu/op_argmax.cpp b/kernels/portable/cpu/op_argmax.cpp index 72881453d39..0e62c049082 100644 --- a/kernels/portable/cpu/op_argmax.cpp +++ b/kernels/portable/cpu/op_argmax.cpp @@ -44,7 +44,10 @@ Tensor& argmax_out( ET_KERNEL_CHECK( ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out); - ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "argmax.out", CTYPE, [&] { + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "argmax.out"; + + ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&] { long* out_data = out.mutable_data_ptr(); const bool success = parallel_for_each_reduce_over_dim_output_index( diff --git a/kernels/portable/cpu/op_argmin.cpp b/kernels/portable/cpu/op_argmin.cpp index 4e661c68694..d422610769f 100644 --- a/kernels/portable/cpu/op_argmin.cpp +++ b/kernels/portable/cpu/op_argmin.cpp @@ -44,7 +44,10 @@ Tensor& argmin_out( ET_KERNEL_CHECK( ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out); - ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "argmin.out", CTYPE, [&] { + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "argmin.out"; + + 
ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&] { long* out_data = out.mutable_data_ptr(); const bool success = parallel_for_each_reduce_over_dim_output_index( diff --git a/kernels/portable/cpu/op_avg_pool2d.cpp b/kernels/portable/cpu/op_avg_pool2d.cpp index e41c1fa1afa..0533ac4bdca 100644 --- a/kernels/portable/cpu/op_avg_pool2d.cpp +++ b/kernels/portable/cpu/op_avg_pool2d.cpp @@ -67,53 +67,56 @@ Tensor& avg_pool2d_out( out); ScalarType in_type = in.scalar_type(); - ET_SWITCH_FLOATHBF16_TYPES_AND( - Long, in_type, ctx, "avg_pool2d.out", CTYPE, [&]() { - if (divisor_override.has_value()) { - int64_t divisor = divisor_override.value(); - // If divisor_override is specified, then we don't need to use `count` - // in the calculation. Simply sum x / divisor to get the output. - apply_kernel_2d_reduce_then_map_fn( - [](const CTYPE in_val, - int64_t in_idx, - CTYPE accum, - int64_t accum_idx) { - // Average pooling does not track indexes, so return 0 for - // accum_idx - return std::tuple(in_val + accum, 0); - }, - [divisor](const int64_t count, const CTYPE accum) { - return accum / static_cast(divisor); - }, - count_include_pad, - in, - kernel_size, - stride, - padding, - {}, - out); - } else { - apply_kernel_2d_reduce_then_map_fn( - [](const CTYPE in_val, - int64_t in_idx, - CTYPE accum, - int64_t accum_idx) { - // Average pooling does not track indexes, so return 0 for - // accum_idx - return std::tuple(in_val + accum, 0); - }, - [](const int64_t count, const CTYPE accum) { - return accum / static_cast(count); - }, - count_include_pad, - in, - kernel_size, - stride, - padding, - {}, - out); - } - }); + + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "avg_pool2d.out"; + + ET_SWITCH_FLOATHBF16_TYPES_AND(Long, in_type, ctx, op_name, CTYPE, [&]() { + if (divisor_override.has_value()) { + int64_t divisor = divisor_override.value(); + // If divisor_override is specified, then we don't need to use `count` + // in 
the calculation. Simply sum x / divisor to get the output. + apply_kernel_2d_reduce_then_map_fn( + [](const CTYPE in_val, + int64_t in_idx, + CTYPE accum, + int64_t accum_idx) { + // Average pooling does not track indexes, so return 0 for + // accum_idx + return std::tuple(in_val + accum, 0); + }, + [divisor](const int64_t count, const CTYPE accum) { + return accum / static_cast(divisor); + }, + count_include_pad, + in, + kernel_size, + stride, + padding, + {}, + out); + } else { + apply_kernel_2d_reduce_then_map_fn( + [](const CTYPE in_val, + int64_t in_idx, + CTYPE accum, + int64_t accum_idx) { + // Average pooling does not track indexes, so return 0 for + // accum_idx + return std::tuple(in_val + accum, 0); + }, + [](const int64_t count, const CTYPE accum) { + return accum / static_cast(count); + }, + count_include_pad, + in, + kernel_size, + stride, + padding, + {}, + out); + } + }); return out; } diff --git a/kernels/portable/cpu/op_bitwise_not.cpp b/kernels/portable/cpu/op_bitwise_not.cpp index c28cb374300..6a074762caa 100644 --- a/kernels/portable/cpu/op_bitwise_not.cpp +++ b/kernels/portable/cpu/op_bitwise_not.cpp @@ -37,6 +37,8 @@ bitwise_not_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) { ET_KERNEL_CHECK( ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out); + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "bitwise_not.out"; if (in.scalar_type() == executorch::aten::ScalarType::Bool) { apply_unary_map_fn( [](const bool val_in) { return !val_in; }, @@ -44,7 +46,7 @@ bitwise_not_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) { out.mutable_data_ptr(), in.numel()); } else if (isIntegralType(in.scalar_type(), /*includeBool=*/false)) { - ET_SWITCH_INT_TYPES(in.scalar_type(), ctx, "bitwise_not.out", CTYPE, [&] { + ET_SWITCH_INT_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&] { apply_unary_map_fn( [](const CTYPE val_in) { return ~val_in; }, in.const_data_ptr(), diff --git 
a/kernels/portable/cpu/op_bmm.cpp b/kernels/portable/cpu/op_bmm.cpp index a887cd3c926..060b92a0da2 100644 --- a/kernels/portable/cpu/op_bmm.cpp +++ b/kernels/portable/cpu/op_bmm.cpp @@ -36,16 +36,17 @@ Tensor& bmm_out( InvalidArgument, out); - constexpr auto name = "bmm.out"; + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "bmm.out"; auto in_type = in.scalar_type(); if (executorch::runtime::isComplexType(in_type)) { - ET_SWITCH_COMPLEXH_TYPES(in_type, ctx, name, CTYPE, [&]() { + ET_SWITCH_COMPLEXH_TYPES(in_type, ctx, op_name, CTYPE, [&]() { internal::bmm_out_impl(in, mat2, out); }); } else { - ET_SWITCH_REALH_TYPES(in_type, ctx, name, CTYPE, [&]() { + ET_SWITCH_REALH_TYPES(in_type, ctx, op_name, CTYPE, [&]() { internal::bmm_out_impl(in, mat2, out); }); } diff --git a/kernels/portable/cpu/op_cat.cpp b/kernels/portable/cpu/op_cat.cpp index 5b0a308bda5..ab15d5249df 100644 --- a/kernels/portable/cpu/op_cat.cpp +++ b/kernels/portable/cpu/op_cat.cpp @@ -59,6 +59,9 @@ Tensor& cat_out( const bool out_is_complex = executorch::runtime::isComplexType(out.scalar_type()); + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "cat.out"; + if (out_is_complex) { // TODO: The current support for complex dtype enforces that input and // output tensors have the same dtype. Support mixed dtypes in the future. 
@@ -66,7 +69,7 @@ Tensor& cat_out( const auto in_type = tensors[i].scalar_type(); ET_KERNEL_CHECK(ctx, out_type == in_type, InvalidArgument, out); } - ET_SWITCH_COMPLEXH_TYPES(out_type, ctx, "cat.out", CTYPE, [&] { + ET_SWITCH_COMPLEXH_TYPES(out_type, ctx, op_name, CTYPE, [&] { CTYPE* out_ptr = out.mutable_data_ptr(); for (size_t i = 0; i < outer; ++i) { for (size_t j = 0; j < ninputs; ++j) { @@ -82,12 +85,12 @@ Tensor& cat_out( } }); } else { - ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, "cat.out", CTYPE_OUT, [&] { + ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, op_name, CTYPE_OUT, [&] { CTYPE_OUT* out_ptr = out.mutable_data_ptr(); for (size_t i = 0; i < outer; ++i) { for (size_t j = 0; j < ninputs; ++j) { const auto in_type = tensors[j].scalar_type(); - ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, "cat.out", CTYPE_IN, [&] { + ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, op_name, CTYPE_IN, [&] { if (tensors[j].numel() == 0) { return; } diff --git a/kernels/portable/cpu/op_cdist_forward.cpp b/kernels/portable/cpu/op_cdist_forward.cpp index 3e82584f820..c4a026f9e29 100644 --- a/kernels/portable/cpu/op_cdist_forward.cpp +++ b/kernels/portable/cpu/op_cdist_forward.cpp @@ -160,10 +160,12 @@ Tensor& _cdist_forward_out( out); ScalarType out_type = out.scalar_type(); - constexpr auto name = "_cdist_forward.out"; + + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "_cdist_forward.out"; ET_SWITCH_FLOATHBF16_TYPES( - out_type, ctx, name, CTYPE, [&] { cdist(x1, x2, out, p); }); + out_type, ctx, op_name, CTYPE, [&] { cdist(x1, x2, out, p); }); return out; } diff --git a/kernels/portable/cpu/op_clamp.cpp b/kernels/portable/cpu/op_clamp.cpp index c2b9c73f2ea..31d4b8fdf56 100644 --- a/kernels/portable/cpu/op_clamp.cpp +++ b/kernels/portable/cpu/op_clamp.cpp @@ -40,16 +40,19 @@ ET_NODISCARD bool check_bounds( const char* val_name) { auto is_valid = true; + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "clamp.out"; 
+ if (isIntegralType(out_type, /*includeBool=*/false)) { const long val_long = utils::scalar_to(val_scalar); - ET_SWITCH_INT_TYPES(out_type, ctx, "clamp.out", CTYPE_OUT, [&]() { + ET_SWITCH_INT_TYPES(out_type, ctx, op_name, CTYPE_OUT, [&]() { if (is_out_of_bounds(val_long)) { ET_LOG(Error, "%s value out of bounds", val_name); is_valid = false; } }); } else if (isFloatingType(out_type)) { - ET_SWITCH_FLOATHBF16_TYPES(out_type, ctx, "clamp.out", CTYPE_OUT, [&]() { + ET_SWITCH_FLOATHBF16_TYPES(out_type, ctx, op_name, CTYPE_OUT, [&]() { const double val_double = utils::scalar_to(val_scalar); if (std::isfinite(val_double) && is_out_of_bounds(val_double)) { diff --git a/kernels/portable/cpu/op_constant_pad_nd.cpp b/kernels/portable/cpu/op_constant_pad_nd.cpp index be3962e018c..7da10456e58 100644 --- a/kernels/portable/cpu/op_constant_pad_nd.cpp +++ b/kernels/portable/cpu/op_constant_pad_nd.cpp @@ -184,7 +184,10 @@ Tensor& constant_pad_nd_out( ScalarType in_type = in.scalar_type(); - ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, "constant_pad_nd.out", CTYPE, [&]() { + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "constant_pad_nd.out"; + + ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, op_name, CTYPE, [&]() { auto opt_value_casted = utils::internal::check_overflow_scalar_cast(value); ET_KERNEL_CHECK(ctx, opt_value_casted.has_value(), InvalidArgument, ); diff --git a/kernels/portable/cpu/op_copy.cpp b/kernels/portable/cpu/op_copy.cpp index 41a13ed0b38..968231fc42e 100644 --- a/kernels/portable/cpu/op_copy.cpp +++ b/kernels/portable/cpu/op_copy.cpp @@ -52,7 +52,7 @@ Tensor& copy_out( src.numel() > 0) { std::memcpy(out.mutable_data_ptr(), src.const_data_ptr(), src.nbytes()); } else { - ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "copy.out", CTYPE, [&]() { + ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&]() { utils::apply_bitensor_elementwise_fn< CTYPE, op_name, @@ -94,7 +94,7 @@ Tensor& copy_( src.numel() > 0) { 
std::memcpy(in.mutable_data_ptr(), src.const_data_ptr(), in.nbytes()); } else { - ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "copy_", CTYPE, [&]() { + ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&]() { utils::apply_bitensor_elementwise_fn< CTYPE, op_name, diff --git a/kernels/portable/cpu/op_diagonal_copy.cpp b/kernels/portable/cpu/op_diagonal_copy.cpp index 6eb0569e3c2..769d53e948b 100644 --- a/kernels/portable/cpu/op_diagonal_copy.cpp +++ b/kernels/portable/cpu/op_diagonal_copy.cpp @@ -98,9 +98,10 @@ Tensor& diagonal_copy_out( InvalidArgument, out); - constexpr auto name = "diagonal_copy.out"; + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "diagonal_copy.out"; - ET_SWITCH_ALL_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] { + ET_SWITCH_ALL_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&] { diagonal_copy_impl(in, offset, dim1, dim2, out); }); diff --git a/kernels/portable/cpu/op_embedding.cpp b/kernels/portable/cpu/op_embedding.cpp index acde09ebdc5..289369faad9 100644 --- a/kernels/portable/cpu/op_embedding.cpp +++ b/kernels/portable/cpu/op_embedding.cpp @@ -116,10 +116,12 @@ Tensor& embedding_out( ix_type == ScalarType::Long || ix_type == ScalarType::Int, "Expected indices tensor to have Long or Int scalar types"); - ET_SWITCH_TWO_TYPES( - Long, Int, ix_type, ctx, "op_embedding.out", CTYPE, [&]() { - embedding_kernel(ctx, weight, indices, out); - }); + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "op_embedding.out"; + + ET_SWITCH_TWO_TYPES(Long, Int, ix_type, ctx, op_name, CTYPE, [&]() { + embedding_kernel(ctx, weight, indices, out); + }); return out; } diff --git a/kernels/portable/cpu/op_fill.cpp b/kernels/portable/cpu/op_fill.cpp index 8d98aa8bb7f..6c7032a3b41 100644 --- a/kernels/portable/cpu/op_fill.cpp +++ b/kernels/portable/cpu/op_fill.cpp @@ -41,7 +41,10 @@ Tensor& fill_scalar_out( out, "Failed to resize output tensor."); - 
 ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, "fill.Scalar_out", CTYPE_A, [&] { + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "fill.Scalar_out"; + + ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, op_name, CTYPE_A, [&] { auto opt_b_casted = utils::internal::check_overflow_scalar_cast(b); ET_KERNEL_CHECK(ctx, opt_b_casted.has_value(), InvalidArgument, ); auto b_casted = opt_b_casted.value(); @@ -83,9 +86,12 @@ Tensor& fill_tensor_out( out, "Failed to resize output tensor."); - ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, "fill.Tensor_out", CTYPE_A, [&] { + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "fill.Tensor_out"; + + ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, op_name, CTYPE_A, [&] { CTYPE_A b_casted; - ET_SWITCH_REALHBBF16_TYPES(b_type, ctx, "fill.Tensor_out", CTYPE_B, [&] { + ET_SWITCH_REALHBBF16_TYPES(b_type, ctx, op_name, CTYPE_B, [&] { CTYPE_B b_val; ET_EXTRACT_SCALAR_TENSOR(b, b_val); b_casted = static_cast(b_val); diff --git a/kernels/portable/cpu/op_flip.cpp b/kernels/portable/cpu/op_flip.cpp index 8ad122b7e7e..41ec6663714 100644 --- a/kernels/portable/cpu/op_flip.cpp +++ b/kernels/portable/cpu/op_flip.cpp @@ -65,9 +65,10 @@ Tensor& flip_out( size_t flip_dim_length = static_cast(in.dim()); // NOLINT ArrayRef flip_dim(flip_dim_data, flip_dim_length); - constexpr auto name = "flip.out"; + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "flip.out"; - ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] { + ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&] { const CTYPE* in_data = in.const_data_ptr(); CTYPE* out_data = out.mutable_data_ptr(); diff --git a/kernels/portable/cpu/op_full.cpp b/kernels/portable/cpu/op_full.cpp index b83637f2b91..c47ba61ce4c 100644 --- a/kernels/portable/cpu/op_full.cpp +++ b/kernels/portable/cpu/op_full.cpp @@ -34,9 +34,10 @@ Tensor& full_out( out, "Failed to resize output tensor."); -
constexpr auto name = "full.out"; + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "full.out"; - ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] { + ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, op_name, CTYPE_OUT, [&] { auto opt_val_casted = utils::internal::check_overflow_scalar_cast(fill_value); ET_KERNEL_CHECK(ctx, opt_val_casted.has_value(), InvalidArgument, ); diff --git a/kernels/portable/cpu/op_full_like.cpp b/kernels/portable/cpu/op_full_like.cpp index 213e1f38d9a..5fefd53c30b 100644 --- a/kernels/portable/cpu/op_full_like.cpp +++ b/kernels/portable/cpu/op_full_like.cpp @@ -50,9 +50,10 @@ Tensor& full_like_out( ScalarType out_type = out.scalar_type(); - constexpr auto name = "full_like.out"; + // @lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "full_like.out"; - ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] { + ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, op_name, CTYPE_OUT, [&] { auto opt_val_casted = utils::internal::check_overflow_scalar_cast(fill_value); ET_KERNEL_CHECK(ctx, opt_val_casted.has_value(), InvalidArgument, );