Skip to content

Commit 83cf868

Browse files
committed
add static
1 parent 7857821 commit 83cf868

21 files changed

+25
-25
lines changed

kernels/portable/cpu/op_convolution_backward.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -305,7 +305,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> convolution_backward_out(
305305
ret_val);
306306
}
307307

308-
constexpr auto name = "convolution_backward.out";
308+
static constexpr auto name = "convolution_backward.out";
309309

310310
ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
311311
conv2d_backward_impl<CTYPE>(

kernels/portable/cpu/op_gather.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ Tensor& gather_out(
8585
InvalidArgument,
8686
out);
8787

88-
constexpr auto name = "gather.out";
88+
static constexpr auto name = "gather.out";
8989

9090
ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&]() {
9191
gather_helper<CTYPE>(in, index, out, dim);

kernels/portable/cpu/op_max.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,7 @@ max_unary_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
124124

125125
ET_KERNEL_CHECK(ctx, canCast(in_type, out_type), InvalidArgument, out);
126126

127-
constexpr auto name = "max.unary_out";
127+
static constexpr auto name = "max.unary_out";
128128

129129
ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
130130
ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {

kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ Tensor& max_pool2d_with_indices_backward_out(
169169
InvalidArgument,
170170
grad_input);
171171

172-
constexpr auto name = "max_pool2d_with_indices_backward.grad_input";
172+
static constexpr auto name = "max_pool2d_with_indices_backward.grad_input";
173173

174174
ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
175175
max_pool_backward_impl<CTYPE, false>(grad_input, grad_output, indices);

kernels/portable/cpu/op_min.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,7 @@ min_unary_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
124124

125125
ET_KERNEL_CHECK(ctx, canCast(in_type, out_type), InvalidArgument, out);
126126

127-
constexpr auto name = "min.unary_out";
127+
static constexpr auto name = "min.unary_out";
128128

129129
ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
130130
ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {

kernels/portable/cpu/op_native_batch_norm.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_training_out(
102102
size_t outer = getLeadingDims(in, C_dim);
103103
size_t inner = getTrailingDims(in, C_dim);
104104

105-
constexpr auto name = "native_batch_norm_legit_no_training.out";
105+
static constexpr auto name = "native_batch_norm_legit_no_training.out";
106106

107107
ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] {
108108
const CTYPE* in_data = in.const_data_ptr<CTYPE>();
@@ -259,7 +259,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_stats_out(
259259
InvalidArgument,
260260
ret_val);
261261

262-
constexpr auto name = "_native_batch_norm_legit.no_stats_out";
262+
static constexpr auto name = "_native_batch_norm_legit.no_stats_out";
263263

264264
ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] {
265265
const CTYPE* in_data = in.const_data_ptr<CTYPE>();

kernels/portable/cpu/op_native_group_norm.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -190,7 +190,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_group_norm_out(
190190
ret_val);
191191
}
192192

193-
constexpr auto name = "native_group_norm.out";
193+
static constexpr auto name = "native_group_norm.out";
194194

195195
ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
196196
group_norm<CTYPE>(

kernels/portable/cpu/op_pdist_forward.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ Tensor& _pdist_forward_out(
4040
out);
4141

4242
ScalarType in_type = in.scalar_type();
43-
constexpr auto name = "_pdist_forward.out";
43+
static constexpr auto name = "_pdist_forward.out";
4444

4545
ET_SWITCH_FLOATHBF16_TYPES(
4646
in_type, ctx, name, CTYPE, [&] { pdist<CTYPE>(in, out, p); });

kernels/portable/cpu/op_prod.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ Tensor& prod_out(
3232

3333
ScalarType in_type = in.scalar_type();
3434
ScalarType out_type = out.scalar_type();
35-
constexpr auto name = "prod.int_out";
35+
static constexpr auto name = "prod.int_out";
3636

3737
ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
3838
ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
@@ -72,7 +72,7 @@ Tensor& prod_int_out(
7272

7373
ScalarType in_type = in.scalar_type();
7474
ScalarType out_type = out.scalar_type();
75-
constexpr auto name = "prod.int_out";
75+
static constexpr auto name = "prod.int_out";
7676

7777
ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
7878
ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {

kernels/portable/cpu/op_reflection_pad1d.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ Tensor& reflection_pad1d_out(
4444
out);
4545

4646
ScalarType in_type = in.scalar_type();
47-
constexpr auto name = "reflection_pad1d.out";
47+
static constexpr auto name = "reflection_pad1d.out";
4848

4949
ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] {
5050
pad1d<CTYPE>(reflection_ix, in, out, padding);

0 commit comments

Comments (0)