
Commit 5848cc3

Resolve many sign compare errors (#8651)

Summary: Pull Request resolved: #8651. In preparation for a full transition, resolve many sign compare errors. The work is chunked to keep these many changes separate.

Reviewed By: swolchok
Differential Revision: D70110991
1 parent dc6a956 commit 5848cc3


61 files changed: +395 −319 lines (large commit; only a subset of the changed files is shown below)
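The pattern applied throughout the commit: Tensor::numel() and Tensor::dim() return signed counts, so looping with a size_t (or int) index and comparing it against them triggers -Wsign-compare. Switching to c10::irange makes the index type match the bound. A minimal before/after sketch, assuming only that the c10 headers are on the include path; the numel() function below is a stand-in, not ExecuTorch's Tensor API:

#include <c10/util/irange.h>

#include <cstdint>
#include <cstdio>

// Stand-in for Tensor::numel(), which returns a signed count.
int64_t numel() {
  return 8;
}

int main() {
  // Before: `size_t i` compared against the signed numel() trips -Wsign-compare.
  //   for (size_t i = 0; i < numel(); ++i) { ... }
  //
  // After: c10::irange(numel()) yields indices whose type matches the bound,
  // so the comparison hidden inside the range iterator is sign-consistent.
  for (const auto i : c10::irange(numel())) {
    std::printf("%lld\n", static_cast<long long>(i));
  }
  return 0;
}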

kernels/portable/cpu/op_any.cpp

Lines changed: 5 additions & 4 deletions
@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <c10/util/irange.h>
 #include <executorch/kernels/portable/cpu/util/reduce_util.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
 
@@ -34,7 +35,7 @@ Tensor& any_all_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
     const auto data_in = in.const_data_ptr<CTYPE_IN>();
     auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
     data_out[0] = static_cast<CTYPE_OUT>(false);
-    for (auto i = 0; i < in.numel(); ++i) {
+    for (const auto i : c10::irange(in.numel())) {
      if (static_cast<bool>(data_in[i])) {
        data_out[0] = static_cast<CTYPE_OUT>(true);
        break;
@@ -83,12 +84,12 @@ Tensor& any_dims_out(
     CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
     if (dim_list.has_value() && dim_list.value().empty()) {
       const CTYPE_IN* in_data = in.const_data_ptr<CTYPE_IN>();
-      for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+      for (const auto out_ix : c10::irange(out.numel())) {
         out_data[out_ix] =
             static_cast<CTYPE_OUT>(static_cast<bool>(in_data[out_ix]));
       }
     } else {
-      for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+      for (const auto out_ix : c10::irange(out.numel())) {
         bool any = false;
         if (in.numel() > 0) {
           any = map_reduce_over_dim_list<CTYPE_IN, bool>(
@@ -138,7 +139,7 @@ Tensor& any_out(
   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, name, CTYPE_OUT, [&] {
       CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
-      for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+      for (const auto out_ix : c10::irange(out.numel())) {
        CTYPE_OUT any = false;
        if (in.numel() > 0) {
          std::tuple<CTYPE_OUT, long> acc =

kernels/portable/cpu/op_cdist_forward.cpp

Lines changed: 6 additions & 6 deletions
@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <c10/util/irange.h>
 #include <executorch/kernels/portable/cpu/util/broadcast_util.h>
 #include <executorch/kernels/portable/cpu/util/distance_util.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
@@ -34,7 +35,7 @@ void cdist(const Tensor& x1, const Tensor& x2, Tensor& out, double p) {
   // If the last dimension of x1 (which is equal to the last dimension of x2)
   // has size 0, then the output is filled with 0s.
   if (x1.numel() == 0) {
-    for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+    for (const auto out_ix : c10::irange(out.numel())) {
      out_data[out_ix] = 0;
    }
    return;
@@ -64,7 +65,7 @@ void cdist(const Tensor& x1, const Tensor& x2, Tensor& out, double p) {
   size_t x2_inner_size = R * M;
   size_t out_inner_size = P * R;
 
-  for (size_t b = 0; b < out_batch_numel; ++b) {
+  for (const auto b : c10::irange(out_batch_numel)) {
     size_t x1_base_ix = b * x1_inner_size;
     size_t x2_base_ix = b * x2_inner_size;
     size_t out_base_ix = b * out_inner_size;
@@ -81,14 +82,13 @@ void cdist(const Tensor& x1, const Tensor& x2, Tensor& out, double p) {
         x2_base_ix = linearize_access_indexes(out_base_coord, out.dim(), x2);
       }
     }
-
     size_t out_ix = 0;
-    for (size_t i = 0; i < P; ++i) {
+    for (const auto i : c10::irange(P)) {
       const CTYPE* row_i = x1_data + x1_base_ix + i * M;
-      for (size_t j = 0; j < R; ++j) {
+      for (const auto j : c10::irange(R)) {
         const CTYPE* row_j = x2_data + x2_base_ix + j * M;
         CTYPE agg = 0;
-        for (size_t k = 0; k < M; ++k) {
+        for (const auto k : c10::irange(M)) {
          CTYPE diff = std::abs(row_i[k] - row_j[k]);
          agg = Norm::reduce(agg, Norm::map(diff, p));
        }

kernels/portable/cpu/op_constant_pad_nd.cpp

Lines changed: 5 additions & 4 deletions
@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <c10/util/irange.h>
 #include <cmath>
 #include <cstring>
 
@@ -56,7 +57,7 @@ void apply_padding_to_dim(
   size_t out_step_len = out_strides[dim];
   size_t in_step_len = self_strides[dim];
 
-  for (size_t i = 0; i < pad_before; ++i) {
+  for ([[maybe_unused]] const auto i : c10::irange(pad_before)) {
     set_all_to_value(out_data, out_step_len, value);
     out_data += out_step_len;
   }
@@ -75,7 +76,7 @@ void apply_padding_to_dim(
   }
   // Otherwise, call this function recursively
   else {
-    for (size_t i = 0; i < self_sizes[dim]; ++i) {
+    for ([[maybe_unused]] const auto i : c10::irange(self_sizes[dim])) {
       apply_padding_to_dim(
           ndim,
           self_data,
@@ -94,7 +95,7 @@ void apply_padding_to_dim(
     }
   }
 
-  for (int i = 0; i < pad_after; ++i) {
+  for ([[maybe_unused]] const auto i : c10::irange(pad_after)) {
     set_all_to_value(out_data, out_step_len, value);
     out_data += out_step_len;
   }
@@ -124,7 +125,7 @@ void constant_pad_nd_out_impl(
   // Collect sizes and strides of input and output tensors and determine the
   // last padded dimension
   size_t last_padded_dim = 0;
-  for (size_t i = 0; i < ndim; ++i) {
+  for (const auto i : c10::irange(ndim)) {
     self_sizes[i] = self.size(i);
     self_strides[i] = getTrailingDims(self, static_cast<int64_t>(i));
     out_sizes[i] = out.size(i);
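In the padding loops above the index is never read, so a plain range-for over c10::irange would trade the sign-compare warning for -Wunused-variable; the [[maybe_unused]] attribute keeps the loop warning-clean. A small sketch of the idiom, with a hypothetical emit_padding() helper standing in for the kernel code:

#include <c10/util/irange.h>

#include <cstdint>
#include <cstdio>

// Hypothetical helper: repeat a body `pad_before` times without using the index.
void emit_padding(int64_t pad_before) {
  for ([[maybe_unused]] const auto i : c10::irange(pad_before)) {
    // The index `i` only drives the repetition; it is never read.
    std::puts("write one padded step");
  }
}

int main() {
  emit_padding(3);
  return 0;
}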

kernels/portable/cpu/op_convolution.cpp

Lines changed: 18 additions & 17 deletions
@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <c10/util/irange.h>
 #include <cstring>
 
 #include <executorch/kernels/portable/cpu/util/dtype_util.h>
@@ -91,25 +92,25 @@ void conv2d_impl(
   if (!transposed) {
     w_coord[0] = out_c;
     // Compute 2D output region
-    for (size_t out_y = 0; out_y < out_H; ++out_y) {
+    for (const auto out_y : c10::irange(out_H)) {
       out_coord[2] = out_y;
-      for (size_t out_x = 0; out_x < out_W; ++out_x) {
+      for (const auto out_x : c10::irange(out_W)) {
         out_coord[3] = out_x;
 
         CTYPE accum = 0.0f;
-        for (size_t in_c = in_c_start; in_c < in_c_start + in_C_per_group;
-             ++in_c) {
+        for (const auto in_c :
+             c10::irange(in_c_start, in_c_start + in_C_per_group)) {
           in_coord[1] = in_c;
           w_coord[1] = in_c - in_c_start;
 
-          for (size_t w_y = 0; w_y < w_H; ++w_y) {
+          for (const auto w_y : c10::irange(w_H)) {
            w_coord[2] = w_y;
 
            size_t in_y = stride_y * out_y + dilation_y * w_y - padding_y;
            in_coord[2] = in_y;
            // Only proceed if input y coordinate is within bounds
            if (in_y >= 0 && in_y < in_H) {
-              for (size_t w_x = 0; w_x < w_W; ++w_x) {
+              for (const auto w_x : c10::irange(w_W)) {
                w_coord[3] = w_x;
 
                size_t in_x = stride_x * out_x + dilation_x * w_x - padding_x;
@@ -143,29 +144,29 @@ void conv2d_impl(
   } else { // transposed convolution
     w_coord[1] = out_c - out_c_start;
 
-    for (size_t in_y = 0; in_y < in_H; ++in_y) {
+    for (const auto in_y : c10::irange(in_H)) {
       in_coord[2] = in_y;
 
-      for (size_t in_x = 0; in_x < in_W; ++in_x) {
+      for (const auto in_x : c10::irange(in_W)) {
         in_coord[3] = in_x;
 
-        for (size_t in_c = in_c_start; in_c < in_c_start + in_C_per_group;
-             ++in_c) {
+        for (const auto in_c :
+             c10::irange(in_c_start, in_c_start + in_C_per_group)) {
           in_coord[1] = in_c;
 
           size_t in_idx =
               calculate_linear_index(in_coord, in_strides.data(), 4);
           CTYPE in_val = in_ptr[in_idx];
 
           w_coord[0] = in_c;
-          for (size_t w_y = 0; w_y < w_H; ++w_y) {
+          for (const auto w_y : c10::irange(w_H)) {
            w_coord[2] = w_y;
            size_t out_y = stride_y * in_y + dilation_y * w_y - padding_y;
            out_coord[2] = out_y;
 
            // Only proceed if output y coordinate is within bounds
            if (out_y >= 0 && out_y < out_H) {
-              for (size_t w_x = 0; w_x < w_W; ++w_x) {
+              for (const auto w_x : c10::irange(w_W)) {
                w_coord[3] = w_x;
                size_t out_x = stride_x * in_x + dilation_x * w_x - padding_x;
                out_coord[3] = out_x;
@@ -302,21 +303,21 @@ void convolution_wrapper(
       memset(out_ptr, 0, out.nbytes());
     } else {
       // If bias is present, we initialize the output to the bias value
-      for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+      for (const auto out_ix : c10::irange(out.numel())) {
        out_ptr[out_ix] = load_bias(&bias_ptr
            [((out_ix / out_strides[1]) % out_C) *
             bias.value().element_size()]);
      }
    }
  }
 
-  for (size_t batch = 0; batch < out_N; ++batch) {
-    for (size_t group = 0; group < groups; ++group) {
+  for (const auto batch : c10::irange(out_N)) {
+    for (const auto group : c10::irange(groups)) {
      // Align channel offset based on the group
      size_t out_c_start = group * out_C_per_group;
      // Populate all the out channels in the group
-      for (size_t out_c = out_c_start; out_c < out_c_start + out_C_per_group;
-           ++out_c) {
+      for (const auto out_c :
+           c10::irange(out_c_start, out_c_start + out_C_per_group)) {
        conv2d_impl(
            in_ptr,
            in_sizes,
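The grouped-channel loops above also use the two-argument c10::irange(begin, end) overload, which replaces `for (size_t c = start; c < start + per_group; ++c)` with a half-open range. A sketch with made-up group and channel counts, not the kernel's real shapes:

#include <c10/util/irange.h>

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t groups = 2;
  const int64_t out_C_per_group = 3;

  for (const auto group : c10::irange(groups)) {
    const int64_t out_c_start = group * out_C_per_group;
    // Half-open range [out_c_start, out_c_start + out_C_per_group).
    for (const auto out_c :
         c10::irange(out_c_start, out_c_start + out_C_per_group)) {
      std::printf(
          "group %lld -> out channel %lld\n",
          static_cast<long long>(group),
          static_cast<long long>(out_c));
    }
  }
  return 0;
}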

kernels/portable/cpu/op_diagonal_copy.cpp

Lines changed: 6 additions & 4 deletions
@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <c10/util/irange.h>
 #include <executorch/kernels/portable/cpu/util/copy_ops_util.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
 #include <executorch/runtime/platform/assert.h>
@@ -40,20 +41,21 @@ void diagonal_copy_impl(
 
   size_t new_ndim = out.dim();
   int64_t new_sizes[kTensorDimensionLimit];
-  for (size_t i = 0; i < new_ndim; ++i) {
+  for (const auto i : c10::irange(new_ndim)) {
     new_sizes[i] = out.size(i);
   }
 
   int64_t new_strides[kTensorDimensionLimit];
   size_t shift = 0;
-  for (size_t d = 0; d < in.dim(); ++d) {
-    if (d == dim1 || d == dim2) {
+  size_t in_dim = in.dim();
+  for (const auto d : c10::irange(in_dim)) {
+    if (static_cast<int64_t>(d) == dim1 || static_cast<int64_t>(d) == dim2) {
      shift++;
    } else {
      new_strides[d - shift] = in.strides().at(d);
    }
  }
-  new_strides[in.dim() - 2] = in.strides().at(dim1) + in.strides().at(dim2);
+  new_strides[in_dim - 2] = in.strides().at(dim1) + in.strides().at(dim2);
 
   as_strided_copy<CTYPE>(
       in, {new_sizes, new_ndim}, {new_strides, new_ndim}, storage_offset, out);
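One place where irange alone is not enough: in diagonal_copy_impl the range runs over a size_t dimension count while dim1 and dim2 are int64_t, so the body still needs an explicit cast to keep the comparison sign-consistent. A minimal sketch with placeholder values:

#include <c10/util/irange.h>

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t in_dim = 4;           // stand-in for in.dim()
  const int64_t dim1 = 1, dim2 = 3;  // signed dimension arguments

  for (const auto d : c10::irange(in_dim)) {
    // `d` is size_t here, so cast before comparing against the signed dims.
    if (static_cast<int64_t>(d) == dim1 || static_cast<int64_t>(d) == dim2) {
      std::printf("dim %zu is a diagonal dim\n", d);
    }
  }
  return 0;
}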

kernels/portable/cpu/op_flip.cpp

Lines changed: 5 additions & 4 deletions
@@ -5,6 +5,7 @@
  * This source code is licensed under the BSD-style license found in the
  * LICENSE file in the root directory of this source tree.
  */
+#include <c10/util/irange.h>
 
 #include <executorch/kernels/portable/cpu/util/reduce_util.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
@@ -25,7 +26,7 @@ size_t unflip_flat_ix(size_t ix, const Tensor& in, ArrayRef<bool> flip_dim) {
   indexToCoordinate(in, ix, ix_coord);
 
   size_t unflip_coord[kTensorDimensionLimit];
-  for (size_t d = 0; d < in.dim(); d++) {
+  for (const auto d : c10::irange(in.dim())) {
     if (flip_dim[d]) {
       unflip_coord[d] = in.size(d) - ix_coord[d] - 1;
     } else {
@@ -54,10 +55,10 @@ Tensor& flip_out(
   ET_KERNEL_CHECK(ctx, check_flip_args(in, dims, out), InvalidArgument, out);
 
   bool flip_dim_data[kTensorDimensionLimit];
-  for (size_t i = 0; i < in.dim(); i++) {
+  for (const auto i : c10::irange(in.dim())) {
     flip_dim_data[i] = false;
   }
-  for (size_t i = 0; i < dims.size(); i++) {
+  for (const auto i : c10::irange(dims.size())) {
     const auto d = dims[i] < 0 ? dims[i] + nonzero_dim(in) : dims[i];
     flip_dim_data[d] = true;
   }
@@ -70,7 +71,7 @@ Tensor& flip_out(
     const CTYPE* in_data = in.const_data_ptr<CTYPE>();
     CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
 
-    for (size_t ix = 0; ix < out.numel(); ++ix) {
+    for (const auto ix : c10::irange(in.numel())) {
      out_data[ix] = in_data[unflip_flat_ix(ix, in, flip_dim)];
    }
  });

kernels/portable/cpu/op_full.cpp

Lines changed: 2 additions & 1 deletion
@@ -5,6 +5,7 @@
  * This source code is licensed under the BSD-style license found in the
  * LICENSE file in the root directory of this source tree.
  */
+#include <c10/util/irange.h>
 
 #include <executorch/kernels/portable/cpu/scalar_utils.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
@@ -44,7 +45,7 @@ Tensor& full_out(
   ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
     CTYPE_OUT val_casted = static_cast<CTYPE_OUT>(val);
     auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
-    for (size_t i = 0; i < out.numel(); ++i) {
+    for (const auto i : c10::irange(out.numel())) {
      data_out[i] = val_casted;
    }
  });

kernels/portable/cpu/op_full_like.cpp

Lines changed: 2 additions & 1 deletion
@@ -5,6 +5,7 @@
  * This source code is licensed under the BSD-style license found in the
  * LICENSE file in the root directory of this source tree.
  */
+#include <c10/util/irange.h>
 
 #include <executorch/kernels/portable/cpu/scalar_utils.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
@@ -60,7 +61,7 @@ Tensor& full_like_out(
   ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
     CTYPE_OUT val_casted = static_cast<CTYPE_OUT>(val);
     auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
-    for (size_t i = 0; i < out.numel(); ++i) {
+    for (const auto i : c10::irange(out.numel())) {
      data_out[i] = val_casted;
    }
  });

kernels/portable/cpu/op_gather.cpp

Lines changed: 3 additions & 2 deletions
@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <c10/util/irange.h>
 #include <cinttypes>
 #include <cstdint>
 #include <cstring>
@@ -37,12 +38,12 @@ void gather_helper(
     return;
   }
 
-  for (size_t ix = 0; ix < index.numel(); ++ix) {
+  for (const auto ix : c10::irange(index.numel())) {
     size_t ix_coord[kTensorDimensionLimit];
     indexToCoordinate(index, ix, ix_coord);
 
     size_t in_coord[kTensorDimensionLimit];
-    for (size_t i = 0; i < out.dim(); ++i) {
+    for (const auto i : c10::irange(out.dim())) {
       if (i == dim) {
         in_coord[i] = index_data[ix];
       } else {
