Skip to content
Merged
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion runtime/core/array_ref.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
#include <array>
#include <cstdint>

#include <c10/util/irange.h>
#include <executorch/runtime/platform/assert.h>

namespace executorch {
Expand Down Expand Up @@ -149,7 +150,7 @@ class ArrayRef final {
if (Length != RHS.Length) {
return false;
}
for (size_t i = 0; i < this->Length; i++) {
for (const auto i : c10::irange(this->Length)) {
if (Data[i] != RHS.Data[i]) {
return false;
}
Expand Down
29 changes: 15 additions & 14 deletions runtime/core/exec_aten/testing_util/tensor_factory.h
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
#include <algorithm>
#include <cstdint>

#include <c10/util/irange.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
Expand Down Expand Up @@ -78,7 +79,7 @@ inline bool check_strides(
// a.strides == (1, 1, 2). We want to create a mapping to make the
// sorted_stride as (2, 1, 1) while sorted_size == (3, 2, 1)
std::vector<std::int32_t> sorted_idx(sizes.size());
for (size_t i = 0; i < sizes.size(); i++) {
for (const auto i : c10::irange(sizes.size())) {
sorted_idx[i] = i;
}
std::sort(
Expand All @@ -98,7 +99,7 @@ inline bool check_strides(
// Use the mapping to rearrange the sizes and strides
std::vector<std::int32_t> sorted_sizes(sizes.size());
std::vector<std::int32_t> sorted_strides(sizes.size());
for (size_t i = 0; i < sizes.size(); i++) {
for (const auto i : c10::irange(sizes.size())) {
sorted_sizes[i] = sizes[sorted_idx[i]] == 0 ? 1 : sizes[sorted_idx[i]];
sorted_strides[i] = strides[sorted_idx[i]];
}
Expand Down Expand Up @@ -132,7 +133,7 @@ inline bool check_dim_order(
}
size_t gauss_sum = 0;
std::vector<int> count(dim_order.size(), 0);
for (int i = 0; i < dim_order.size(); i++) {
for (const auto i : c10::irange(dim_order.size())) {
if (dim_order[i] < 0 || dim_order[i] >= sizes.size()) {
return false;
}
Expand Down Expand Up @@ -378,7 +379,7 @@ class TensorFactory {
std::vector<executorch::aten::StridesType> contiguous_strides =
internal::strides_from_dim_order(sizes, contiguous_dim_order);

for (int32_t i = 0; i < input.dim(); i++) {
for (const auto i : c10::irange(input.dim())) {
ET_CHECK_MSG(
input.strides()[i] == contiguous_strides[i],
"Input tensor is not contiguous");
Expand All @@ -394,10 +395,10 @@ class TensorFactory {
std::vector<ctype> channels_last_data(
N * C * H * W); // Create a new blob with the same total size to contain
// channels_last data
for (int32_t n = 0; n < N; ++n) {
for (int32_t c = 0; c < C; ++c) {
for (int32_t h = 0; h < H; ++h) {
for (int32_t w = 0; w < W; ++w) {
for (const auto n : c10::irange(N)) {
for (const auto c : c10::irange(C)) {
for (const auto h : c10::irange(H)) {
for (const auto w : c10::irange(W)) {
// Calculate the index in the original blob
int32_t old_index = ((n * C + c) * H + h) * W + w;
// Calculate the index in the new blob
Expand Down Expand Up @@ -598,7 +599,7 @@ inline void validate_strides(
}
}
// No two dimensions can have same stride value
for (int32_t i = 0; i < strides.size(); ++i) {
for (const auto i : c10::irange(strides.size())) {
for (int32_t j = i + 1; j < strides.size(); ++j) {
if ((sizes[i] == 0) || (sizes[j] == 0) ||
((sizes[i] == 1) || (sizes[j] == 1))) {
Expand Down Expand Up @@ -814,7 +815,7 @@ class TensorFactory {
// given strides is empty.
if (!sizes.empty() && dim_order.empty()) {
default_dim_order.resize(sizes.size(), 1);
for (size_t i = 0; i < sizes.size(); ++i) {
for (const auto i : c10::irange(sizes.size())) {
default_dim_order[i] = i;
}
}
Expand Down Expand Up @@ -888,10 +889,10 @@ class TensorFactory {
std::vector<ctype> channels_last_data(
N * C * H * W); // Create a new blob with the same total size to contain
// channels_last data
for (int32_t n = 0; n < N; ++n) {
for (int32_t c = 0; c < C; ++c) {
for (int32_t h = 0; h < H; ++h) {
for (int32_t w = 0; w < W; ++w) {
for (const auto n : c10::irange(N)) {
for (const auto c : c10::irange(C)) {
for (const auto h : c10::irange(H)) {
for (const auto w : c10::irange(W)) {
// Calculate the index in the original blob
int32_t old_index = ((n * C + c) * H + h) * W + w;
// Calculate the index in the new blob
Expand Down
13 changes: 7 additions & 6 deletions runtime/core/exec_aten/testing_util/tensor_util.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include <cstring>
#include <ostream>

#include <c10/util/irange.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
Expand Down Expand Up @@ -50,7 +51,7 @@ bool data_is_close(
if (a == b) {
return true;
}
for (size_t i = 0; i < numel; i++) {
for (const auto i : c10::irange(numel)) {
const auto ai = a[i];
const auto bi = b[i];

Expand Down Expand Up @@ -201,7 +202,7 @@ bool tensor_lists_are_close(
if (num_tensors_a != num_tensors_b) {
return false;
}
for (size_t i = 0; i < num_tensors_a; i++) {
for (const auto i : c10::irange(num_tensors_a)) {
if (!tensors_are_close(tensors_a[i], tensors_b[i], rtol, opt_atol)) {
return false;
}
Expand Down Expand Up @@ -245,7 +246,7 @@ template <typename T>
std::ostream& print_data(std::ostream& os, const T* data, size_t numel) {
// TODO(dbort): Make this smarter: show dimensions, listen to strides,
// break up or truncate data when it's huge
for (auto i = 0; i < numel; i++) {
for (const auto i : c10::irange(numel)) {
os << data[i];
if (i < numel - 1) {
os << ", ";
Expand All @@ -257,7 +258,7 @@ std::ostream& print_data(std::ostream& os, const T* data, size_t numel) {
template <typename T>
std::ostream&
print_data(std::ostream& os, const etensor::complex<T>* data, size_t numel) {
for (auto i = 0; i < numel; i++) {
for (const auto i : c10::irange(numel)) {
os << data[i].real_ << " + " << data[i].imag_ << "j";
if (i < numel - 1) {
os << ", ";
Expand All @@ -276,7 +277,7 @@ template <>
std::ostream& print_data(std::ostream& os, const uint8_t* data, size_t numel) {
// TODO(dbort): Make this smarter: show dimensions, listen to strides,
// break up or truncate data when it's huge
for (auto i = 0; i < numel; i++) {
for (const auto i : c10::irange(numel)) {
os << (uint64_t)data[i];
if (i < numel - 1) {
os << ", ";
Expand All @@ -292,7 +293,7 @@ std::ostream& print_data(std::ostream& os, const uint8_t* data, size_t numel) {
*/
std::ostream& operator<<(std::ostream& os, const Tensor& t) {
os << "ETensor(sizes={";
for (auto dim = 0; dim < t.dim(); dim++) {
for (const auto dim : c10::irange(t.dim())) {
os << t.size(dim);
if (dim < t.dim() - 1) {
os << ", ";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
* LICENSE file in the root directory of this source tree.
*/

#include <c10/util/irange.h>
#include <executorch/runtime/core/error.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
Expand Down Expand Up @@ -86,7 +87,7 @@ using torch::executor::TensorImpl;
"Arrays are not equal size." #a1 " size:%zu," #a2 " size:%zu", \
a1.size(), \
a2.size()); \
for (size_t i = 0; i < a1.size(); ++i) { \
for (const auto i : c10::irange(a1.size())) { \
ET_CHECK_MSG( \
a1[i] == a2[i], \
"Value mismatch at index %zu, " #a1 \
Expand Down Expand Up @@ -784,7 +785,7 @@ void run_zeros_like_test(Tensor input) {

// A Tensor created manually, that should be identical to `actual`.
std::vector<int32_t> expected_data;
for (int i = 0; i < input.numel(); i++) {
for (const auto i : c10::irange(input.numel())) {
expected_data.push_back(0);
}
#ifdef USE_ATEN_LIB
Expand Down Expand Up @@ -842,7 +843,7 @@ void run_ones_like_test(Tensor input) {

// A Tensor created manually, that should be identical to `actual`.
std::vector<int32_t> expected_data;
for (int i = 0; i < input.numel(); i++) {
for (const auto i : c10::irange(input.numel())) {
expected_data.push_back(1);
}
#ifdef USE_ATEN_LIB
Expand Down
7 changes: 4 additions & 3 deletions runtime/core/exec_aten/util/dim_order_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#include <cstdio>
#include <cstring>

#include <c10/util/irange.h>
#include <executorch/runtime/core/error.h>
#include <executorch/runtime/platform/assert.h>
#include <executorch/runtime/platform/compiler.h>
Expand All @@ -22,7 +23,7 @@ namespace runtime {
namespace {
template <typename DimOrderType>
bool validate_dim_order(const DimOrderType* dim_order, const size_t dims) {
for (int32_t i = 0; i < dims; ++i) {
for (const auto i : c10::irange(dims)) {
if (dim_order[i] >= dims) {
return false;
}
Expand All @@ -42,7 +43,7 @@ template <typename DimOrderType>
inline bool is_contiguous_dim_order(
const DimOrderType* dim_order,
const size_t dims) {
for (int i = 0; i < dims; ++i) {
for (const auto i : c10::irange(dims)) {
if (dim_order[i] != i) {
return false;
}
Expand Down Expand Up @@ -254,7 +255,7 @@ ET_NODISCARD inline Error stride_to_dim_order(

sorter.quick_sort(array, 0, dims - 1);

for (auto i = 0; i < dims; i++) {
for (const auto i : c10::irange(dims)) {
dim_order[i] = array[i].dim_order;
}
return Error::Ok;
Expand Down
17 changes: 9 additions & 8 deletions runtime/core/exec_aten/util/tensor_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
#include <cstddef> // size_t
#include <limits>

#include <c10/util/irange.h>
#include <executorch/runtime/core/array_ref.h>
#include <executorch/runtime/core/error.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
Expand Down Expand Up @@ -275,7 +276,7 @@
a_strides = a__.strides(); \
const ::executorch::aten::ArrayRef<executorch::aten::StridesType> \
b_strides = b__.strides(); \
for (size_t i = 0; i < a__.dim(); i++) { \
for (const auto i : c10::irange(a__.dim())) { \
ET_CHECK_MSG( \
a_strides[i] == b_strides[i], \
"a.strides()[%zu] shall equal to b.strides()[%zu], " \
Expand Down Expand Up @@ -307,7 +308,7 @@
b_strides = b__.strides(); \
const ::executorch::aten::ArrayRef<executorch::aten::StridesType> \
c_strides = c__.strides(); \
for (size_t i = 0; i < a__.dim(); i++) { \
for (const auto i : c10::irange(a__.dim())) { \
ET_CHECK_MSG( \
a_strides[i] == b_strides[i] && b_strides[i] == c_strides[i], \
"a_strides[%zu], b_strides[%zu] and c_strides[%zu] " \
Expand Down Expand Up @@ -892,7 +893,7 @@ inline size_t getLeadingDims(
dim,
ssize_t(tensor.dim()));
size_t dims = 1;
for (size_t i = 0; i < dim; ++i) {
for (const auto i : c10::irange(dim)) {
dims *= static_cast<size_t>(tensor.size(i));
}
return dims;
Expand Down Expand Up @@ -929,7 +930,7 @@ inline size_t coordinateToIndex(
const executorch::aten::Tensor& tensor,
const size_t* const coordinate) {
size_t index = 0;
for (int d = 0; d < tensor.dim(); ++d) {
for (const auto d : c10::irange(tensor.dim())) {
index += coordinate[d] * getTrailingDims(tensor, d);
}
return index;
Expand Down Expand Up @@ -961,7 +962,7 @@ inline size_t coordinateToIndexWithTrailingDimsMemo(
const size_t* const coordinate,
const size_t trailing_dims_memo[kTensorDimensionLimit]) {
size_t index = 0;
for (int d = 0; d < tensor.dim(); ++d) {
for (const auto d : c10::irange(tensor.dim())) {
index += coordinate[d] * trailing_dims_memo[d];
}
return index;
Expand All @@ -983,7 +984,7 @@ inline void indexToCoordinate(
size_t index,
size_t* coordinate) {
ET_CHECK(index < tensor.numel());
for (auto i = 0; i < tensor.dim(); ++i) {
for (const auto i : c10::irange(tensor.dim())) {
auto dim = tensor.dim() - 1 - i;
size_t dim_size = tensor.size(dim);
coordinate[dim] = index % dim_size;
Expand Down Expand Up @@ -1173,7 +1174,7 @@ ET_NODISCARD inline Error resize_tensor(
std::array<executorch::aten::SizesType, kTensorDimensionLimit>
new_sizes_casted{};
size_t new_sizes_ndim = new_sizes.size();
for (size_t i = 0; i < new_sizes_ndim; ++i) {
for (const auto i : c10::irange(new_sizes_ndim)) {
new_sizes_casted[i] =
static_cast<executorch::aten::SizesType>(new_sizes[i]);
}
Expand Down Expand Up @@ -1304,7 +1305,7 @@ inline size_t calculate_linear_index(
const executorch::aten::StridesType* strides,
const size_t ndim) {
size_t index = 0;
for (size_t i = 0; i < ndim; i++) {
for (const auto i : c10::irange(ndim)) {
index += coordinate[i] * strides[i];
}
return index;
Expand Down
7 changes: 4 additions & 3 deletions runtime/core/exec_aten/util/tensor_util_aten.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>

#include <ATen/Tensor.h> // @manual
#include <c10/util/irange.h>
#include <executorch/runtime/platform/assert.h>

namespace executorch {
Expand Down Expand Up @@ -41,7 +42,7 @@ bool tensor_has_valid_dim_order(at::Tensor t) {

if (!validate_dim_order(dim_order, t.dim())) {
ET_LOG(Error, "Tensor dim order is not valid:");
for (size_t d = 0; d < t.dim(); ++d) {
for (const auto d : c10::irange(t.dim())) {
ET_LOG(
Error,
" dim_order(%zu): %zu",
Expand All @@ -66,7 +67,7 @@ inline bool tensor_is_default_or_channels_last_dim_order(at::Tensor t) {
ET_LOG(
Error,
"Expected tensor to have default or channels last dim order, but got");
for (size_t d = 0; d < t.dim(); ++d) {
for (const auto d : c10::irange(t.dim())) {
ET_LOG(
Error,
" dim_order(%zu): %zu",
Expand Down Expand Up @@ -96,7 +97,7 @@ bool tensors_have_same_dim_order(
bool all_channels_last =
is_channels_last_dim_order(first_dim_order, tensor_list[0].dim());

for (size_t i = 1; i < tensor_list.size(); ++i) {
for (const auto i : c10::irange(1, tensor_list.size())) {
ET_CHECK_OR_RETURN_FALSE(
get_dim_order(tensor_list[i], other_dim_order, tensor_list[i].dim()) ==
Error::Ok,
Expand Down
Loading
Loading