Merged
runtime/core/exec_aten/testing_util/targets.bzl (1 addition, 0 deletions)
@@ -44,6 +44,7 @@ def define_common_targets():
"//executorch/runtime/core/exec_aten/util:scalar_type_util" + aten_suffix,
"//executorch/runtime/core/exec_aten/util:tensor_util" + aten_suffix,
"//executorch/runtime/core/exec_aten/util:tensor_dimension_limit",
"//executorch/runtime/core/portable_type/c10/c10:c10",
],
exported_external_deps = [
"gmock" + aten_suffix,
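Every C++ hunk in this PR makes the same change: a hand-rolled index loop becomes a range-based loop over c10::irange, and each Buck target whose sources now include <c10/util/irange.h> gains an explicit dep on the c10 target. c10::irange(n) yields the indices [0, n) with the index type deduced from its argument, which also avoids signed/unsigned comparison warnings. A minimal standalone sketch of the pattern (not code from this PR):

#include <c10/util/irange.h>

#include <vector>

int main() {
  std::vector<int> sizes = {3, 2, 1};
  // Before: for (size_t i = 0; i < sizes.size(); i++) { ... }
  // After: i is deduced from sizes.size(), so the loop index and the
  // bound have the same (unsigned) type.
  for (const auto i : c10::irange(sizes.size())) {
    sizes[i] += 1;
  }
  return 0;
}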
runtime/core/exec_aten/testing_util/tensor_factory.h (15 additions, 14 deletions)
@@ -5,6 +5,7 @@
#include <algorithm>
#include <cstdint>

+#include <c10/util/irange.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
@@ -78,7 +79,7 @@ inline bool check_strides(
// a.strides == (1, 1, 2). We want to create a mapping that makes the
// sorted_stride (2, 1, 1) while sorted_size == (3, 2, 1)
std::vector<std::int32_t> sorted_idx(sizes.size());
-for (size_t i = 0; i < sizes.size(); i++) {
+for (const auto i : c10::irange(sizes.size())) {
sorted_idx[i] = i;
}
std::sort(
@@ -98,7 +99,7 @@
// Use the mapping to rearrange the sizes and strides
std::vector<std::int32_t> sorted_sizes(sizes.size());
std::vector<std::int32_t> sorted_strides(sizes.size());
-for (size_t i = 0; i < sizes.size(); i++) {
+for (const auto i : c10::irange(sizes.size())) {
sorted_sizes[i] = sizes[sorted_idx[i]] == 0 ? 1 : sizes[sorted_idx[i]];
sorted_strides[i] = strides[sorted_idx[i]];
}
@@ -132,7 +133,7 @@ inline bool check_dim_order(
}
size_t gauss_sum = 0;
std::vector<int> count(dim_order.size(), 0);
-for (int i = 0; i < dim_order.size(); i++) {
+for (const auto i : c10::irange(dim_order.size())) {
if (dim_order[i] >= sizes.size()) {
return false;
}
@@ -378,7 +379,7 @@ class TensorFactory {
std::vector<executorch::aten::StridesType> contiguous_strides =
internal::strides_from_dim_order(sizes, contiguous_dim_order);

-for (int32_t i = 0; i < input.dim(); i++) {
+for (const auto i : c10::irange(input.dim())) {
ET_CHECK_MSG(
input.strides()[i] == contiguous_strides[i],
"Input tensor is not contiguous");
@@ -394,10 +395,10 @@
std::vector<ctype> channels_last_data(
N * C * H * W); // Create a new blob with the same total size to contain
// channels_last data
-for (int32_t n = 0; n < N; ++n) {
-for (int32_t c = 0; c < C; ++c) {
-for (int32_t h = 0; h < H; ++h) {
-for (int32_t w = 0; w < W; ++w) {
+for (const auto n : c10::irange(N)) {
+for (const auto c : c10::irange(C)) {
+for (const auto h : c10::irange(H)) {
+for (const auto w : c10::irange(W)) {
// Calculate the index in the original blob
int32_t old_index = ((n * C + c) * H + h) * W + w;
// Calculate the index in the new blob
@@ -614,7 +615,7 @@ inline void validate_strides(
}
}
// No two dimensions can have same stride value
-for (int32_t i = 0; i < strides.size(); ++i) {
+for (const auto i : c10::irange(strides.size())) {
for (int32_t j = i + 1; j < strides.size(); ++j) {
if ((sizes[i] == 0) || (sizes[j] == 0) ||
((sizes[i] == 1) || (sizes[j] == 1))) {
@@ -830,7 +831,7 @@ class TensorFactory {
// given strides is empty.
if (!sizes.empty() && dim_order.empty()) {
default_dim_order.resize(sizes.size(), 1);
-for (size_t i = 0; i < sizes.size(); ++i) {
+for (const auto i : c10::irange(sizes.size())) {
default_dim_order[i] = i;
}
}
@@ -904,10 +905,10 @@ class TensorFactory {
std::vector<ctype> channels_last_data(
N * C * H * W); // Create a new blob with the same total size to contain
// channels_last data
-for (int32_t n = 0; n < N; ++n) {
-for (int32_t c = 0; c < C; ++c) {
-for (int32_t h = 0; h < H; ++h) {
-for (int32_t w = 0; w < W; ++w) {
+for (const auto n : c10::irange(N)) {
+for (const auto c : c10::irange(C)) {
+for (const auto h : c10::irange(H)) {
+for (const auto w : c10::irange(W)) {
// Calculate the index in the original blob
int32_t old_index = ((n * C + c) * H + h) * W + w;
// Calculate the index in the new blob
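The four-deep loop nests above convert a contiguous NCHW blob into channels-last order. The destination index computed by the elided "// Calculate the index in the new blob" line is not shown in this view; for standard channels-last (NHWC) layout it would be ((n * H + h) * W + w) * C + c. A hedged sketch of that conversion, with illustrative names rather than the factory's actual blob type:

#include <c10/util/irange.h>

#include <cstdint>
#include <vector>

// Sketch: reorder a contiguous NCHW blob into NHWC (channels-last) order.
std::vector<float> nchw_to_nhwc(
    const std::vector<float>& in, int32_t N, int32_t C, int32_t H, int32_t W) {
  std::vector<float> out(in.size());
  for (const auto n : c10::irange(N)) {
    for (const auto c : c10::irange(C)) {
      for (const auto h : c10::irange(H)) {
        for (const auto w : c10::irange(W)) {
          // Index in the original NCHW blob.
          const int32_t old_index = ((n * C + c) * H + h) * W + w;
          // Index in the channels-last (NHWC) blob (assumed layout).
          const int32_t new_index = ((n * H + h) * W + w) * C + c;
          out[new_index] = in[old_index];
        }
      }
    }
  }
  return out;
}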
runtime/core/exec_aten/testing_util/tensor_util.cpp (7 additions, 6 deletions)
@@ -10,6 +10,7 @@
#include <cstring>
#include <ostream>

+#include <c10/util/irange.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
@@ -50,7 +51,7 @@ bool data_is_close(
if (a == b) {
return true;
}
-for (size_t i = 0; i < numel; i++) {
+for (const auto i : c10::irange(numel)) {
const auto ai = a[i];
const auto bi = b[i];

@@ -201,7 +202,7 @@ bool tensor_lists_are_close(
if (num_tensors_a != num_tensors_b) {
return false;
}
-for (size_t i = 0; i < num_tensors_a; i++) {
+for (const auto i : c10::irange(num_tensors_a)) {
if (!tensors_are_close(tensors_a[i], tensors_b[i], rtol, opt_atol)) {
return false;
}
@@ -245,7 +246,7 @@ template <typename T>
std::ostream& print_data(std::ostream& os, const T* data, size_t numel) {
// TODO(dbort): Make this smarter: show dimensions, listen to strides,
// break up or truncate data when it's huge
-for (auto i = 0; i < numel; i++) {
+for (const auto i : c10::irange(numel)) {
os << data[i];
if (i < numel - 1) {
os << ", ";
@@ -257,7 +258,7 @@ std::ostream& print_data(std::ostream& os, const T* data, size_t numel) {
template <typename T>
std::ostream&
print_data(std::ostream& os, const etensor::complex<T>* data, size_t numel) {
-for (auto i = 0; i < numel; i++) {
+for (const auto i : c10::irange(numel)) {
os << data[i].real_ << " + " << data[i].imag_ << "j";
if (i < numel - 1) {
os << ", ";
@@ -276,7 +277,7 @@ template <>
std::ostream& print_data(std::ostream& os, const uint8_t* data, size_t numel) {
// TODO(dbort): Make this smarter: show dimensions, listen to strides,
// break up or truncate data when it's huge
-for (auto i = 0; i < numel; i++) {
+for (const auto i : c10::irange(numel)) {
os << (uint64_t)data[i];
if (i < numel - 1) {
os << ", ";
@@ -292,7 +293,7 @@ std::ostream& print_data(std::ostream& os, const uint8_t* data, size_t numel) {
*/
std::ostream& operator<<(std::ostream& os, const Tensor& t) {
os << "ETensor(sizes={";
-for (auto dim = 0; dim < t.dim(); dim++) {
+for (const auto dim : c10::irange(t.dim())) {
os << t.size(dim);
if (dim < t.dim() - 1) {
os << ", ";
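One detail worth calling out in the print_data hunks: the old loops declared auto i = 0, so i was an int compared against the size_t parameter numel, and the separator check i < numel - 1 mixed signedness. With c10::irange(numel) the index type is deduced from numel itself. A small standalone sketch of the resulting shape (assumed names, not the library code):

#include <c10/util/irange.h>

#include <cstddef>
#include <ostream>

void print_ints(std::ostream& os, const int* data, size_t numel) {
  // i is size_t here because numel is size_t; if numel == 0 the loop
  // body never runs, so the numel - 1 wraparound below is harmless.
  for (const auto i : c10::irange(numel)) {
    os << data[i];
    if (i < numel - 1) {
      os << ", ";
    }
  }
}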
runtime/core/exec_aten/testing_util/test/targets.bzl (1 addition, 0 deletions)
@@ -25,5 +25,6 @@ def define_common_targets():
preprocessor_flags = preprocessor_flags,
deps = [
"//executorch/runtime/core/exec_aten/testing_util:tensor_util" + aten_suffix,
"//executorch/runtime/core/portable_type/c10/c10:c10",
],
)
@@ -6,6 +6,7 @@
* LICENSE file in the root directory of this source tree.
*/

+#include <c10/util/irange.h>
#include <executorch/runtime/core/error.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
@@ -86,7 +87,7 @@ using torch::executor::TensorImpl;
"Arrays are not equal size." #a1 " size:%zu," #a2 " size:%zu", \
a1.size(), \
a2.size()); \
-for (size_t i = 0; i < a1.size(); ++i) { \
+for (const auto i : c10::irange(a1.size())) { \
ET_CHECK_MSG( \
a1[i] == a2[i], \
"Value mismatch at index %zu, " #a1 \
@@ -784,7 +785,7 @@ void run_zeros_like_test(Tensor input) {

// A Tensor created manually, that should be identical to `actual`.
std::vector<int32_t> expected_data;
-for (int i = 0; i < input.numel(); i++) {
+for (const auto i : c10::irange(input.numel())) {
expected_data.push_back(0);
}
#ifdef USE_ATEN_LIB
Expand Down Expand Up @@ -842,7 +843,7 @@ void run_ones_like_test(Tensor input) {

// A Tensor created manually, that should be identical to `actual`.
std::vector<int32_t> expected_data;
-for (int i = 0; i < input.numel(); i++) {
+for (const auto i : c10::irange(input.numel())) {
expected_data.push_back(1);
}
#ifdef USE_ATEN_LIB
runtime/core/exec_aten/util/dim_order_util.h (3 additions, 2 deletions)
@@ -13,6 +13,7 @@
#include <cstdio>
#include <cstring>

+#include <c10/util/irange.h>
#include <executorch/runtime/core/error.h>
#include <executorch/runtime/platform/assert.h>
#include <executorch/runtime/platform/compiler.h>
@@ -23,7 +24,7 @@ namespace runtime {
namespace {
template <typename DimOrderType>
bool validate_dim_order(const DimOrderType* dim_order, const size_t dims) {
-for (size_t i = 0; i < dims; ++i) {
+for (const auto i : c10::irange(dims)) {
if (dim_order[i] >= static_cast<DimOrderType>(dims)) {
return false;
}
@@ -43,7 +44,7 @@ template <typename DimOrderType>
inline bool is_contiguous_dim_order(
const DimOrderType* dim_order,
const size_t dims) {
-for (size_t i = 0; i < dims; ++i) {
+for (const auto i : c10::irange(dims)) {
if (dim_order[i] != static_cast<DimOrderType>(i)) {
return false;
}
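For reference, the two helpers touched here encode simple invariants: validate_dim_order rejects any entry that is >= dims, and is_contiguous_dim_order accepts exactly the identity permutation {0, 1, ..., dims - 1}. A usage sketch for the public helper, assuming the executorch::runtime namespace shown in the hunk context:

#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>

#include <cstdint>

void dim_order_example() {
  const uint8_t contiguous[] = {0, 1, 2, 3}; // identity permutation
  const uint8_t channels_last[] = {0, 2, 3, 1}; // NHWC-style permutation
  // true: the identity permutation is the contiguous dim order.
  bool a = executorch::runtime::is_contiguous_dim_order(contiguous, 4);
  // false: any non-identity order fails the check.
  bool b = executorch::runtime::is_contiguous_dim_order(channels_last, 4);
  (void)a;
  (void)b;
}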
runtime/core/exec_aten/util/targets.bzl (2 additions, 0 deletions)
@@ -40,6 +40,7 @@ def define_common_targets():
],
exported_deps = [
"//executorch/runtime/core:core",
"//executorch/runtime/core/portable_type/c10/c10:c10",
],
visibility = [
"//executorch/...",
@@ -62,6 +63,7 @@
exported_deps = [
":tensor_dimension_limit",
"//executorch/runtime/core:core",
"//executorch/runtime/core/portable_type/c10/c10:c10",
] + [
"//executorch/runtime/core/exec_aten:lib" + aten_suffix,
":scalar_type_util" + aten_suffix,
runtime/core/exec_aten/util/tensor_util.h (8 additions, 7 deletions)
@@ -17,6 +17,7 @@

#include <limits>

+#include <c10/util/irange.h>
#include <executorch/runtime/core/array_ref.h>
#include <executorch/runtime/core/error.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
@@ -277,7 +278,7 @@
a_strides = a__.strides(); \
const ::executorch::aten::ArrayRef<executorch::aten::StridesType> \
b_strides = b__.strides(); \
-for (size_t i = 0; i < a__.dim(); i++) { \
+for (const auto i : c10::irange(a__.dim())) { \
ET_CHECK_MSG( \
a_strides[i] == b_strides[i], \
"a.strides()[%zu] shall equal to b.strides()[%zu], " \
@@ -309,7 +310,7 @@
b_strides = b__.strides(); \
const ::executorch::aten::ArrayRef<executorch::aten::StridesType> \
c_strides = c__.strides(); \
-for (size_t i = 0; i < a__.dim(); i++) { \
+for (const auto i : c10::irange(a__.dim())) { \
ET_CHECK_MSG( \
a_strides[i] == b_strides[i] && b_strides[i] == c_strides[i], \
"a_strides[%zu], b_strides[%zu] and c_strides[%zu] " \
@@ -967,7 +968,7 @@ inline size_t coordinateToIndex(
const executorch::aten::Tensor& tensor,
const size_t* const coordinate) {
size_t index = 0;
-for (int d = 0; d < tensor.dim(); ++d) {
+for (const auto d : c10::irange(tensor.dim())) {
index += coordinate[d] * getTrailingDims(tensor, d);
}
return index;
@@ -999,7 +1000,7 @@ inline size_t coordinateToIndexWithTrailingDimsMemo(
const size_t* const coordinate,
const size_t trailing_dims_memo[kTensorDimensionLimit]) {
size_t index = 0;
-for (int d = 0; d < tensor.dim(); ++d) {
+for (const auto d : c10::irange(tensor.dim())) {
index += coordinate[d] * trailing_dims_memo[d];
}
return index;
@@ -1021,7 +1022,7 @@ inline void indexToCoordinate(
size_t index,
size_t* coordinate) {
ET_CHECK(index < static_cast<size_t>(tensor.numel()));
-for (auto i = 0; i < tensor.dim(); ++i) {
+for (const auto i : c10::irange(tensor.dim())) {
auto dim = tensor.dim() - 1 - i;
size_t dim_size = tensor.size(dim);
coordinate[dim] = index % dim_size;
@@ -1211,7 +1212,7 @@ ET_NODISCARD inline Error resize_tensor(
std::array<executorch::aten::SizesType, kTensorDimensionLimit>
new_sizes_casted{};
size_t new_sizes_ndim = new_sizes.size();
-for (size_t i = 0; i < new_sizes_ndim; ++i) {
+for (const auto i : c10::irange(new_sizes_ndim)) {
new_sizes_casted[i] =
static_cast<executorch::aten::SizesType>(new_sizes[i]);
}
@@ -1342,7 +1343,7 @@ inline size_t calculate_linear_index(
const executorch::aten::StridesType* strides,
const size_t ndim) {
size_t index = 0;
-for (size_t i = 0; i < ndim; i++) {
+for (const auto i : c10::irange(ndim)) {
index += coordinate[i] * strides[i];
}
return index;
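The indexing helpers in this file all compute the same dot product: given a coordinate (c_0, ..., c_{n-1}) and strides (s_0, ..., s_{n-1}), the linear index is c_0*s_0 + ... + c_{n-1}*s_{n-1}. A self-contained sketch of calculate_linear_index's logic with a worked check (illustrative, not the header itself):

#include <c10/util/irange.h>

#include <cassert>
#include <cstddef>
#include <cstdint>

// Sketch: linear index = sum over i of coordinate[i] * strides[i].
size_t linear_index(
    const size_t* coordinate, const int64_t* strides, size_t ndim) {
  size_t index = 0;
  for (const auto i : c10::irange(ndim)) {
    index += coordinate[i] * strides[i];
  }
  return index;
}

void linear_index_example() {
  // A contiguous 2x3x4 tensor has strides {12, 4, 1}, so coordinate
  // (1, 2, 3) maps to 1*12 + 2*4 + 3*1 = 23.
  const size_t coord[] = {1, 2, 3};
  const int64_t strides[] = {12, 4, 1};
  assert(linear_index(coord, strides, 3) == 23);
}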
runtime/core/exec_aten/util/tensor_util_aten.cpp (4 additions, 3 deletions)
@@ -9,6 +9,7 @@
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>

#include <ATen/Tensor.h> // @manual
+#include <c10/util/irange.h>
#include <executorch/runtime/platform/assert.h>

namespace executorch {
@@ -41,7 +42,7 @@ bool tensor_has_valid_dim_order(at::Tensor t) {

if (!validate_dim_order(dim_order, t.dim())) {
ET_LOG(Error, "Tensor dim order is not valid:");
-for (size_t d = 0; d < t.dim(); ++d) {
+for (const auto d : c10::irange(t.dim())) {
ET_LOG(
Error,
" dim_order(%zu): %zu",
@@ -66,7 +67,7 @@ inline bool tensor_is_default_or_channels_last_dim_order(at::Tensor t) {
ET_LOG(
Error,
"Expected tensor to have default or channels last dim order, but got");
-for (size_t d = 0; d < t.dim(); ++d) {
+for (const auto d : c10::irange(t.dim())) {
ET_LOG(
Error,
" dim_order(%zu): %zu",
@@ -96,7 +97,7 @@ bool tensors_have_same_dim_order(
bool all_channels_last =
is_channels_last_dim_order(first_dim_order, tensor_list[0].dim());

-for (size_t i = 1; i < tensor_list.size(); ++i) {
+for (const auto i : c10::irange(1, tensor_list.size())) {
ET_CHECK_OR_RETURN_FALSE(
get_dim_order(tensor_list[i], other_dim_order, tensor_list[i].dim()) ==
Error::Ok,
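The last hunk uses the two-argument overload: c10::irange(begin, end) iterates [begin, end), which is how the loop skips tensor_list[0], the reference tensor whose dim order the others are compared against. A minimal sketch:

#include <c10/util/irange.h>

#include <iostream>
#include <vector>

int main() {
  std::vector<int> v = {10, 20, 30, 40};
  // Two-argument form: visits indices 1, 2, 3 and skips element 0,
  // just as the dim-order comparison skips the reference tensor.
  for (const auto i : c10::irange(1, v.size())) {
    std::cout << v[i] << '\n';
  }
  return 0;
}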