Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
45 changes: 45 additions & 0 deletions runtime/core/exec_aten/util/dim_order_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,13 @@
#pragma once

#include <c10/util/irange.h>
#include <array>
#include <cstdint>
#include <cstdio>
#include <cstring>

#include <executorch/runtime/core/error.h>
#include <executorch/runtime/core/exec_aten/util/tensor_dimension_limit.h>
#include <executorch/runtime/platform/assert.h>
#include <executorch/runtime/platform/compiler.h>

Expand Down Expand Up @@ -261,6 +263,49 @@ ET_NODISCARD inline Error stride_to_dim_order(
return Error::Ok;
}

/**
 * Maximum size of a string returned by dim_order_to_c_string, for
 * stack allocation.
 */
constexpr size_t kDimOrderStringSizeLimit = 1 + /* opening parenthesis */
    4 * kTensorDimensionLimit + /* maximum size of each printed element,
                                 * including comma and space */
    1; /* padding for NULL terminator */

/**
 * Formats a dim order as a human-readable C string, e.g. "(0, 1, 2, 3)".
 *
 * The result is always NUL-terminated. If the formatted order does not
 * fit in kDimOrderStringSizeLimit bytes, the output is truncated (the
 * closing parenthesis may be dropped) but remains a valid C string.
 *
 * @param dim_order Pointer to `dims` dim-order entries.
 * @param dims Number of entries to print.
 * @return A zero-initialized, stack-allocatable buffer containing the
 *     formatted string starting at .data().
 */
template <typename DimOrderType>
inline std::array<char, kDimOrderStringSizeLimit> dim_order_to_c_string(
    const DimOrderType* dim_order,
    const size_t dims) {
  std::array<char, kDimOrderStringSizeLimit> result = {0};
  char* p = result.data();
  static_assert(
      kDimOrderStringSizeLimit >= 3,
      "Invalid value for kDimOrderStringSizeLimit");
  // Reserve the last byte so the buffer stays NUL-terminated even on
  // truncation; snprintf never writes more than remaining_size bytes.
  size_t remaining_size = kDimOrderStringSizeLimit - 1;

  // Returns true if `written` indicates an encoding error (negative) or
  // truncation (would need >= remaining bytes). Note: snprintf returns a
  // signed int, so it must be range-checked before comparing against the
  // unsigned remaining size.
  const auto failed = [](int written, size_t remaining) {
    return written < 0 || static_cast<size_t>(written) >= remaining;
  };

  int chars_written = snprintf(p, remaining_size, "(");
  if (failed(chars_written, remaining_size)) {
    return result;
  }
  remaining_size -= static_cast<size_t>(chars_written);
  p += chars_written;

  for (size_t i = 0; i < dims; ++i) {
    // Last element has no trailing ", " separator.
    chars_written = snprintf(
        p,
        remaining_size,
        i != dims - 1 ? "%d, " : "%d",
        static_cast<int>(dim_order[i]));
    if (failed(chars_written, remaining_size)) {
      return result;
    }
    remaining_size -= static_cast<size_t>(chars_written);
    p += chars_written;
  }

  // Best-effort closing parenthesis; if it doesn't fit, the string is
  // simply truncated.
  snprintf(p, remaining_size, ")");
  return result;
}

} // namespace runtime
} // namespace executorch

Expand Down
17 changes: 17 additions & 0 deletions runtime/core/exec_aten/util/test/dim_order_util_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@

#include <gtest/gtest.h>

using executorch::runtime::dim_order_to_c_string;
using executorch::runtime::dim_order_to_stride;
using executorch::runtime::Error;
using executorch::runtime::is_channels_last_dim_order;
Expand Down Expand Up @@ -286,3 +287,19 @@ TEST(DimOrderUtilTest, IsChannelsLastDimOrderFailCasesTest) {
EXPECT_FALSE(is_channels_last_dim_order(dim_order_4d, 4));
EXPECT_FALSE(is_channels_last_dim_order(dim_order_5d, 5));
}

TEST(DimOrderUtilTest, DimOrderToCStringSimple) {
  // A 4-d contiguous dim order should print as a parenthesized,
  // comma-separated list.
  std::array<executorch::aten::DimOrderType, 4> dim_order = {0, 1, 2, 3};
  auto c_str = dim_order_to_c_string(dim_order.data(), dim_order.size());

  // EXPECT_STREQ reports both strings on failure, unlike
  // EXPECT_TRUE(strcmp(...) == 0), which only reports "false".
  EXPECT_STREQ(c_str.data(), "(0, 1, 2, 3)");
}
TEST(DimOrderUtilTest, DimOrderToCStringHandlesOverflow) {
  // A dim count far beyond kTensorDimensionLimit must not overflow the
  // fixed-size result buffer.
  constexpr size_t dim_count = 1000;
  std::vector<executorch::aten::DimOrderType> dim_order(dim_count);
  std::iota(dim_order.begin(), dim_order.end(), 0);

  auto c_str = dim_order_to_c_string(dim_order.data(), dim_count);
  // The last byte must stay a NUL terminator...
  EXPECT_EQ(c_str[c_str.size() - 1], '\0');
  // ...and the printed string must actually terminate within the buffer.
  // (Checking only the final byte would pass trivially, since the buffer
  // is zero-initialized.)
  EXPECT_LT(strlen(c_str.data()), c_str.size());
}
27 changes: 27 additions & 0 deletions runtime/executor/method.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1032,6 +1032,7 @@ Method::set_input(const EValue& input_evalue, size_t input_idx) {
const auto& t_dst = e.toTensor();
const auto& t_src = input_evalue.toTensor();

// Validate scalar type and dim order
ET_CHECK_OR_RETURN_ERROR(
t_dst.scalar_type() == t_src.scalar_type(),
InvalidArgument,
Expand All @@ -1040,6 +1041,32 @@ Method::set_input(const EValue& input_evalue, size_t input_idx) {
input_idx,
executorch::runtime::toString(t_dst.scalar_type()),
executorch::runtime::toString(t_src.scalar_type()));

if (!tensors_have_same_dim_order(t_dst, t_src)) {
#if ET_LOG_ENABLED
std::array<exec_aten::DimOrderType, kTensorDimensionLimit> dst_dim_order;
std::array<exec_aten::DimOrderType, kTensorDimensionLimit> src_dim_order;
ET_CHECK_OK_OR_RETURN_ERROR(
get_dim_order(t_dst, dst_dim_order.data(), dst_dim_order.size()));
ET_CHECK_OK_OR_RETURN_ERROR(
get_dim_order(t_src, src_dim_order.data(), src_dim_order.size()));

auto dst_dim_order_c_str =
dim_order_to_c_string(dst_dim_order.data(), t_dst.dim());
auto src_dim_order_c_str =
dim_order_to_c_string(src_dim_order.data(), t_src.dim());

ET_LOG(
Error,
"Input %zu has unexpected dim order: expected %s but was %s.",
input_idx,
dst_dim_order_c_str.data(),
src_dim_order_c_str.data());

#endif
return Error::InvalidArgument;
}

// Reset the shape for the Method's input as the size of forwarded input
// tensor for shape dynamism. Also is a safety check if need memcpy.
Error err = resize_tensor(t_dst, t_src.sizes());
Expand Down
32 changes: 32 additions & 0 deletions runtime/executor/test/method_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -339,6 +339,38 @@ TEST_F(MethodTest, ProgramDataSeparationTest) {
ASSERT_EQ(err, Error::Ok);
}

TEST_F(MethodTest, InputDimOrderMismatchTest) {
  /*
   * Verify that set_input() rejects an input tensor whose dim order does
   * not match the dim order the method expects, returning
   * Error::InvalidArgument.
   */

  ManagedMemoryManager mmm(kDefaultNonConstMemBytes, kDefaultRuntimeMemBytes);
  Result<Method> method = programs_["cat"]->load_method("forward", &mmm.get());
  ASSERT_EQ(method.error(), Error::Ok);

  // Build a (2, 4) float tensor whose dim order is deliberately swapped
  // to {1, 0} (with matching strides) while the method expects the
  // contiguous order {0, 1}.
  constexpr int buffer_size = 16;
  float buffer[buffer_size] = {}; // zero-initialized input data
  int32_t sizes[2] = {2, 4};
  uint8_t dim_order[2] = {1, 0};
  int32_t strides[2] = {1, 4};
  executorch::aten::TensorImpl impl(
      executorch::aten::ScalarType::Float,
      2,
      sizes,
      buffer,
      dim_order,
      strides);

  auto input_err =
      method->set_input(EValue(executorch::aten::Tensor(&impl)), 0);
  ASSERT_EQ(input_err, Error::InvalidArgument);
}

/*
* TODO(T161163608): Test is disabled due to a resize bug in tensor_index_out of
* the portable op lib
Expand Down
Loading