12 changes: 11 additions & 1 deletion runtime/core/targets.bzl
@@ -44,7 +44,6 @@ def define_common_targets():
"named_data_map.h",
"result.h",
"span.h",
"tensor_layout.h",
],
visibility = [
"//executorch/...",
@@ -133,3 +132,14 @@ def define_common_targets():
"//executorch/...",
],
)

runtime.cxx_library(
name = "tensor_layout",
srcs = ["tensor_layout.cpp"],
exported_headers = ["tensor_layout.h"],
exported_deps = [
":core",
"//executorch/runtime/core/exec_aten:lib",
],
visibility = ["//executorch/..."],
)
54 changes: 54 additions & 0 deletions runtime/core/tensor_layout.cpp
@@ -0,0 +1,54 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/

#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
#include <executorch/runtime/core/span.h>
#include <executorch/runtime/core/tensor_layout.h>

namespace executorch {
namespace runtime {

namespace {
Result<size_t> calculate_nbytes(
const Span<const int32_t>& sizes,
const exec_aten::ScalarType& scalar_type) {
ssize_t n = 1;
for (ssize_t i = 0; i < sizes.size(); i++) {
if (sizes[i] < 0) {
return Error::InvalidArgument;
}
n *= sizes[i];
}
// Use the full namespace to disambiguate from c10::elementSize.
return n * executorch::runtime::elementSize(scalar_type);
}
} // namespace

Result<TensorLayout> TensorLayout::create(
Span<const int32_t> sizes,
Span<const uint8_t> dim_order,
executorch::aten::ScalarType scalar_type) {
auto nbytes = calculate_nbytes(sizes, scalar_type);
if (!nbytes.ok()) {
return nbytes.error();
}

if (dim_order.size() != sizes.size()) {
return Error::InvalidArgument;
}

for (size_t i = 0; i < dim_order.size(); i++) {
if (dim_order[i] >= sizes.size()) {
return Error::InvalidArgument;
}
}
return TensorLayout(sizes, dim_order, scalar_type, nbytes.get());
}
} // namespace runtime
} // namespace executorch
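
For reference, calculate_nbytes reduces to the product of the sizes times the per-element size. Below is a minimal standalone sketch of that arithmetic (names are illustrative and independent of the ExecuTorch headers; it assumes a 4-byte float, which matches elementSize(ScalarType::Float) as exercised by the test further down):

// Standalone sketch of the nbytes arithmetic above; not ExecuTorch code.
#include <cassert>
#include <cstddef>
#include <cstdint>

size_t nbytes_for(const int32_t* sizes, size_t rank, size_t element_size) {
  size_t n = 1;
  for (size_t i = 0; i < rank; ++i) {
    n *= static_cast<size_t>(sizes[i]); // negative sizes are rejected earlier
  }
  return n * element_size;
}

int main() {
  const int32_t sizes[2] = {1, 2};
  // 1 * 2 elements * 4 bytes per float = 8 bytes, matching the test below.
  assert(nbytes_for(sizes, 2, /*element_size=*/4) == 8);
  return 0;
}
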
72 changes: 37 additions & 35 deletions runtime/core/tensor_layout.h
@@ -10,55 +10,48 @@

#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
#include <executorch/runtime/core/result.h>
#include <executorch/runtime/core/span.h>

namespace executorch {
namespace runtime {

namespace {
size_t calculate_nbytes(
const Span<const int32_t>& sizes,
const exec_aten::ScalarType& scalar_type) {
ssize_t n = 1;
for (ssize_t i = 0; i < sizes.size(); i++) {
ET_CHECK(sizes[i] >= 0);
n *= sizes[i];
}
// Use the full namespace to disambiguate from c10::elementSize.
return n * executorch::runtime::elementSize(scalar_type);
}
} // namespace

/**
* Metadata describing the layout of external tensors (tensors that are not
* stored in the PTE file).
*
* The NamedDataMap used to create the TensorLayout must outlive the
* TensorLayout.
* Describes the layout of a tensor.
*/
class TensorLayout {
class ET_EXPERIMENTAL TensorLayout final {
public:
TensorLayout(
executorch::aten::ScalarType scalar_type,
Span<const int32_t> sizes,
Span<const uint8_t> dim_order)
: sizes_(sizes),
dim_order_(dim_order),
scalar_type_(scalar_type),
nbytes_(calculate_nbytes(sizes_, scalar_type_)) {}
TensorLayout() = delete;
Contributor comment:

I don't think you need to do this since you declare a non-default ctor.

https://en.cppreference.com/w/cpp/language/default_constructor
Implicitly-declared default constructor: "If there is no user-declared constructor or constructor template for a class type, the compiler will implicitly declare a default constructor as an inline public member of its class."

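To illustrate the reviewer's point, here is a minimal standalone sketch (not ExecuTorch code; the class name is hypothetical) showing that a user-declared constructor already suppresses the implicitly-declared default constructor, which would make the explicit "= delete" redundant:

// Standalone illustration of the implicitly-declared default constructor rule
// referenced in the comment above.
struct HasUserCtor {
  explicit HasUserCtor(int v) : value(v) {}
  int value;
};

int main() {
  HasUserCtor a(1); // OK: uses the user-declared constructor.
  // HasUserCtor b; // Would not compile: no default constructor is
  //                // implicitly declared, even without "= delete".
  return a.value == 1 ? 0 : 1;
}
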
TensorLayout(const TensorLayout&) = default;
TensorLayout(TensorLayout&&) = default;
TensorLayout& operator=(const TensorLayout&) = default;
TensorLayout& operator=(TensorLayout&& other) = default;
~TensorLayout() = default;
/**
* Creates a TensorLayout from the given parameters.
*
* @param[in] sizes The sizes of the tensor. Note: the span passed here must
* outlive the TensorLayout and all copies of it.
* @param[in] dim_order The dim order of the tensor. Note: the span passed
* here must outlive the TensorLayout and all copies of it.
* @param[in] scalar_type The scalar type of the tensor.
* @return A Result containing the TensorLayout on success, or an error.
*/
static executorch::runtime::Result<TensorLayout> create(
Span<const int32_t> sizes,
Span<const uint8_t> dim_order,
executorch::aten::ScalarType scalar_type);

/// Returns the sizes of the tensor.
/**
* Returns the sizes of the tensor.
*
* NOTE: The TensorLayout must outlive the spans returned here.
*/
Span<const int32_t> sizes() const {
return sizes_;
}

/// Returns the dim order of the tensor.
/**
* Returns the dim order of the tensor.
*
* NOTE: The TensorLayout must outlive the spans returned here.
*/
Span<const uint8_t> dim_order() const {
return dim_order_;
}
@@ -74,6 +67,15 @@ class TensorLayout {
}

private:
TensorLayout(
Span<const int32_t> sizes,
Span<const uint8_t> dim_order,
executorch::aten::ScalarType scalar_type,
size_t nbytes)
: sizes_(sizes),
dim_order_(dim_order),
scalar_type_(scalar_type),
nbytes_(nbytes) {}
/// The sizes of the tensor.
Span<const int32_t> sizes_;

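A hedged usage sketch against the header above, highlighting the doc comment's requirement that the spans' backing storage outlive the TensorLayout (the helper function and static arrays are illustrative, not part of the change):

#include <executorch/runtime/core/tensor_layout.h>

#include <array>
#include <cstdint>

using executorch::aten::ScalarType;
using executorch::runtime::Result;
using executorch::runtime::Span;
using executorch::runtime::TensorLayout;

Result<TensorLayout> make_float_layout() {
  // The backing arrays must outlive the TensorLayout and all copies of it,
  // per the create() doc comment, hence the static storage in this sketch.
  static const std::array<int32_t, 2> sizes = {1, 2};
  static const std::array<uint8_t, 2> dim_order = {0, 1};
  Span<const int32_t> sizes_span = {sizes.data(), sizes.size()};
  Span<const uint8_t> dim_order_span = {dim_order.data(), dim_order.size()};
  return TensorLayout::create(sizes_span, dim_order_span, ScalarType::Float);
}
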
3 changes: 1 addition & 2 deletions runtime/core/test/targets.bzl
@@ -19,8 +19,7 @@ def define_common_targets():
name = "tensor_layout_test",
srcs = ["tensor_layout_test.cpp"],
deps = [
"//executorch/runtime/core:core",
"//executorch/runtime/core/exec_aten:lib",
"//executorch/runtime/core:tensor_layout",
],
)

52 changes: 45 additions & 7 deletions runtime/core/test/tensor_layout_test.cpp
@@ -6,26 +6,31 @@
* LICENSE file in the root directory of this source tree.
*/

#include <executorch/runtime/core/error.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/result.h>
#include <executorch/runtime/core/tensor_layout.h>

#include <gtest/gtest.h>

using namespace ::testing;
using executorch::aten::ScalarType;
using executorch::runtime::Error;
using executorch::runtime::Result;
using executorch::runtime::Span;
using executorch::runtime::TensorLayout;

TEST(TestTensorLayout, Ctor) {
int32_t sizes[2] = {1, 2};
uint8_t dim_order[2] = {0, 1};
std::array<int32_t, 2> sizes = {1, 2};
std::array<uint8_t, 2> dim_order = {0, 1};
Span<const int32_t> sizes_span = {sizes.data(), sizes.size()};
Span<const uint8_t> dim_order_span = {dim_order.data(), dim_order.size()};

Span<const int32_t> sizes_span = {sizes, sizes + 2};
Span<const uint8_t> dim_order_span = {dim_order, dim_order + 2};

TensorLayout layout =
TensorLayout(ScalarType::Float, sizes_span, dim_order_span);
Result<TensorLayout> layout_res =
TensorLayout::create(sizes_span, dim_order_span, ScalarType::Float);
EXPECT_TRUE(layout_res.ok());

TensorLayout layout = layout_res.get();
EXPECT_EQ(layout.scalar_type(), ScalarType::Float);

EXPECT_EQ(layout.sizes().size(), sizes_span.size());
@@ -38,3 +43,36 @@ TEST(TestTensorLayout, Ctor) {

EXPECT_EQ(layout.nbytes(), 8);
}

TEST(TestTensorLayout, Ctor_InvalidDimOrder) {
std::array<int32_t, 1> sizes = {2};
std::array<uint8_t, 1> dim_order = {1};
Span<const int32_t> sizes_span = {sizes.data(), sizes.size()};
Span<const uint8_t> dim_order_span = {dim_order.data(), dim_order.size()};

Result<TensorLayout> layout_res =
TensorLayout::create(sizes_span, dim_order_span, ScalarType::Float);
EXPECT_EQ(layout_res.error(), Error::InvalidArgument);
}

TEST(TestTensorLayout, Ctor_InvalidSizes) {
std::array<int32_t, 1> sizes = {-1};
std::array<uint8_t, 1> dim_order = {0};
Span<const int32_t> sizes_span = {sizes.data(), sizes.size()};
Span<const uint8_t> dim_order_span = {dim_order.data(), dim_order.size()};

Result<TensorLayout> layout_res =
TensorLayout::create(sizes_span, dim_order_span, ScalarType::Float);
EXPECT_EQ(layout_res.error(), Error::InvalidArgument);
}

TEST(TestTensorLayout, Ctor_SizesDimOrderMismatch) {
std::array<int32_t, 1> sizes = {2};
std::array<uint8_t, 2> dim_order = {0, 1};
Span<const int32_t> sizes_span = {sizes.data(), sizes.size()};
Span<const uint8_t> dim_order_span = {dim_order.data(), dim_order.size()};

Result<TensorLayout> layout_res =
TensorLayout::create(sizes_span, dim_order_span, ScalarType::Float);
EXPECT_EQ(layout_res.error(), Error::InvalidArgument);
}