Skip to content

Commit f4004de

Browse files
committed
[et][dim order] dim order variant empty operator
Pull Request resolved: #7154 ghstack-source-id: 256155831 @exported-using-ghexport This diff introduces a dim-order variant of the empty operator, to replace the original empty operator when dim order is used in ExecuTorch. Differential Revision: [D66683250](https://our.internmc.facebook.com/intern/diff/D66683250/)
1 parent aa67cd9 commit f4004de

File tree

8 files changed

+377
-0
lines changed

8 files changed

+377
-0
lines changed
Lines changed: 111 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,111 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
10+
#include <executorch/runtime/kernel/kernel_includes.h>
11+
#include <executorch/runtime/platform/assert.h>
12+
13+
#include <cstdint>
14+
#include <cstring>
15+
16+
namespace torch {
17+
namespace executor {
18+
namespace native {
19+
20+
using exec_aten::IntArrayRef;
21+
using exec_aten::Tensor;
22+
using OptionalIntArrayRef = exec_aten::OptionalArrayRef<int64_t>;
23+
using DimOrderArrayRef = exec_aten::ArrayRef<executorch::aten::DimOrderType>;
24+
// Out ATen tensor must have strides matching the memory format implied by
// dim_order. This bound sizes the stack-allocated strides buffer used by the
// check below; tensors with more dims than this are rejected.
constexpr size_t kMaxNumOfDimensions = 16;
26+
27+
namespace {
28+
29+
/*
 * Validates that `out` is compatible with the requested dim order.
 *
 * If `dim_order` is set, it must be a supported (contiguous or channels-last)
 * dim order whose length equals out's rank, and out's strides must equal the
 * strides implied by that dim order. If unset, the default contiguous dim
 * order [0, 1, ..., rank-1] is used.
 *
 * Returns true on success; logs and returns false on any mismatch.
 */
inline bool _check__empty_out_dim_order(
    OptionalIntArrayRef dim_order,
    Tensor& out) {
  exec_aten::ArrayRef<int64_t> dim_order_ref;
  // Backing storage for the default dim order; must outlive dim_order_ref.
  std::vector<int64_t> dim_order_vec;

  if (dim_order.has_value()) {
    // out tensor's dim order shall equal the caller-provided dim order.
    dim_order_ref = exec_aten::ArrayRef<int64_t>(
        dim_order.value().data(), dim_order.value().size());
  } else {
    // dim_order is not set; out tensor should be contiguous dim order.
    // Use int64_t to match the element type and avoid signed/unsigned mixing.
    for (int64_t i = 0; i < out.dim(); i++) {
      dim_order_vec.push_back(i);
    }
    dim_order_ref = exec_aten::ArrayRef<int64_t>(dim_order_vec);
  }

  // dim order length shall equal out tensor's rank.
  ET_LOG_AND_RETURN_IF_FALSE(
      dim_order_ref.size() == static_cast<size_t>(out.dim()));

  // Only contiguous and channels-last dim orders are supported.
  ET_LOG_AND_RETURN_IF_FALSE(
      is_channels_last_dim_order(dim_order_ref.data(), dim_order_ref.size()) ||
      is_contiguous_dim_order(dim_order_ref.data(), dim_order_ref.size()));

  // out's strides must equal the strides implied by dim_order.
  ET_LOG_AND_RETURN_IF_FALSE(
      static_cast<size_t>(out.dim()) <= kMaxNumOfDimensions);
  exec_aten::StridesType target_strides[kMaxNumOfDimensions];
  dim_order_to_stride_nocheck(
      out.sizes().data(),
      dim_order_ref.data(),
      dim_order_ref.size(),
      target_strides);

  for (size_t i = 0; i < dim_order_ref.size(); i++) {
    ET_LOG_AND_RETURN_IF_FALSE(target_strides[i] == out.strides()[i]);
  }

  return true;
}
67+
68+
} // namespace
69+
70+
/*
71+
* Empty out tensor with specified dim order
72+
*
73+
* _empty_dim_order.out(SymInt[] size, *, int[]? dim_order=None, Tensor(a!) out)
74+
* -> Tensor(a!)
75+
*/
76+
Tensor& _empty_dim_order_out(
77+
KernelRuntimeContext& context,
78+
IntArrayRef size,
79+
OptionalIntArrayRef dim_order,
80+
Tensor& out) {
81+
(void)context;
82+
83+
// Check if dim_order is valid
84+
ET_KERNEL_CHECK(
85+
context,
86+
_check__empty_out_dim_order(dim_order, out),
87+
InvalidArgument,
88+
out);
89+
90+
// Resize for dynamic shape
91+
ET_KERNEL_CHECK_MSG(
92+
context,
93+
resize_tensor(out, size) == Error::Ok,
94+
InvalidArgument,
95+
out,
96+
"Failed to resize output tensor.");
97+
98+
return out;
99+
}
100+
101+
// Context-free convenience overload: forwards to the main implementation
// using a default-constructed kernel runtime context.
Tensor& _empty_dim_order_out(
    IntArrayRef size,
    OptionalIntArrayRef dim_order,
    Tensor& out) {
  executorch::runtime::KernelRuntimeContext default_context{};
  return _empty_dim_order_out(default_context, size, dim_order, out);
}
108+
109+
} // namespace native
110+
} // namespace executor
111+
} // namespace torch

kernels/aten/cpu/targets.bzl

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,9 @@ load("@fbsource//xplat/executorch/kernels/portable:op_registration_util.bzl", "d
99
# ops, and must be split. They can, however, share common code via a library dep
1010
# if necessary.
1111
_EDGE_DIALECT_OPS = (
12+
op_target(
13+
name = "op__empty_dim_order",
14+
),
1215
op_target(
1316
name = "op__to_dim_order_copy",
1417
deps = [

kernels/aten/edge_dialect_aten_op.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,11 @@
22
#
33
# This yaml file contains operators that are defined by ExecuTorch and used in ATen mode.
44

5+
- func: dim_order_ops::_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
6+
kernels:
7+
- arg_meta: null
8+
kernel_name: torch::executor::_empty_dim_order_out
9+
510
- func: dim_order_ops::_to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
611
kernels:
712
- arg_meta: null
Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
10+
#include <executorch/runtime/kernel/kernel_includes.h>
11+
#include <executorch/runtime/platform/assert.h>
12+
13+
#include <cstdint>
14+
#include <cstring>
15+
16+
namespace torch {
17+
namespace executor {
18+
namespace native {
19+
20+
using exec_aten::Tensor;
21+
using OptionalIntArrayRef = exec_aten::OptionalArrayRef<int64_t>;
22+
using DimOrderArrayRef = exec_aten::ArrayRef<executorch::aten::DimOrderType>;
23+
24+
namespace {
25+
26+
// Returns true iff `dim_order` (or, when unset, the default contiguous dim
// order) is a supported dim order that matches `out`'s actual dim order.
// Logs and returns false otherwise.
bool _check__empty_out_dim_order(OptionalIntArrayRef dim_order, Tensor& out) {
  DimOrderArrayRef out_dim_order = out.dim_order();

  if (dim_order.has_value()) {
    IntArrayRef dim_order_ref = dim_order.value();

    // Only contiguous and channels-last dim orders are supported.
    ET_LOG_AND_RETURN_IF_FALSE(
        is_channels_last_dim_order(
            dim_order_ref.data(), dim_order_ref.size()) ||
        is_contiguous_dim_order(dim_order_ref.data(), dim_order_ref.size()));

    // Out tensor shall have the same dim order as dim_order. Reuse the
    // out_dim_order fetched above instead of re-fetching into a shadowing
    // local, as the original code did.
    ET_LOG_AND_RETURN_IF_FALSE(out_dim_order.size() == dim_order_ref.size());
    for (size_t i = 0; i < dim_order_ref.size(); i++) {
      ET_LOG_AND_RETURN_IF_FALSE(out_dim_order[i] == dim_order_ref[i]);
    }
  } else {
    // dim_order is not set; out tensor should be contiguous dim order.
    ET_LOG_AND_RETURN_IF_FALSE(
        is_contiguous_dim_order(out_dim_order.data(), out_dim_order.size()));
  }
  return true;
}
52+
53+
} // namespace
54+
55+
/*
56+
* Empty out tensor with specified dim order
57+
*
58+
* _empty_dim_order.out(SymInt[] size, *, int[]? dim_order=None, Tensor(a!) out)
59+
* -> Tensor(a!)
60+
*/
61+
Tensor& _empty_dim_order_out(
62+
KernelRuntimeContext& context,
63+
IntArrayRef size,
64+
OptionalIntArrayRef dim_order,
65+
Tensor& out) {
66+
(void)context;
67+
68+
// Check if dim_order is valid
69+
_check__empty_out_dim_order(dim_order, out);
70+
71+
// Resize for dynamic shape
72+
ET_KERNEL_CHECK_MSG(
73+
context,
74+
resize_tensor(out, size) == Error::Ok,
75+
InvalidArgument,
76+
out,
77+
"Failed to resize output tensor.");
78+
79+
return out;
80+
}
81+
82+
} // namespace native
83+
} // namespace executor
84+
} // namespace torch

kernels/portable/functions.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -937,6 +937,11 @@
937937
- arg_meta: null
938938
kernel_name: torch::executor::zeros_out
939939

940+
- func: dim_order_ops::_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
941+
kernels:
942+
- arg_meta: null
943+
kernel_name: torch::executor::_empty_dim_order_out
944+
940945
- func: dim_order_ops::_to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
941946
kernels:
942947
- arg_meta: null
Lines changed: 162 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,162 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
10+
#include <executorch/kernels/test/TestUtil.h>
11+
#include <executorch/kernels/test/supported_features.h>
12+
#include <executorch/runtime/core/exec_aten/exec_aten.h>
13+
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
14+
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
15+
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
16+
17+
#include <gtest/gtest.h>
18+
19+
using namespace ::testing;
20+
using exec_aten::DimOrderType;
21+
using exec_aten::IntArrayRef;
22+
using exec_aten::optional;
23+
using exec_aten::OptionalArrayRef;
24+
using exec_aten::ScalarType;
25+
using exec_aten::Tensor;
26+
using torch::executor::testing::TensorFactory;
27+
28+
// Test fixture for the dim_order_ops::_empty_dim_order.out operator.
// Wraps the generated _empty_dim_order_outf entry point and provides helpers
// shared by the per-dtype success tests and the failure-case tests below.
class OpEmptyDimOrderOutTest : public OperatorTest {
 protected:
  // Invokes the operator under test with the fixture's kernel runtime context.
  Tensor& op_empty_dim_order_out(
      IntArrayRef size,
      OptionalArrayRef<int64_t> dim_order,
      Tensor& out) {
    return torch::executor::dim_order_ops::_empty_dim_order_outf(
        context_, size, dim_order, out);
  }

  // Smoke test: runs the operator for DTYPE with an unset dim_order (i.e.
  // the default contiguous dim order) on a tensor of the given sizes.
  template <ScalarType DTYPE>
  void test_op_empty_dim_order_out(std::vector<int32_t>&& size_int32_t) {
    TensorFactory<DTYPE> tf;
    // The operator takes int64_t sizes while the factory takes int32_t.
    std::vector<int64_t> sizes(size_int32_t.begin(), size_int32_t.end());
    auto aref = exec_aten::ArrayRef<int64_t>(sizes.data(), sizes.size());
    OptionalArrayRef<int64_t> dim_order;
    Tensor out = tf.ones(size_int32_t);

    op_empty_dim_order_out(aref, dim_order, out);
  }

  // dim_order has fewer entries than the tensor's rank -> kernel must fail.
  void too_short_dim_order_die() {
    TensorFactory<ScalarType::Float> tf;

    int64_t sizes[3] = {3, 2, 4};
    auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

    // Only two entries for a rank-3 tensor.
    int64_t raw_dim_order[2] = {0, 1};
    auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
    Tensor out =
        tf.ones({3, 2, 4}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
    ET_EXPECT_KERNEL_FAILURE(
        context_, op_empty_dim_order_out(sizes_aref, dim_order, out));
  }

  // dim_order is not a valid permutation of [0, rank) -> kernel must fail.
  void illegal_dim_order_die() {
    TensorFactory<ScalarType::Float> tf;

    int64_t sizes[2] = {3, 2};
    auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

    // {1, 2} is not a permutation of {0, 1}.
    int64_t raw_dim_order[2] = {1, 2};
    auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
    Tensor out =
        tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
    ET_EXPECT_KERNEL_FAILURE(
        context_, op_empty_dim_order_out(sizes_aref, dim_order, out));
  }

  // dim_order disagrees with the out tensor's actual (channels-last) dim
  // order -> kernel must fail.
  void wrong_dim_order_die() {
    TensorFactory<ScalarType::Float> tf;

    int64_t sizes[4] = {3, 2, 4, 5};
    auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

    // should be {0, 2, 3, 1}
    int64_t raw_dim_order[4] = {0, 1, 2, 3};
    auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
    Tensor out = tf.full_channels_last(
        {3, 2, 4, 5}, 1, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
    ET_EXPECT_KERNEL_FAILURE(
        context_, op_empty_dim_order_out(sizes_aref, dim_order, out));
  }
};
92+
93+
// Instantiates one TEST_F per dtype exercising the operator (with unset
// dim_order) on a 3-D shape, a zero-element shape, and a 0-D scalar shape.
#define GENERATE_TEST(_, DTYPE)                                \
  TEST_F(OpEmptyDimOrderOutTest, DTYPE##Tensors) {             \
    test_op_empty_dim_order_out<ScalarType::DTYPE>({2, 3, 4}); \
    test_op_empty_dim_order_out<ScalarType::DTYPE>({2, 0, 4}); \
    test_op_empty_dim_order_out<ScalarType::DTYPE>({});        \
  }

// Expand the generator for all real scalar types plus Bool.
ET_FORALL_REAL_TYPES_AND(Bool, GENERATE_TEST)
101+
102+
// Upper-bound shape of the out tensor exactly matches the requested size:
// the resize is a no-op and the call should succeed.
TEST_F(OpEmptyDimOrderOutTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor out =
      tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);

  int64_t target_sizes[2] = {3, 2};
  auto size_ref = exec_aten::ArrayRef<int64_t>(target_sizes);
  OptionalArrayRef<int64_t> unset_dim_order;
  op_empty_dim_order_out(size_ref, unset_dim_order, out);
}
112+
113+
// A contiguous dim_order {0, 1} paired with a contiguous out tensor should
// succeed. (Renamed from "ContiguousDimOrderSuccees" to fix the typo.)
TEST_F(OpEmptyDimOrderOutTest, ContiguousDimOrderSucceeds) {
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[2] = {3, 2};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

  int64_t raw_dim_order[2] = {0, 1};
  auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
  Tensor out =
      tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}
125+
126+
// A channels-last dim_order {0, 2, 3, 1} paired with a channels-last out
// tensor should succeed. (Renamed from "ChannelsLastsDimOrderSuccees" to fix
// the typos.)
TEST_F(OpEmptyDimOrderOutTest, ChannelsLastDimOrderSucceeds) {
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[4] = {3, 2, 4, 5};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

  int64_t raw_dim_order[4] = {0, 2, 3, 1};
  auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
  Tensor out = tf.full_channels_last(
      {3, 2, 4, 5}, 1, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}
138+
139+
// Upper-bound shape of the out tensor is larger than the requested size:
// the kernel should shrink it via resize_tensor and succeed.
TEST_F(OpEmptyDimOrderOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor out =
      tf.ones({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);

  int64_t target_sizes[2] = {3, 2};
  auto size_ref = exec_aten::ArrayRef<int64_t>(target_sizes);
  OptionalArrayRef<int64_t> unset_dim_order;
  op_empty_dim_order_out(size_ref, unset_dim_order, out);
}
149+
150+
// Fully unbound dynamic shape: the out tensor starts smaller than the
// requested size, so this only runs where the kernel supports output resize.
TEST_F(OpEmptyDimOrderOutTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape unbound not supported";
  }
  TensorFactory<ScalarType::Float> tf;

  Tensor out =
      tf.ones({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);

  int64_t target_sizes[2] = {3, 2};
  auto size_ref = exec_aten::ArrayRef<int64_t>(target_sizes);
  OptionalArrayRef<int64_t> unset_dim_order;
  op_empty_dim_order_out(size_ref, unset_dim_order, out);
}

0 commit comments

Comments
 (0)