Skip to content

Commit 927bdda

Browse files
larryliu0820 authored and Zonglin Peng committed
Add unfold_copy.out
Differential Revision: D70597013 Pull Request resolved: #8952
1 parent d5dfaac commit 927bdda

File tree

8 files changed

+271
-0
lines changed

8 files changed

+271
-0
lines changed

kernels/aten/functions.yaml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -403,6 +403,8 @@
403403

404404
- op: unbind_copy.int_out
405405

406+
- op: unfold_copy.out
407+
406408
- op: unsafe_split.Tensor_out
407409

408410
- op: unsqueeze_copy.dim_out
Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
#include <c10/util/irange.h>
2+
#include <executorch/kernels/portable/cpu/util/copy_ops_util.h>
3+
#include <executorch/runtime/kernel/kernel_includes.h>
4+
#include <executorch/runtime/platform/assert.h>
5+
#include <cstring>
6+
namespace torch {
7+
namespace executor {
8+
namespace native {
9+
10+
using Tensor = executorch::aten::Tensor;
11+
12+
// unfold_copy(Tensor self, int dimension, int size, int step, *, Tensor(a!)
13+
// out) -> Tensor(a!)
14+
Tensor unfold_copy_out(
15+
KernelRuntimeContext& ctx,
16+
const Tensor& self,
17+
int64_t dim,
18+
int64_t size,
19+
int64_t step,
20+
Tensor& out) {
21+
(void)ctx;
22+
// Check if dimension is valid
23+
ET_KERNEL_CHECK(
24+
ctx, check_unfold_copy_args(self, dim, size, step), InvalidArgument, out);
25+
if (dim < 0) {
26+
dim += nonzero_dim(self);
27+
}
28+
// Calculate output size
29+
// @lint-ignore CLANGTIDY facebook-hte-CArray
30+
Tensor::SizesType expected_output_size[kTensorDimensionLimit];
31+
size_t expected_out_dim = 0;
32+
33+
get_unfold_copy_out_target_size(
34+
self, dim, size, step, expected_output_size, &expected_out_dim);
35+
36+
ET_KERNEL_CHECK(
37+
ctx,
38+
resize_tensor(out, {expected_output_size, expected_out_dim}) == Error::Ok,
39+
InvalidArgument,
40+
out);
41+
42+
// Copy data
43+
const size_t leading_dims = getLeadingDims(self, dim);
44+
const size_t trailing_dims = getTrailingDims(self, dim);
45+
ScalarType in_type = self.scalar_type();
46+
ScalarType out_type = out.scalar_type();
47+
48+
ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, "unfold_copy.out", CTYPE_IN, [&]() {
49+
const CTYPE_IN* input_ptr = self.const_data_ptr<CTYPE_IN>();
50+
ET_SWITCH_REALHBBF16_TYPES(
51+
out_type, ctx, "unfold_copy.out", CTYPE_OUT, [&] {
52+
CTYPE_OUT* out_ptr = out.mutable_data_ptr<CTYPE_OUT>();
53+
for (const auto i : c10::irange(leading_dims)) {
54+
const CTYPE_IN* src =
55+
input_ptr + i * self.size(dim) * trailing_dims;
56+
for (const auto j : c10::irange(out.size(dim))) {
57+
const CTYPE_IN* dim_src = src + j * step * trailing_dims;
58+
for (const auto k : c10::irange(trailing_dims)) {
59+
for (const auto l : c10::irange(size)) {
60+
*out_ptr = convert<CTYPE_OUT, CTYPE_IN>(
61+
dim_src[k + l * trailing_dims]);
62+
out_ptr++;
63+
}
64+
}
65+
}
66+
}
67+
});
68+
});
69+
return out;
70+
}
71+
} // namespace native
72+
} // namespace executor
73+
} // namespace torch

kernels/portable/cpu/util/copy_ops_util.cpp

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -964,5 +964,46 @@ void get_diagonal_copy_out_target_size(
964964
out_sizes[in.dim() - 2] = diagonal_size;
965965
}
966966

967+
bool check_unfold_copy_args(
968+
const Tensor& self,
969+
int64_t dim,
970+
int64_t size,
971+
int64_t step) {
972+
if (dim < 0) {
973+
dim += nonzero_dim(self);
974+
}
975+
ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(self, dim));
976+
ET_CHECK_OR_RETURN_FALSE(
977+
size >= 0, "size is %" PRId64 " but must be >= 0", size);
978+
ET_CHECK_OR_RETURN_FALSE(
979+
size <= self.size(dim),
980+
"maximum size for tensor at dimension %" PRId64
981+
" is %zd but size is %" PRId64,
982+
dim,
983+
self.size(dim),
984+
size);
985+
ET_CHECK_OR_RETURN_FALSE(
986+
step > 0, "step is %" PRId64 " but must be > 0", step);
987+
return true;
988+
}
989+
990+
void get_unfold_copy_out_target_size(
991+
const Tensor& self,
992+
int64_t dim,
993+
int64_t size,
994+
int64_t step,
995+
executorch::aten::SizesType* out_sizes,
996+
size_t* out_ndim) {
997+
for (auto i : c10::irange(self.dim())) {
998+
out_sizes[i] = self.size(i);
999+
}
1000+
// At `dim` dimension, we split the tensor into `size` chunks with `step`
1001+
// stride.
1002+
out_sizes[dim] = (self.size(dim) - size + step) / step;
1003+
1004+
out_sizes[self.dim()] = size;
1005+
*out_ndim = self.dim() + 1;
1006+
}
1007+
9671008
} // namespace executor
9681009
} // namespace torch

kernels/portable/cpu/util/copy_ops_util.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -233,5 +233,19 @@ void get_diagonal_copy_out_target_size(
233233
executorch::aten::SizesType* out_sizes,
234234
size_t* out_ndim);
235235

236+
bool check_unfold_copy_args(
237+
const Tensor& self,
238+
int64_t dim,
239+
int64_t size,
240+
int64_t step);
241+
242+
void get_unfold_copy_out_target_size(
243+
const Tensor& self,
244+
int64_t dim,
245+
int64_t size,
246+
int64_t step,
247+
executorch::aten::SizesType* out_sizes,
248+
size_t* out_ndim);
249+
236250
} // namespace executor
237251
} // namespace torch

kernels/portable/functions.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -917,6 +917,11 @@
917917
- arg_meta: null
918918
kernel_name: torch::executor::unbind_copy_int_out
919919

920+
- op: unfold_copy.out
921+
kernels:
922+
- arg_meta: null
923+
kernel_name: torch::executor::unfold_copy_out
924+
920925
- op: unsqueeze_copy.out
921926
kernels:
922927
- arg_meta: null
Lines changed: 129 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,129 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
10+
#include <executorch/runtime/core/exec_aten/exec_aten.h>
11+
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
12+
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
13+
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
14+
15+
#include <executorch/kernels/test/TestUtil.h>
16+
17+
#include <gtest/gtest.h>
18+
19+
using namespace ::testing;
20+
using executorch::aten::ScalarType;
21+
using executorch::aten::Tensor;
22+
using torch::executor::testing::TensorFactory;
23+
24+
class OpUnfoldTest : public OperatorTest {
25+
protected:
26+
Tensor& op_unfold_copy_out(
27+
const Tensor& self,
28+
int64_t dim,
29+
int64_t size,
30+
int64_t step,
31+
Tensor& out) {
32+
return torch::executor::aten::unfold_copy_outf(
33+
context_, self, dim, size, step, out);
34+
}
35+
36+
template <class CTYPE, ScalarType DTYPE>
37+
void test_unfold_copy_dtype() {
38+
TensorFactory<DTYPE> tf;
39+
40+
auto input = tf.make({3, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
41+
auto expected = tf.make({3, 2, 2}, {1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9});
42+
auto actual_out = tf.zeros_like(expected);
43+
op_unfold_copy_out(input, /*dim=*/1, /*size=*/2, /*step=*/1, actual_out);
44+
EXPECT_TENSOR_CLOSE(actual_out, expected);
45+
}
46+
};
47+
48+
// Basic float case: one window of 2 per row (size 2, step 2 along dim 1).
TEST_F(OpUnfoldTest, SmokeTest) {
  TensorFactory<ScalarType::Float> factory;
  const auto in = factory.make({3, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
  const auto want = factory.make({3, 1, 2}, {1, 2, 4, 5, 7, 8});
  auto got = factory.zeros_like(want);

  op_unfold_copy_out(in, /*dim=*/1, /*size=*/2, /*step=*/2, got);
  EXPECT_TENSOR_CLOSE(got, want);
}
57+
58+
// Exercises the fixed case across every supported real + half/bf16 dtype.
TEST_F(OpUnfoldTest, DType) {
#define RUN_UNFOLD_CASE(ctype, dtype) \
  test_unfold_copy_dtype<ctype, ScalarType::dtype>();
  ET_FORALL_REALHBF16_TYPES(RUN_UNFOLD_CASE);
#undef RUN_UNFOLD_CASE
}
64+
65+
// Unfolding along dim 0 gathers windows of two rows each.
TEST_F(OpUnfoldTest, ZeroDimension) {
  TensorFactory<ScalarType::Float> factory;
  const auto in = factory.make({3, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
  const auto want =
      factory.make({2, 3, 2}, {1, 4, 2, 5, 3, 6, 4, 7, 5, 8, 6, 9});
  auto got = factory.zeros_like(want);

  op_unfold_copy_out(in, /*dim=*/0, /*size=*/2, /*step=*/1, got);
  EXPECT_TENSOR_CLOSE(got, want);
}
75+
76+
// dim == -1 must resolve to the last dimension (same result as SmokeTest).
TEST_F(OpUnfoldTest, NegativeDimension) {
  TensorFactory<ScalarType::Float> factory;
  const auto in = factory.make({3, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
  const auto want = factory.make({3, 1, 2}, {1, 2, 4, 5, 7, 8});
  auto got = factory.zeros_like(want);

  op_unfold_copy_out(in, /*dim=*/-1, /*size=*/2, /*step=*/2, got);
  EXPECT_TENSOR_CLOSE(got, want);
}
85+
86+
// A step larger than the dimension still yields the single leading window.
TEST_F(OpUnfoldTest, LargeStep) {
  TensorFactory<ScalarType::Float> factory;
  const auto in = factory.make({3, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
  const auto want = factory.make({3, 1, 2}, {1, 2, 4, 5, 7, 8});
  auto got = factory.zeros_like(want);

  op_unfold_copy_out(in, /*dim=*/-1, /*size=*/2, /*step=*/5, got);
  EXPECT_TENSOR_CLOSE(got, want);
}
95+
96+
// size == 0 is legal and yields (3 - 0 + 1) / 1 == 4 empty windows per row.
TEST_F(OpUnfoldTest, ZeroSize) {
  TensorFactory<ScalarType::Float> factory;
  const auto in = factory.make({3, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
  const auto want = factory.make({3, 4, 0}, {});
  auto got = factory.zeros_like(want);

  op_unfold_copy_out(in, /*dim=*/1, /*size=*/0, /*step=*/1, got);
  EXPECT_TENSOR_CLOSE(got, want);
}
105+
106+
// Negative size and non-positive step must both be rejected by the kernel.
TEST_F(OpUnfoldTest, NegativeSizeAndNegativeStepDies) {
  TensorFactory<ScalarType::Float> factory;
  const auto in = factory.make({3, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
  auto got = factory.zeros({3, 1, 2});

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_unfold_copy_out(in, /*dim=*/1, /*size=*/-1, /*step=*/1, got));
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_unfold_copy_out(in, /*dim=*/1, /*size=*/1, /*step=*/-1, got));
}
118+
119+
// An out-of-range dim or a size larger than the dimension must be rejected.
TEST_F(OpUnfoldTest, InvalidDimAndSizeTooLargeDies) {
  TensorFactory<ScalarType::Float> factory;
  const auto in = factory.make({3, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
  auto got = factory.zeros({3, 1, 2});

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_unfold_copy_out(in, /*dim=*/3, /*size=*/2, /*step=*/1, got));
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_unfold_copy_out(in, /*dim=*/1, /*size=*/10, /*step=*/1, got));
}

kernels/test/targets.bzl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -324,6 +324,7 @@ def define_common_targets():
324324
_common_op_test("op_tril_test", ["aten", "portable"])
325325
_common_op_test("op_trunc_test", ["aten", "portable"])
326326
_common_op_test("op_unbind_copy_test", ["aten", "portable"])
327+
_common_op_test("op_unfold_copy_test", ["aten", "portable"])
327328
_common_op_test("op_unsqueeze_copy_test", ["aten", "portable"])
328329
_common_op_test("op_upsample_bilinear2d_test", ["aten", "portable"])
329330
_common_op_test("op_upsample_nearest2d_test", ["aten", "portable"])

shim_et/xplat/executorch/kernels/portable/op_registration_util.bzl

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1223,6 +1223,12 @@ ATEN_OPS = (
12231223
"//executorch/kernels/portable/cpu/util:copy_ops_util",
12241224
],
12251225
),
1226+
op_target(
1227+
name = "op_unfold_copy",
1228+
deps = [
1229+
"//executorch/kernels/portable/cpu/util:copy_ops_util",
1230+
],
1231+
),
12261232
op_target(
12271233
name = "op_unsqueeze_copy",
12281234
deps = [

0 commit comments

Comments
 (0)