 * LICENSE file in the root directory of this source tree.
 */

+#include <executorch/kernels/aten/cpu/util/copy_ops_util.h>
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

@@ -25,71 +26,6 @@ using OptionalArrayRef = executorch::aten::OptionalArrayRef<T>;
template <typename T>
using Optional = std::optional<T>;

-namespace {
-Optional<MemoryFormat> get_memory_format(OptionalArrayRef<int64_t> dim_order) {
-  if (!dim_order.has_value()) {
-    return executorch::aten::nullopt;
-  }
-  if (is_contiguous_dim_order(
-          dim_order.value().data(), dim_order.value().size())) {
-    return MemoryFormat::Contiguous;
-  } else if (is_channels_last_dim_order(
-                 dim_order.value().data(), dim_order.value().size())) {
-    return MemoryFormat::ChannelsLast;
-  } else {
-    ET_ASSERT_UNREACHABLE();
-  }
-}
-
-bool check__to_dim_order_copy_args(
-    const Tensor& input,
-    bool non_blocking,
-    executorch::aten::OptionalArrayRef<int64_t> dim_order,
-    Tensor& out) {
-  // Right now we only support blocking data transfer
-  ET_LOG_AND_RETURN_IF_FALSE(non_blocking == false);
-
-  // If dim_order is set, the target dim order must describe either the
-  // contiguous or the channels_last memory format
-  if (dim_order.has_value()) {
-    executorch::aten::ArrayRef<int64_t> dim_order_ref = dim_order.value();
-
-    // The dim order size must equal the input's number of dims
-    ET_LOG_AND_RETURN_IF_FALSE(dim_order_ref.size() == input.dim());
-
-    ET_LOG_AND_RETURN_IF_FALSE(
-        is_channels_last_dim_order(
-            dim_order.value().data(), dim_order.value().size()) ||
-        is_contiguous_dim_order(
-            dim_order.value().data(), dim_order.value().size()));
-
-    // The out tensor's strides must match the strides implied by dim_order
-    const size_t kMaxNumOfDimensions = 16;
-    ET_LOG_AND_RETURN_IF_FALSE(kMaxNumOfDimensions >= out.dim());
-    executorch::aten::StridesType target_strides[kMaxNumOfDimensions];
-    dim_order_to_stride_nocheck(
-        out.sizes().data(),
-        dim_order_ref.data(),
-        dim_order_ref.size(),
-        target_strides);
-    ET_LOG_AND_RETURN_IF_FALSE(out.dim() == dim_order_ref.size());
-    for (size_t i = 0; i < dim_order_ref.size(); i++) {
-      ET_LOG_AND_RETURN_IF_FALSE(target_strides[i] == out.strides()[i]);
-    }
-
-  } else { // dim_order is not set; preserve the dim order of the input
-
-    auto out_strides = out.strides();
-    auto input_strides = input.strides();
-    ET_LOG_AND_RETURN_IF_FALSE(input_strides.size() == out_strides.size());
-    for (size_t i = 0; i < input_strides.size(); i++) {
-      ET_LOG_AND_RETURN_IF_FALSE(input_strides[i] == out_strides[i]);
-    }
-  }
-  return true;
-}
-} // namespace
-
// _to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]?
// dim_order=None, Tensor(a!) out) -> Tensor(a!)
Tensor& _to_dim_order_copy_out(
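
For context on what the removed check verified: a dim order lists a tensor's dimensions from outermost to innermost in memory, so for an NCHW tensor the contiguous dim order is {0, 1, 2, 3} and the channels-last dim order is {0, 2, 3, 1}. Below is a minimal standalone sketch of how strides follow from a dim order, mirroring the quantity that dim_order_to_stride_nocheck fills into target_strides above; the helper name strides_from_dim_order and the example sizes are illustrative assumptions, not part of the ExecuTorch API.

// Standalone sketch (hypothetical helper, not the ExecuTorch API): derive
// strides from a dim order. The dimension listed last in dim_order is
// innermost in memory (stride 1); walking backwards, each dimension's stride
// is the product of the sizes of all dimensions laid out after it.
#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<int64_t> strides_from_dim_order(
    const std::vector<int64_t>& sizes,
    const std::vector<int64_t>& dim_order) {
  std::vector<int64_t> strides(sizes.size());
  int64_t running = 1;
  for (size_t i = dim_order.size(); i-- > 0;) {
    strides[dim_order[i]] = running;
    running *= sizes[dim_order[i]];
  }
  return strides;
}

int main() {
  std::vector<int64_t> sizes = {2, 3, 4, 5}; // NCHW
  // Contiguous dim order {0, 1, 2, 3} -> strides {60, 20, 5, 1}
  // Channels-last dim order {0, 2, 3, 1} -> strides {60, 1, 15, 3}
  for (const auto& order :
       {std::vector<int64_t>{0, 1, 2, 3}, std::vector<int64_t>{0, 2, 3, 1}}) {
    for (int64_t s : strides_from_dim_order(sizes, order)) {
      std::printf("%lld ", static_cast<long long>(s));
    }
    std::printf("\n");
  }
  return 0;
}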