
Commit beeeebd

Remove duplicate helper functions (moved to utils)
1 parent 1f60bb7 commit beeeebd

File tree

2 files changed: +1, -88 lines


kernels/aten/cpu/op__to_dim_order_copy.cpp

Lines changed: 1 addition & 65 deletions
@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <executorch/kernels/aten/cpu/util/copy_ops_util.h>
 #include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
 
@@ -25,71 +26,6 @@ using OptionalArrayRef = executorch::aten::OptionalArrayRef<T>;
 template <typename T>
 using Optional = std::optional<T>;
 
-namespace {
-Optional<MemoryFormat> get_memory_format(OptionalArrayRef<int64_t> dim_order) {
-  if (!dim_order.has_value()) {
-    return executorch::aten::nullopt;
-  }
-  if (is_contiguous_dim_order(
-          dim_order.value().data(), dim_order.value().size())) {
-    return MemoryFormat::Contiguous;
-  } else if (is_channels_last_dim_order(
-                 dim_order.value().data(), dim_order.value().size())) {
-    return MemoryFormat::ChannelsLast;
-  } else {
-    ET_ASSERT_UNREACHABLE();
-  }
-}
-
-bool check__to_dim_order_copy_args(
-    const Tensor& input,
-    bool non_blocking,
-    executorch::aten::OptionalArrayRef<int64_t> dim_order,
-    Tensor& out) {
-  // Right now we only support blocking data transfer
-  ET_LOG_AND_RETURN_IF_FALSE(non_blocking == false);
-
-  // dim_order is set, the target dim_order will be either contiguous or
-  // channels_last memory format
-  if (dim_order.has_value()) {
-    executorch::aten::ArrayRef<int64_t> dim_order_ref = dim_order.value();
-
-    // dim order size shall equal to input dim
-    ET_LOG_AND_RETURN_IF_FALSE(dim_order_ref.size() == input.dim());
-
-    ET_LOG_AND_RETURN_IF_FALSE(
-        is_channels_last_dim_order(
-            dim_order.value().data(), dim_order.value().size()) ||
-        is_contiguous_dim_order(
-            dim_order.value().data(), dim_order.value().size()));
-
-    // Out Aten tensor shall have same memory format stride as dim_order
-    const size_t kMaxNumOfDimensions = 16;
-    ET_LOG_AND_RETURN_IF_FALSE(kMaxNumOfDimensions >= out.dim());
-    executorch::aten::StridesType target_strides[kMaxNumOfDimensions];
-    dim_order_to_stride_nocheck(
-        out.sizes().data(),
-        dim_order_ref.data(),
-        dim_order_ref.size(),
-        target_strides);
-    ET_LOG_AND_RETURN_IF_FALSE(out.dim() == dim_order_ref.size());
-    for (size_t i = 0; i < dim_order_ref.size(); i++) {
-      ET_LOG_AND_RETURN_IF_FALSE(target_strides[i] == out.strides()[i]);
-    }
-
-  } else { // dim_order is not set, preserve the dim order of input
-
-    auto out_strides = out.strides();
-    auto input_strides = input.strides();
-    ET_LOG_AND_RETURN_IF_FALSE(input_strides.size() == out_strides.size());
-    for (size_t i = 0; i < input_strides.size(); i++) {
-      ET_LOG_AND_RETURN_IF_FALSE(input_strides[i] == out_strides[i]);
-    }
-  }
-  return true;
-}
-} // namespace
-
 // _to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]?
 // dim_order=None, Tensor(a!) out) -> Tensor(a!)
 Tensor& _to_dim_order_copy_out(
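
For context, the deleted helpers are not gone; per the commit message and the new copy_ops_util.h include above, they now come from the shared util header. The core of the deleted check__to_dim_order_copy_args is deriving the strides a dim order implies (via dim_order_to_stride_nocheck) and comparing them against the output tensor's strides. A minimal standalone sketch of that stride derivation follows; strides_from_dim_order is a hypothetical name for illustration, not the ExecuTorch API:

#include <cstdint>
#include <vector>

// Illustrative only: given tensor sizes and a dim_order listed from the
// outermost to the innermost dimension, compute the strides that layout
// implies. This mirrors the role dim_order_to_stride_nocheck plays in the
// deleted check.
std::vector<int64_t> strides_from_dim_order(
    const std::vector<int64_t>& sizes,
    const std::vector<int64_t>& dim_order) {
  std::vector<int64_t> strides(sizes.size());
  int64_t running = 1;
  // Walk dim_order from its last (innermost) entry outward; each dimension's
  // stride is the product of the sizes of all dimensions inner to it.
  for (size_t i = dim_order.size(); i-- > 0;) {
    const int64_t d = dim_order[i];
    strides[d] = running;
    running *= sizes[d];
  }
  return strides;
}

For sizes {2, 3, 4, 5}, the contiguous dim order {0, 1, 2, 3} yields strides {60, 20, 5, 1}, while the channels-last order {0, 2, 3, 1} yields {60, 1, 15, 3}; the deleted check accepted the output tensor only if its strides matched the derived ones element by element.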

kernels/portable/cpu/op__to_dim_order_copy.cpp

Lines changed: 0 additions & 23 deletions
@@ -29,29 +29,6 @@ using OptionalArrayRef = executorch::aten::OptionalArrayRef<T>;
 template <typename T>
 using Optional = std::optional<T>;
 
-namespace {
-
-template <typename SELF_CTYPE, typename OUT_CTYPE>
-void _to_dim_order_copy_impl(const Tensor& self, Tensor& out) {
-  auto self_data = self.mutable_data_ptr<SELF_CTYPE>();
-  auto out_data = out.mutable_data_ptr<OUT_CTYPE>();
-
-  // Here we make a slightly off-label use of
-  // BroadcastIndexesRange. It always assumes it doesn't have to care
-  // about different dim_order between input and output, but we can
-  // just force it to respect strides (and thus dim_order) for its
-  // inputs using support_noncontiguous_input_tensors=true, and then pretend
-  // the output is just another input.
-  for (const auto [unused_index, self_data_index, out_data_index] :
-       BroadcastIndexesRange<2, /*support_noncontiguous_input_tensors=*/true>(
-           /*dummy output*/ self, self, out)) {
-    (void)unused_index;
-    out_data[out_data_index] =
-        static_cast<OUT_CTYPE>(self_data[self_data_index]);
-  }
-}
-} // namespace
-
 // _to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]?
 // dim_order=None, Tensor(a!) out) -> Tensor(a!)
 Tensor& _to_dim_order_copy_out(
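
The deleted portable kernel used BroadcastIndexesRange (off-label, as its comment notes) to walk the input and output in lockstep even when their dim orders differ. A minimal standalone sketch of the same idea, iterating logical coordinates and mapping each through both tensors' strides; strided_copy is a hypothetical name for illustration, not the ExecuTorch API:

#include <cstdint>
#include <vector>

// Illustrative only: visit every logical coordinate once and map it to a
// physical offset in each tensor via that tensor's own strides, so the input
// and output may use different dim orders (memory layouts).
template <typename SelfT, typename OutT>
void strided_copy(
    const std::vector<int64_t>& sizes,
    const SelfT* self_data,
    const std::vector<int64_t>& self_strides,
    OutT* out_data,
    const std::vector<int64_t>& out_strides) {
  int64_t numel = 1;
  for (int64_t s : sizes) {
    numel *= s;
  }
  std::vector<int64_t> coord(sizes.size(), 0);
  for (int64_t i = 0; i < numel; ++i) {
    // Physical offset of the current coordinate in each tensor.
    int64_t self_off = 0;
    int64_t out_off = 0;
    for (size_t d = 0; d < sizes.size(); ++d) {
      self_off += coord[d] * self_strides[d];
      out_off += coord[d] * out_strides[d];
    }
    out_data[out_off] = static_cast<OutT>(self_data[self_off]);
    // Advance the logical coordinate, last dimension fastest.
    for (size_t d = sizes.size(); d-- > 0;) {
      if (++coord[d] < sizes[d]) {
        break;
      }
      coord[d] = 0;
    }
  }
}

BroadcastIndexesRange fuses this offset bookkeeping into a single iterator (and also handles broadcasting), which is why the deleted code could pass the input as a "dummy output" and treat the real output as just another strided input.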
