@@ -17,6 +17,7 @@
 #include <executorch/backends/cuda/runtime/c10/util/safe_numerics.h>
 #include <executorch/backends/cuda/runtime/slim/core/Storage.h>
 #include <executorch/backends/cuda/runtime/slim/util/SizeUtil.h>
+#include <executorch/backends/cuda/runtime/slim/util/ArrayRefUtil.h>
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
 
 namespace executorch::backends::cuda::slim {
@@ -55,8 +56,8 @@ class SlimTensor {
  public:
   SlimTensor(
       Storage&& storage,
-      executorch::backends::cuda::c10::IntArrayRef sizes,
-      executorch::backends::cuda::c10::IntArrayRef strides,
+      executorch::aten::IntArrayRef sizes,
+      executorch::aten::IntArrayRef strides,
       executorch::backends::cuda::c10::ScalarType dtype,
       int64_t storage_offset = 0)
       : storage_(std::move(storage)),
@@ -99,14 +100,11 @@ class SlimTensor {
 
     // Create a non-owning storage that wraps the ETensor's data
     // ETensor is guaranteed to be on CPU
-    Storage storage(new MaybeOwningStorage(
-        CPU_DEVICE,
-        data_ptr,
-        etensor->nbytes()
-    ));
+    Storage storage(
+        new MaybeOwningStorage(CPU_DEVICE, data_ptr, etensor->nbytes()));
 
     // Initialize the SlimTensor with the wrapped storage
-    *this = SlimTensor(std::move(storage), sizes, strides, slim_dtype, 0);
+    *this = SlimTensor(std::move(storage), vec_to_et(sizes), vec_to_et(strides), slim_dtype, 0);
   }
 
   SlimTensor(const SlimTensor&) = default;
@@ -134,8 +132,8 @@ class SlimTensor {
     return executorch::backends::cuda::c10::elementSize(dtype_);
   }
 
-  executorch::backends::cuda::c10::IntArrayRef sizes() const {
-    return sizes_and_strides_.sizes_arrayref();
+  executorch::aten::IntArrayRef sizes() const {
+    return c10_to_et(sizes_and_strides_.sizes_arrayref());
   }
 
   int64_t size(int64_t dim) const {
@@ -144,8 +142,8 @@ class SlimTensor {
     return sizes_and_strides_.size_at(static_cast<size_t>(wrapped_dim));
   }
 
-  executorch::backends::cuda::c10::IntArrayRef strides() const {
-    return sizes_and_strides_.strides_arrayref();
+  executorch::aten::IntArrayRef strides() const {
+    return c10_to_et(sizes_and_strides_.strides_arrayref());
   }
 
   int64_t stride(int64_t dim) const {
@@ -213,8 +211,8 @@ class SlimTensor {
   }
 
   void set_sizes_and_strides(
-      executorch::backends::cuda::c10::IntArrayRef sizes,
-      executorch::backends::cuda::c10::IntArrayRef strides,
+      executorch::aten::IntArrayRef sizes,
+      executorch::aten::IntArrayRef strides,
       std::optional<int64_t> storage_offset = std::nullopt) {
     const int64_t new_dim = static_cast<int64_t>(sizes.size());
     STANDALONE_CHECK(
@@ -225,8 +223,8 @@ class SlimTensor {
         strides.size(),
         ")");
 
-    std::vector<int64_t> new_sizes = sizes.vec();
-    std::vector<int64_t> new_strides = strides.vec();
+    std::vector<int64_t> new_sizes = to_vec(sizes);
+    std::vector<int64_t> new_strides = to_vec(strides);
 
     // stride calculation logic
     bool overflowed = false;
@@ -259,8 +257,7 @@ class SlimTensor {
     refresh_contiguous();
   }
 
-  void set_sizes_contiguous(
-      executorch::backends::cuda::c10::IntArrayRef new_size) {
+  void set_sizes_contiguous(executorch::aten::IntArrayRef new_size) {
     sizes_and_strides_.set_sizes(new_size);
     refresh_numel();
     empty_tensor_restride(
@@ -271,7 +268,7 @@ class SlimTensor {
       executorch::backends::cuda::c10::MemoryFormat memory_format);
 
   SlimTensor resize_(
-      executorch::backends::cuda::c10::IntArrayRef sizes,
+      executorch::aten::IntArrayRef sizes,
       std::optional<c10::MemoryFormat> optional_memory_format);
 
   // Conversion operations
@@ -283,8 +280,8 @@ class SlimTensor {
     Storage new_storage(new MaybeOwningStorage(storage_->clone(device)));
     return SlimTensor(
         std::move(new_storage),
-        sizes_and_strides_.sizes_arrayref(),
-        sizes_and_strides_.strides_arrayref(),
+        c10_to_et(sizes_and_strides_.sizes_arrayref()),
+        c10_to_et(sizes_and_strides_.strides_arrayref()),
         dtype_,
         storage_offset_);
   }
@@ -525,34 +522,32 @@ class SlimTensor {
         executorch::backends::cuda::slim::compute_contiguous_strides(
             this->sizes());
     return _clone_impl(
-        this->sizes(), contig_strides, this->dtype(), this->device());
+        this->sizes(), vec_to_et(contig_strides), this->dtype(), this->device());
   }
 
   // View operations
   SlimTensor as_strided(
-      executorch::backends::cuda::c10::IntArrayRef sizes,
-      executorch::backends::cuda::c10::IntArrayRef strides,
+      executorch::aten::IntArrayRef sizes,
+      executorch::aten::IntArrayRef strides,
       int64_t storage_offset) const;
   SlimTensor as_strided_(
-      executorch::backends::cuda::c10::IntArrayRef sizes,
-      executorch::backends::cuda::c10::IntArrayRef strides,
+      executorch::aten::IntArrayRef sizes,
+      executorch::aten::IntArrayRef strides,
       int64_t storage_offset);
 
-  SlimTensor permute(executorch::backends::cuda::c10::IntArrayRef dims) const;
+  SlimTensor permute(executorch::aten::IntArrayRef dims) const;
 
   // Transpose operations
   SlimTensor transpose() const;
   SlimTensor transpose(int64_t dim0, int64_t dim1) const;
   SlimTensor t() const;
 
-  SlimTensor reshape(
-      executorch::backends::cuda::c10::IntArrayRef proposed_shape) const;
+  SlimTensor reshape(executorch::aten::IntArrayRef proposed_shape) const;
 
   SlimTensor narrow(int64_t dim, int64_t start, int64_t length) const;
 
   // Generic element access returning SlimTensor
-  SlimTensor operator[](
-      executorch::backends::cuda::c10::IntArrayRef indices) const {
+  SlimTensor operator[](executorch::aten::IntArrayRef indices) const {
     STANDALONE_CHECK(
         indices.size() <= this->dim(),
         "Number of indices (",
@@ -597,21 +592,21 @@ class SlimTensor {
       int64_t new_storage_offset = this->storage_offset_ + offset_adjustment;
       return SlimTensor(
           Storage(this->storage_),
-          new_sizes,
-          new_strides,
+          vec_to_et(new_sizes),
+          vec_to_et(new_strides),
          this->dtype_,
          new_storage_offset);
     }
   }
 
   // Convenience overload for single index
   SlimTensor operator[](int64_t index) const {
-    return (*this)[executorch::backends::cuda::c10::IntArrayRef{index}];
+    return (*this)[executorch::aten::IntArrayRef{index}];
   }
 
   // Convenience overloads for common multi-dimensional cases
   SlimTensor operator[](std::initializer_list<int64_t> indices) const {
-    return (*this)[executorch::backends::cuda::c10::IntArrayRef(indices)];
+    return (*this)[initlist_to_et(indices)];
   }
 
   // Extract scalar value from 0-dimensional tensor
@@ -667,8 +662,8 @@ class SlimTensor {
 
  private:
   SlimTensor _clone_impl(
-      executorch::backends::cuda::c10::IntArrayRef sizes,
-      executorch::backends::cuda::c10::IntArrayRef strides,
+      executorch::aten::IntArrayRef sizes,
+      executorch::aten::IntArrayRef strides,
       executorch::backends::cuda::c10::ScalarType dtype,
       const executorch::backends::cuda::c10::Device& device) const {
     Storage storage = new_storage(sizes, strides, dtype, device);
@@ -679,7 +674,7 @@ class SlimTensor {
   }
 
   void refresh_numel() {
-    numel_ = compute_numel(sizes_and_strides_.sizes_arrayref());
+    numel_ = compute_numel(c10_to_et(sizes_and_strides_.sizes_arrayref()));
   }
 
   bool compute_is_contiguous() const {
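Context for reviewers: these hunks migrate SlimTensor's public surface from the internal executorch::backends::cuda::c10::IntArrayRef to executorch::aten::IntArrayRef, routing each boundary crossing through the helpers pulled in by the new ArrayRefUtil.h include (c10_to_et, vec_to_et, to_vec, initlist_to_et). ArrayRefUtil.h itself is not part of this diff, so the following is only a minimal sketch of what those helpers might look like, assuming both ArrayRef types are non-owning (pointer, length) views over int64_t with the usual data()/size() accessors and a (data, size) constructor; the actual implementations may differ.

// Hypothetical sketch only -- the real ArrayRefUtil.h is not shown here.
#include <cstdint>
#include <initializer_list>
#include <vector>

#include <executorch/runtime/core/exec_aten/exec_aten.h>

namespace executorch::backends::cuda::slim {

// View conversion from the internal c10 ArrayRef to the public ET one.
// No copy is made; the underlying buffer must outlive the result.
inline executorch::aten::IntArrayRef c10_to_et(
    executorch::backends::cuda::c10::IntArrayRef ref) {
  return executorch::aten::IntArrayRef(ref.data(), ref.size());
}

// Wrap an owned vector in a non-owning view (vector must outlive the view).
inline executorch::aten::IntArrayRef vec_to_et(const std::vector<int64_t>& v) {
  return executorch::aten::IntArrayRef(v.data(), v.size());
}

// Owned copy of a view; stands in for the removed ArrayRef::vec() calls.
inline std::vector<int64_t> to_vec(executorch::aten::IntArrayRef ref) {
  return std::vector<int64_t>(ref.begin(), ref.end());
}

// Adapt a braced index list such as t[{0, 1}] to the shared ArrayRef type.
inline executorch::aten::IntArrayRef initlist_to_et(
    std::initializer_list<int64_t> list) {
  return executorch::aten::IntArrayRef(list.begin(), list.size());
}

}  // namespace executorch::backends::cuda::slim

One lifetime note implied by this design: vec_to_et returns a view into a caller-owned vector, so call sites like the clone() path must keep contig_strides alive until the SlimTensor constructor has (presumably) copied the values into sizes_and_strides_.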