Commit f109d14 — "replace c10 IntArrayRef with et"
Replaces uses of `executorch::backends::cuda::c10::IntArrayRef` with `executorch::aten::IntArrayRef` throughout the CUDA backend runtime.
Parent commit: cf5b30a

File tree

17 files changed: +367 additions, −148 deletions

backends/cuda/runtime/aoti_delegate_handle.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,6 @@ namespace backends {
1717
namespace cuda {
1818

1919
using executorch::runtime::Error;
20-
using executorch::runtime::etensor::Tensor;
2120

2221
extern "C" {
2322

backends/cuda/runtime/shims/aoti_torch/c/shim.h

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -199,8 +199,8 @@ AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_tensor_from_blob(
199199
const int64_t *strides_ptr, int64_t storage_offset, int32_t dtype,
200200
int32_t device_type, int32_t device_index,
201201
AtenTensorHandle *ret_new_tensor) {
202-
executorch::backends::cuda::c10::IntArrayRef sizes(sizes_ptr, ndim);
203-
executorch::backends::cuda::c10::IntArrayRef strides(strides_ptr, ndim);
202+
executorch::aten::IntArrayRef sizes(sizes_ptr, ndim);
203+
executorch::aten::IntArrayRef strides(strides_ptr, ndim);
204204
*ret_new_tensor =
205205
new executorch::backends::cuda::slim::SlimTensor(executorch::backends::cuda::slim::from_blob(
206206
data, sizes, strides, static_cast<executorch::backends::cuda::c10::ScalarType>(dtype),
@@ -216,8 +216,8 @@ AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_tensor_from_blob_v2(
216216
int32_t device_type, int32_t device_index, AtenTensorHandle *ret_new_tensor,
217217
int32_t layout, const uint8_t *opaque_metadata,
218218
int64_t opaque_metadata_size) {
219-
executorch::backends::cuda::c10::IntArrayRef sizes(sizes_ptr, ndim);
220-
executorch::backends::cuda::c10::IntArrayRef strides(strides_ptr, ndim);
219+
executorch::aten::IntArrayRef sizes(sizes_ptr, ndim);
220+
executorch::aten::IntArrayRef strides(strides_ptr, ndim);
221221
*ret_new_tensor =
222222
new executorch::backends::cuda::slim::SlimTensor(executorch::backends::cuda::slim::from_blob(
223223
data, sizes, strides, static_cast<executorch::backends::cuda::c10::ScalarType>(dtype),
@@ -231,8 +231,8 @@ AOTI_TORCH_EXPORT AOTITorchError aoti_torch_empty_strided(
231231
int64_t ndim, const int64_t *sizes_ptr, const int64_t *strides_ptr,
232232
int32_t dtype, int32_t device_type, int32_t device_index,
233233
AtenTensorHandle *ret_new_tensor) {
234-
executorch::backends::cuda::c10::IntArrayRef sizes(sizes_ptr, ndim);
235-
executorch::backends::cuda::c10::IntArrayRef strides(strides_ptr, ndim);
234+
executorch::aten::IntArrayRef sizes(sizes_ptr, ndim);
235+
executorch::aten::IntArrayRef strides(strides_ptr, ndim);
236236
auto empty_strided = executorch::backends::cuda::slim::empty_strided(
237237
sizes, strides, static_cast<executorch::backends::cuda::c10::ScalarType>(dtype),
238238
{static_cast<executorch::backends::cuda::c10::DeviceType>(device_type),
@@ -246,8 +246,8 @@ AOTI_TORCH_EXPORT AOTITorchError aoti_torch__reinterpret_tensor(
246246
AtenTensorHandle self, int64_t ndim, const int64_t *sizes_ptr,
247247
const int64_t *strides_ptr, int64_t offset_increment,
248248
AtenTensorHandle *ret_new_tensor) {
249-
executorch::backends::cuda::c10::IntArrayRef sizes(sizes_ptr, ndim);
250-
executorch::backends::cuda::c10::IntArrayRef strides(strides_ptr, ndim);
249+
executorch::aten::IntArrayRef sizes(sizes_ptr, ndim);
250+
executorch::aten::IntArrayRef strides(strides_ptr, ndim);
251251
*ret_new_tensor = new executorch::backends::cuda::slim::SlimTensor(
252252
self->storage(), sizes, strides, self->dtype(),
253253
self->storage_offset() + offset_increment);
@@ -257,8 +257,8 @@ AOTI_TORCH_EXPORT AOTITorchError aoti_torch__reinterpret_tensor(
257257
AOTI_TORCH_EXPORT AOTITorchError
258258
aoti_torch_as_strided(AtenTensorHandle self, const int64_t *sizes_ptr,
259259
const int64_t *strides_ptr, AtenTensorHandle *ret) {
260-
executorch::backends::cuda::c10::IntArrayRef sizes(sizes_ptr, self->dim());
261-
executorch::backends::cuda::c10::IntArrayRef strides(strides_ptr, self->dim());
260+
executorch::aten::IntArrayRef sizes(sizes_ptr, self->dim());
261+
executorch::aten::IntArrayRef strides(strides_ptr, self->dim());
262262
*ret = new executorch::backends::cuda::slim::SlimTensor(
263263
self->storage(), sizes, strides, self->dtype(), self->storage_offset());
264264
return AOTI_TORCH_SUCCESS;

backends/cuda/runtime/slim/core/Empty.h

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -10,21 +10,21 @@
1010
namespace executorch::backends::cuda::slim {
1111
// The returned SlimTensor owns the underlying storage
1212
inline SlimTensor
13-
empty_strided(executorch::backends::cuda::c10::IntArrayRef sizes,
14-
executorch::backends::cuda::c10::IntArrayRef strides,
13+
empty_strided(executorch::aten::IntArrayRef sizes,
14+
executorch::aten::IntArrayRef strides,
1515
executorch::backends::cuda::c10::ScalarType dtype,
1616
const executorch::backends::cuda::c10::Device &device = CPU_DEVICE) {
1717
Storage storage = new_storage(sizes, strides, dtype, device);
1818
return SlimTensor(std::move(storage), sizes, strides, dtype, 0);
1919
}
2020

21-
inline SlimTensor empty(executorch::backends::cuda::c10::IntArrayRef sizes,
22-
executorch::backends::cuda::c10::ScalarType dtype,
23-
const executorch::backends::cuda::c10::Device &device = CPU_DEVICE) {
21+
inline SlimTensor empty(executorch::aten::IntArrayRef sizes,
22+
executorch::backends::cuda::c10::ScalarType dtype,
23+
const executorch::backends::cuda::c10::Device& device) {
2424
std::vector<int64_t> contig_strides =
25-
executorch::backends::cuda::slim::compute_contiguous_strides(sizes);
26-
Storage storage = new_storage(sizes, contig_strides, dtype, device);
27-
return SlimTensor(std::move(storage), sizes, contig_strides, dtype, 0);
25+
executorch::backends::cuda::slim::compute_contiguous_strides(et_to_c10(sizes));
26+
Storage storage = new_storage(sizes, vec_to_et(contig_strides), dtype, device);
27+
return SlimTensor(std::move(storage), sizes, vec_to_et(contig_strides), dtype, 0);
2828
}
2929

3030
inline SlimTensor empty_like(const SlimTensor &other) {

backends/cuda/runtime/slim/core/Factory.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
#include <executorch/backends/cuda/runtime/slim/core/Empty.h>
44

55
namespace executorch::backends::cuda::slim {
6-
inline SlimTensor zeros(executorch::backends::cuda::c10::IntArrayRef sizes,
6+
inline SlimTensor zeros(executorch::aten::IntArrayRef sizes,
77
executorch::backends::cuda::c10::ScalarType dtype,
88
const executorch::backends::cuda::c10::Device &device = CPU_DEVICE) {
99
SlimTensor tensor = empty(sizes, dtype, device);
@@ -15,7 +15,7 @@ inline SlimTensor zeros_like(const SlimTensor &other) {
1515
return zeros(other.sizes(), other.dtype(), other.device());
1616
}
1717

18-
inline SlimTensor ones(executorch::backends::cuda::c10::IntArrayRef sizes,
18+
inline SlimTensor ones(executorch::aten::IntArrayRef sizes,
1919
executorch::backends::cuda::c10::ScalarType dtype,
2020
const executorch::backends::cuda::c10::Device &device = CPU_DEVICE) {
2121
SlimTensor tensor = empty(sizes, dtype, device);

backends/cuda/runtime/slim/core/FromBlob.h

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,8 @@
55
namespace executorch::backends::cuda::slim {
66

77
// The returned SlimTensor does not own the underlying storage
8-
inline SlimTensor from_blob(void *data, executorch::backends::cuda::c10::IntArrayRef sizes,
9-
executorch::backends::cuda::c10::IntArrayRef strides,
8+
inline SlimTensor from_blob(void *data, executorch::aten::IntArrayRef sizes,
9+
executorch::aten::IntArrayRef strides,
1010
executorch::backends::cuda::c10::ScalarType dtype,
1111
const executorch::backends::cuda::c10::Device &device = CPU_DEVICE,
1212
int64_t storage_offset = 0) {
@@ -19,13 +19,13 @@ inline SlimTensor from_blob(void *data, executorch::backends::cuda::c10::IntArra
1919
return SlimTensor(std::move(storage), sizes, strides, dtype, storage_offset);
2020
}
2121

22-
inline SlimTensor from_blob(void *data, executorch::backends::cuda::c10::IntArrayRef sizes,
22+
inline SlimTensor from_blob(void *data, executorch::aten::IntArrayRef sizes,
2323
executorch::backends::cuda::c10::ScalarType dtype,
24-
const executorch::backends::cuda::c10::Device &device = CPU_DEVICE,
25-
int64_t storage_offset = 0) {
24+
const executorch::backends::cuda::c10::Device& device,
25+
int64_t storage_offset) {
2626
std::vector<int64_t> contig_strides =
27-
executorch::backends::cuda::slim::compute_contiguous_strides(sizes);
28-
return from_blob(data, sizes, contig_strides, dtype, device, storage_offset);
27+
executorch::backends::cuda::slim::compute_contiguous_strides(et_to_c10(sizes));
28+
return from_blob(data, sizes, vec_to_et(contig_strides), dtype, device, storage_offset);
2929
}
3030

3131
} // namespace executorch::backends::cuda::slim

backends/cuda/runtime/slim/core/Pad.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,11 @@
55
namespace executorch::backends::cuda::slim {
66

77
inline SlimTensor constant_pad_nd(const SlimTensor &self,
8-
executorch::backends::cuda::c10::IntArrayRef pad,
8+
executorch::aten::IntArrayRef pad,
99
const executorch::backends::cuda::c10::Scalar &value) {
1010
STANDALONE_CHECK(pad.size() % 2 == 0, "Length of pad must be even");
1111

12-
executorch::backends::cuda::c10::IntArrayRef input_sizes = self.sizes();
12+
executorch::aten::IntArrayRef input_sizes = self.sizes();
1313
int64_t l_inp = self.dim();
1414
int64_t l_pad = static_cast<int64_t>(pad.size()) / 2;
1515
int64_t l_diff = l_inp - l_pad;
@@ -81,7 +81,7 @@ inline SlimTensor constant_pad_nd(const SlimTensor &self,
8181
return output;
8282
}
8383

84-
inline SlimTensor pad(const SlimTensor &self, executorch::backends::cuda::c10::IntArrayRef pad,
84+
inline SlimTensor pad(const SlimTensor &self, executorch::aten::IntArrayRef pad,
8585
std::string_view mode, std::optional<double> value) {
8686
if (mode == "constant") {
8787
return constant_pad_nd(self, pad, value.value_or(0.0));

backends/cuda/runtime/slim/core/SlimTensor.h

Lines changed: 33 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
#include <executorch/backends/cuda/runtime/c10/util/safe_numerics.h>
1818
#include <executorch/backends/cuda/runtime/slim/core/Storage.h>
1919
#include <executorch/backends/cuda/runtime/slim/util/SizeUtil.h>
20+
#include <executorch/backends/cuda/runtime/slim/util/ArrayRefUtil.h>
2021
#include <executorch/runtime/core/exec_aten/exec_aten.h>
2122

2223
namespace executorch::backends::cuda::slim {
@@ -55,8 +56,8 @@ class SlimTensor {
5556
public:
5657
SlimTensor(
5758
Storage&& storage,
58-
executorch::backends::cuda::c10::IntArrayRef sizes,
59-
executorch::backends::cuda::c10::IntArrayRef strides,
59+
executorch::aten::IntArrayRef sizes,
60+
executorch::aten::IntArrayRef strides,
6061
executorch::backends::cuda::c10::ScalarType dtype,
6162
int64_t storage_offset = 0)
6263
: storage_(std::move(storage)),
@@ -99,14 +100,11 @@ class SlimTensor {
99100

100101
// Create a non-owning storage that wraps the ETensor's data
101102
// ETensor is guaranteed to be on CPU
102-
Storage storage(new MaybeOwningStorage(
103-
CPU_DEVICE,
104-
data_ptr,
105-
etensor->nbytes()
106-
));
103+
Storage storage(
104+
new MaybeOwningStorage(CPU_DEVICE, data_ptr, etensor->nbytes()));
107105

108106
// Initialize the SlimTensor with the wrapped storage
109-
*this = SlimTensor(std::move(storage), sizes, strides, slim_dtype, 0);
107+
*this = SlimTensor(std::move(storage), vec_to_et(sizes), vec_to_et(strides), slim_dtype, 0);
110108
}
111109

112110
SlimTensor(const SlimTensor&) = default;
@@ -134,8 +132,8 @@ class SlimTensor {
134132
return executorch::backends::cuda::c10::elementSize(dtype_);
135133
}
136134

137-
executorch::backends::cuda::c10::IntArrayRef sizes() const {
138-
return sizes_and_strides_.sizes_arrayref();
135+
executorch::aten::IntArrayRef sizes() const {
136+
return c10_to_et(sizes_and_strides_.sizes_arrayref());
139137
}
140138

141139
int64_t size(int64_t dim) const {
@@ -144,8 +142,8 @@ class SlimTensor {
144142
return sizes_and_strides_.size_at(static_cast<size_t>(wrapped_dim));
145143
}
146144

147-
executorch::backends::cuda::c10::IntArrayRef strides() const {
148-
return sizes_and_strides_.strides_arrayref();
145+
executorch::aten::IntArrayRef strides() const {
146+
return c10_to_et(sizes_and_strides_.strides_arrayref());
149147
}
150148

151149
int64_t stride(int64_t dim) const {
@@ -213,8 +211,8 @@ class SlimTensor {
213211
}
214212

215213
void set_sizes_and_strides(
216-
executorch::backends::cuda::c10::IntArrayRef sizes,
217-
executorch::backends::cuda::c10::IntArrayRef strides,
214+
executorch::aten::IntArrayRef sizes,
215+
executorch::aten::IntArrayRef strides,
218216
std::optional<int64_t> storage_offset = std::nullopt) {
219217
const int64_t new_dim = static_cast<int64_t>(sizes.size());
220218
STANDALONE_CHECK(
@@ -225,8 +223,8 @@ class SlimTensor {
225223
strides.size(),
226224
")");
227225

228-
std::vector<int64_t> new_sizes = sizes.vec();
229-
std::vector<int64_t> new_strides = strides.vec();
226+
std::vector<int64_t> new_sizes = to_vec(sizes);
227+
std::vector<int64_t> new_strides = to_vec(strides);
230228

231229
// stride calculation logic
232230
bool overflowed = false;
@@ -259,8 +257,7 @@ class SlimTensor {
259257
refresh_contiguous();
260258
}
261259

262-
void set_sizes_contiguous(
263-
executorch::backends::cuda::c10::IntArrayRef new_size) {
260+
void set_sizes_contiguous(executorch::aten::IntArrayRef new_size) {
264261
sizes_and_strides_.set_sizes(new_size);
265262
refresh_numel();
266263
empty_tensor_restride(
@@ -271,7 +268,7 @@ class SlimTensor {
271268
executorch::backends::cuda::c10::MemoryFormat memory_format);
272269

273270
SlimTensor resize_(
274-
executorch::backends::cuda::c10::IntArrayRef sizes,
271+
executorch::aten::IntArrayRef sizes,
275272
std::optional<c10::MemoryFormat> optional_memory_format);
276273

277274
// Conversion operations
@@ -283,8 +280,8 @@ class SlimTensor {
283280
Storage new_storage(new MaybeOwningStorage(storage_->clone(device)));
284281
return SlimTensor(
285282
std::move(new_storage),
286-
sizes_and_strides_.sizes_arrayref(),
287-
sizes_and_strides_.strides_arrayref(),
283+
c10_to_et(sizes_and_strides_.sizes_arrayref()),
284+
c10_to_et(sizes_and_strides_.strides_arrayref()),
288285
dtype_,
289286
storage_offset_);
290287
}
@@ -525,34 +522,32 @@ class SlimTensor {
525522
executorch::backends::cuda::slim::compute_contiguous_strides(
526523
this->sizes());
527524
return _clone_impl(
528-
this->sizes(), contig_strides, this->dtype(), this->device());
525+
this->sizes(), vec_to_et(contig_strides), this->dtype(), this->device());
529526
}
530527

531528
// View operations
532529
SlimTensor as_strided(
533-
executorch::backends::cuda::c10::IntArrayRef sizes,
534-
executorch::backends::cuda::c10::IntArrayRef strides,
530+
executorch::aten::IntArrayRef sizes,
531+
executorch::aten::IntArrayRef strides,
535532
int64_t storage_offset) const;
536533
SlimTensor as_strided_(
537-
executorch::backends::cuda::c10::IntArrayRef sizes,
538-
executorch::backends::cuda::c10::IntArrayRef strides,
534+
executorch::aten::IntArrayRef sizes,
535+
executorch::aten::IntArrayRef strides,
539536
int64_t storage_offset);
540537

541-
SlimTensor permute(executorch::backends::cuda::c10::IntArrayRef dims) const;
538+
SlimTensor permute(executorch::aten::IntArrayRef dims) const;
542539

543540
// Transpose operations
544541
SlimTensor transpose() const;
545542
SlimTensor transpose(int64_t dim0, int64_t dim1) const;
546543
SlimTensor t() const;
547544

548-
SlimTensor reshape(
549-
executorch::backends::cuda::c10::IntArrayRef proposed_shape) const;
545+
SlimTensor reshape(executorch::aten::IntArrayRef proposed_shape) const;
550546

551547
SlimTensor narrow(int64_t dim, int64_t start, int64_t length) const;
552548

553549
// Generic element access returning SlimTensor
554-
SlimTensor operator[](
555-
executorch::backends::cuda::c10::IntArrayRef indices) const {
550+
SlimTensor operator[](executorch::aten::IntArrayRef indices) const {
556551
STANDALONE_CHECK(
557552
indices.size() <= this->dim(),
558553
"Number of indices (",
@@ -597,21 +592,21 @@ class SlimTensor {
597592
int64_t new_storage_offset = this->storage_offset_ + offset_adjustment;
598593
return SlimTensor(
599594
Storage(this->storage_),
600-
new_sizes,
601-
new_strides,
595+
vec_to_et(new_sizes),
596+
vec_to_et(new_strides),
602597
this->dtype_,
603598
new_storage_offset);
604599
}
605600
}
606601

607602
// Convenience overload for single index
608603
SlimTensor operator[](int64_t index) const {
609-
return (*this)[executorch::backends::cuda::c10::IntArrayRef{index}];
604+
return (*this)[executorch::aten::IntArrayRef{index}];
610605
}
611606

612607
// Convenience overloads for common multi-dimensional cases
613608
SlimTensor operator[](std::initializer_list<int64_t> indices) const {
614-
return (*this)[executorch::backends::cuda::c10::IntArrayRef(indices)];
609+
return (*this)[initlist_to_et(indices)];
615610
}
616611

617612
// Extract scalar value from 0-dimensional tensor
@@ -667,8 +662,8 @@ class SlimTensor {
667662

668663
private:
669664
SlimTensor _clone_impl(
670-
executorch::backends::cuda::c10::IntArrayRef sizes,
671-
executorch::backends::cuda::c10::IntArrayRef strides,
665+
executorch::aten::IntArrayRef sizes,
666+
executorch::aten::IntArrayRef strides,
672667
executorch::backends::cuda::c10::ScalarType dtype,
673668
const executorch::backends::cuda::c10::Device& device) const {
674669
Storage storage = new_storage(sizes, strides, dtype, device);
@@ -679,7 +674,7 @@ class SlimTensor {
679674
}
680675

681676
void refresh_numel() {
682-
numel_ = compute_numel(sizes_and_strides_.sizes_arrayref());
677+
numel_ = compute_numel(c10_to_et(sizes_and_strides_.sizes_arrayref()));
683678
}
684679

685680
bool compute_is_contiguous() const {

Commit comments: 0