1616
1717using AtenTensorOpaque = executorch::backends::cuda::slim::SlimTensor;
1818using AtenTensorHandle = executorch::backends::cuda::slim::SlimTensor *;
19- using namespace executorch ::backends::cuda::c10;
20-
2119
// AOTIProxyExecutorHandle isn't supported in standalone mode.
// It is defined as void* only so that the generated code compiles.
@@ -30,7 +28,7 @@ extern "C" {
3028// DeviceType
3129#define AOTI_TORCH_DEVICE_TYPE_IMPL (device_str, device_type ) \
3230 AOTI_TORCH_EXPORT int32_t aoti_torch_device_type_##device_str() { \
33- return (int32_t )DeviceType::device_type; \
31+ return (int32_t ) executorch::backends::cuda::c10:: DeviceType::device_type; \
3432 }
3533
3634AOTI_TORCH_DEVICE_TYPE_IMPL (cpu, CPU)
@@ -42,7 +40,7 @@ AOTI_TORCH_DEVICE_TYPE_IMPL(xpu, XPU)
4240// SclarType
4341#define AOTI_TORCH_DTYPE_IMPL (dtype, stype ) \
4442 AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_##dtype() { \
45- return (int32_t )ScalarType::stype; \
43+ return (int32_t ) executorch::backends::cuda::c10:: ScalarType::stype; \
4644 }
4745
4846AOTI_TORCH_DTYPE_IMPL (float8_e5m2, Float8_e5m2)
@@ -69,7 +67,7 @@ AOTI_TORCH_DTYPE_IMPL(complex128, ComplexDouble)
6967
7068#define AOTI_TORCH_LAYOUT_IMPL (name, enum ) \
7169 AOTI_TORCH_EXPORT int32_t aoti_torch_layout_##name() { \
72- return (int32_t )Layout::enum ; \
70+ return (int32_t ) executorch::backends::cuda::c10:: Layout::enum ; \
7371 }
7472
7573AOTI_TORCH_LAYOUT_IMPL (strided, Strided)
@@ -84,7 +82,7 @@ AOTI_TORCH_LAYOUT_IMPL(jagged, Jagged)
8482
8583#define AOTI_TORCH_MEMORY_FORMAT_IMPL (name, enum ) \
8684 AOTI_TORCH_EXPORT int32_t aoti_torch_memory_format_##name() { \
87- return (int32_t )MemoryFormat::enum ; \
85+ return (int32_t ) executorch::backends::cuda::c10:: MemoryFormat::enum ; \
8886 }
8987
9088AOTI_TORCH_MEMORY_FORMAT_IMPL (contiguous_format, Contiguous)
@@ -112,8 +110,8 @@ AOTI_TORCH_SCALAR_TO_TENSOR_IMPL(int16, int16_t)
112110AOTI_TORCH_SCALAR_TO_TENSOR_IMPL(int32, int32_t )
113111AOTI_TORCH_SCALAR_TO_TENSOR_IMPL(int64, int64_t )
114112AOTI_TORCH_SCALAR_TO_TENSOR_IMPL(bool , bool )
115- AOTI_TORCH_SCALAR_TO_TENSOR_IMPL(complex64, complex <float >)
116- AOTI_TORCH_SCALAR_TO_TENSOR_IMPL(complex128, complex <double >)
113+ AOTI_TORCH_SCALAR_TO_TENSOR_IMPL(complex64, executorch::backends::cuda::c10:: complex <float >)
114+ AOTI_TORCH_SCALAR_TO_TENSOR_IMPL(complex128, executorch::backends::cuda::c10:: complex <double >)
117115#undef AOTI_TORCH_SCALAR_TO_TENSOR_IMPL
118116
119117AOTI_TORCH_EXPORT bool aoti_torch_grad_mode_is_enabled () { return false ; }
@@ -201,13 +199,13 @@ AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_tensor_from_blob(
201199 const int64_t *strides_ptr, int64_t storage_offset, int32_t dtype,
202200 int32_t device_type, int32_t device_index,
203201 AtenTensorHandle *ret_new_tensor) {
204- IntArrayRef sizes (sizes_ptr, ndim);
205- IntArrayRef strides (strides_ptr, ndim);
202+ executorch::backends::cuda::c10:: IntArrayRef sizes (sizes_ptr, ndim);
203+ executorch::backends::cuda::c10:: IntArrayRef strides (strides_ptr, ndim);
206204 *ret_new_tensor =
207205 new executorch::backends::cuda::slim::SlimTensor (executorch::backends::cuda::slim::from_blob (
208- data, sizes, strides, static_cast <ScalarType>(dtype),
209- {static_cast <DeviceType>(device_type),
210- static_cast <DeviceIndex>(device_index)},
206+ data, sizes, strides, static_cast <executorch::backends::cuda::c10:: ScalarType>(dtype),
207+ {static_cast <executorch::backends::cuda::c10:: DeviceType>(device_type),
208+ static_cast <executorch::backends::cuda::c10:: DeviceIndex>(device_index)},
211209 storage_offset));
212210 return AOTI_TORCH_SUCCESS;
213211}
@@ -218,13 +216,13 @@ AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_tensor_from_blob_v2(
218216 int32_t device_type, int32_t device_index, AtenTensorHandle *ret_new_tensor,
219217 int32_t layout, const uint8_t *opaque_metadata,
220218 int64_t opaque_metadata_size) {
221- IntArrayRef sizes (sizes_ptr, ndim);
222- IntArrayRef strides (strides_ptr, ndim);
219+ executorch::backends::cuda::c10:: IntArrayRef sizes (sizes_ptr, ndim);
220+ executorch::backends::cuda::c10:: IntArrayRef strides (strides_ptr, ndim);
223221 *ret_new_tensor =
224222 new executorch::backends::cuda::slim::SlimTensor (executorch::backends::cuda::slim::from_blob (
225- data, sizes, strides, static_cast <ScalarType>(dtype),
226- {static_cast <DeviceType>(device_type),
227- static_cast <DeviceIndex>(device_index)},
223+ data, sizes, strides, static_cast <executorch::backends::cuda::c10:: ScalarType>(dtype),
224+ {static_cast <executorch::backends::cuda::c10:: DeviceType>(device_type),
225+ static_cast <executorch::backends::cuda::c10:: DeviceIndex>(device_index)},
228226 storage_offset));
229227 return AOTI_TORCH_SUCCESS;
230228}
@@ -233,12 +231,12 @@ AOTI_TORCH_EXPORT AOTITorchError aoti_torch_empty_strided(
233231 int64_t ndim, const int64_t *sizes_ptr, const int64_t *strides_ptr,
234232 int32_t dtype, int32_t device_type, int32_t device_index,
235233 AtenTensorHandle *ret_new_tensor) {
236- IntArrayRef sizes (sizes_ptr, ndim);
237- IntArrayRef strides (strides_ptr, ndim);
234+ executorch::backends::cuda::c10:: IntArrayRef sizes (sizes_ptr, ndim);
235+ executorch::backends::cuda::c10:: IntArrayRef strides (strides_ptr, ndim);
238236 auto empty_strided = executorch::backends::cuda::slim::empty_strided (
239- sizes, strides, static_cast <ScalarType>(dtype),
240- {static_cast <DeviceType>(device_type),
241- static_cast <DeviceIndex>(device_index)});
237+ sizes, strides, static_cast <executorch::backends::cuda::c10:: ScalarType>(dtype),
238+ {static_cast <executorch::backends::cuda::c10:: DeviceType>(device_type),
239+ static_cast <executorch::backends::cuda::c10:: DeviceIndex>(device_index)});
242240 *ret_new_tensor =
243241 new executorch::backends::cuda::slim::SlimTensor (empty_strided);
244242 return AOTI_TORCH_SUCCESS;
@@ -248,8 +246,8 @@ AOTI_TORCH_EXPORT AOTITorchError aoti_torch__reinterpret_tensor(
248246 AtenTensorHandle self, int64_t ndim, const int64_t *sizes_ptr,
249247 const int64_t *strides_ptr, int64_t offset_increment,
250248 AtenTensorHandle *ret_new_tensor) {
251- IntArrayRef sizes (sizes_ptr, ndim);
252- IntArrayRef strides (strides_ptr, ndim);
249+ executorch::backends::cuda::c10:: IntArrayRef sizes (sizes_ptr, ndim);
250+ executorch::backends::cuda::c10:: IntArrayRef strides (strides_ptr, ndim);
253251 *ret_new_tensor = new executorch::backends::cuda::slim::SlimTensor (
254252 self->storage (), sizes, strides, self->dtype (),
255253 self->storage_offset () + offset_increment);
@@ -259,8 +257,8 @@ AOTI_TORCH_EXPORT AOTITorchError aoti_torch__reinterpret_tensor(
259257AOTI_TORCH_EXPORT AOTITorchError
260258aoti_torch_as_strided (AtenTensorHandle self, const int64_t *sizes_ptr,
261259 const int64_t *strides_ptr, AtenTensorHandle *ret) {
262- IntArrayRef sizes (sizes_ptr, self->dim ());
263- IntArrayRef strides (strides_ptr, self->dim ());
260+ executorch::backends::cuda::c10:: IntArrayRef sizes (sizes_ptr, self->dim ());
261+ executorch::backends::cuda::c10:: IntArrayRef strides (strides_ptr, self->dim ());
264262 *ret = new executorch::backends::cuda::slim::SlimTensor (
265263 self->storage (), sizes, strides, self->dtype (), self->storage_offset ());
266264 return AOTI_TORCH_SUCCESS;
0 commit comments