/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/backends/aoti/common_shims.h>
#include <executorch/runtime/platform/log.h>
#include <cstdint>
#include <stdexcept>
#include <unordered_map>
#include <vector>

namespace executorch {
namespace backends {
namespace aoti {

namespace internal {
// Global storage for tensor metadata
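// The AOTI ABI hands out raw int64_t* arrays for sizes/strides, so we cache
// int64_t copies per tensor (keyed by Tensor*); entries are released by
// cleanup_tensor_metadata().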
std::unordered_map<Tensor*, std::vector<int64_t>> tensor_to_sizes;
std::unordered_map<Tensor*, std::vector<int64_t>> tensor_to_strides;
} // namespace internal

extern "C" {

// Autograd mode functions
int32_t aoti_torch_grad_mode_is_enabled() {
  // No autograd ever
  return false;
}

void aoti_torch_grad_mode_set_enabled(bool enabled) {
  if (enabled) {
    throw std::runtime_error("Cannot enable autograd");
  }
}

// Tensor attribute operations
AOTITorchError aoti_torch_get_data_ptr(Tensor* tensor, void** ret_data_ptr) {
  *ret_data_ptr = tensor->mutable_data_ptr();
  return Error::Ok;
}

AOTITorchError aoti_torch_get_storage_offset(
    Tensor* tensor,
    int64_t* ret_storage_offset) {
  // Storage offset is always 0 in ET
  *ret_storage_offset = 0;

  return Error::Ok;
}

AOTITorchError aoti_torch_get_strides(Tensor* tensor, int64_t** ret_strides) {
  auto it = internal::tensor_to_strides.find(tensor);
  if (it == internal::tensor_to_strides.end()) {
    std::vector<int64_t> strides(tensor->dim());
    auto tensor_strides = tensor->strides();
    for (int i = 0; i < tensor->dim(); i++) {
      strides[i] = tensor_strides[i];
    }
    it = internal::tensor_to_strides.emplace(tensor, std::move(strides)).first;
  }

  // For 0D tensors, data() returns nullptr on empty vectors, but we need to
  // return a valid pointer
  if (it->second.empty()) {
    static int64_t empty_strides_placeholder = 0;
    *ret_strides = &empty_strides_placeholder;
  } else {
    *ret_strides = it->second.data();
  }

  return Error::Ok;
}

AOTITorchError aoti_torch_get_dtype(Tensor* tensor, int32_t* ret_dtype) {
  *ret_dtype = static_cast<int32_t>(tensor->scalar_type());

  return Error::Ok;
}

AOTITorchError aoti_torch_get_sizes(Tensor* tensor, int64_t** ret_sizes) {
  auto it = internal::tensor_to_sizes.find(tensor);
  if (it == internal::tensor_to_sizes.end()) {
    std::vector<int64_t> sizes(tensor->dim());
    auto tensor_sizes = tensor->sizes();
    for (int i = 0; i < tensor->dim(); i++) {
      sizes[i] = tensor_sizes[i];
    }
    it = internal::tensor_to_sizes.emplace(tensor, std::move(sizes)).first;
  }

  // For 0D tensors, data() returns nullptr on empty vectors, but we need to
  // return a valid pointer
  if (it->second.empty()) {
    static int64_t empty_sizes_placeholder = 0;
    *ret_sizes = &empty_sizes_placeholder;
  } else {
    *ret_sizes = it->second.data();
  }

  return Error::Ok;
}
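
// Illustrative usage (an assumption about the calling side, not part of this
// file): AOTI-generated code typically queries tensor metadata through these
// C entry points, e.g.
//   int64_t* sizes = nullptr;
//   int64_t* strides = nullptr;
//   aoti_torch_get_sizes(t, &sizes);
//   aoti_torch_get_strides(t, &strides);
// where `t` is a hypothetical Tensor*. The returned pointers stay valid until
// cleanup_tensor_metadata() clears the caches.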

AOTITorchError aoti_torch_get_device_index(
    Tensor* tensor,
    int32_t* ret_device_index) {
  // Assume all tensors used by AOTI live on device index 0 (CUDA:0)
  *ret_device_index = 0;
  return Error::Ok;
}

AOTITorchError aoti_torch_get_dim(Tensor* tensor, int64_t* ret_dim) {
  *ret_dim = static_cast<int64_t>(tensor->dim());
  return Error::Ok;
}

// Device and layout utility functions
int32_t aoti_torch_device_type_cpu() {
  // CPU device type is 0 in ET as well, matching PyTorch's convention
  return 0;
}

int32_t aoti_torch_layout_strided() {
  // ET only supports the strided layout, so this always returns 0, i.e.
  // at::Layout::Strided
  return 0;
}

// Dtype constants - these return PyTorch's dtype codes.
// Currently only float32 is supported; values follow PyTorch's ScalarType enum.
int32_t aoti_torch_dtype_float32() {
  return 6; // PyTorch's float32 dtype code
}
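
// Note (assumption about future extensions): 6 corresponds to
// c10::ScalarType::Float; shims for additional dtypes would return the
// matching ScalarType enum values.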

// Cleanup functions
void cleanup_tensor_metadata() {
  internal::tensor_to_sizes.clear();
  internal::tensor_to_strides.clear();
}

} // extern "C"

} // namespace aoti
} // namespace backends
} // namespace executorch