@@ -69,9 +69,11 @@ TensorInfo::TensorInfo(
     Span<const int32_t> sizes,
     Span<const uint8_t> dim_order,
     executorch::aten::ScalarType scalar_type,
-    const bool is_memory_planned)
+    const bool is_memory_planned,
+    executorch::aten::string_view name)
     : sizes_(sizes),
       dim_order_(dim_order),
+      name_(name),
       scalar_type_(scalar_type),
       is_memory_planned_(is_memory_planned),
       nbytes_(calculate_nbytes(sizes_, scalar_type_)) {}
@@ -96,6 +98,10 @@ size_t TensorInfo::nbytes() const {
   return nbytes_;
 }
 
+executorch::aten::string_view TensorInfo::name() const {
+  return name_;
+}
+
 MethodMeta::MethodMeta(const executorch_flatbuffer::ExecutionPlan* s_plan)
     : s_plan_(s_plan) {}
 
@@ -150,7 +156,9 @@ Result<TensorInfo> MethodMeta::input_tensor_meta(size_t index) const {
       static_cast<executorch::aten::ScalarType>(tensor_value->scalar_type()),
       tensor_value->allocation_info() != nullptr ||
           tensor_value->data_buffer_idx() !=
-              0); // Count constant returns as memory planned.
+              0 /* is_memory_planned */,
+      executorch::aten::string_view{nullptr, 0}); // Count constant returns as memory
+                                                  // planned.
 }
 
 size_t MethodMeta::num_outputs() const {
@@ -201,7 +209,60 @@ Result<TensorInfo> MethodMeta::output_tensor_meta(size_t index) const {
       static_cast<executorch::aten::ScalarType>(tensor_value->scalar_type()),
       tensor_value->allocation_info() != nullptr ||
           tensor_value->data_buffer_idx() !=
-              0); // Count constant returns as memory planned.
+              0 /* is_memory_planned */,
+      executorch::aten::string_view{nullptr, 0}); // Count constant returns as memory
+                                                  // planned.
+}
+
+size_t MethodMeta::num_attributes() const {
+  size_t counter = 0;
+  auto values = s_plan_->values();
+  for (size_t i = 0; i < values->size(); ++i) {
+    auto value = values->Get(i);
+    if (value->val_type() == executorch_flatbuffer::KernelTypes::Tensor) {
+      auto tensor_value = value->val_as_Tensor();
+      if (tensor_value->extra_tensor_info() != nullptr &&
+          tensor_value->extra_tensor_info()->fully_qualified_name()->c_str() !=
+              nullptr) {
+        ++counter;
+      }
+    }
+  }
+  return counter;
+}
+
+Result<TensorInfo> MethodMeta::attribute_tensor_meta(size_t index) const {
+  size_t counter = 0;
+  auto values = s_plan_->values();
+  for (size_t i = 0; i < values->size(); ++i) {
+    auto value = values->Get(i);
+    if (value->val_type() == executorch_flatbuffer::KernelTypes::Tensor) {
+      auto tensor_value = value->val_as_Tensor();
+      if (tensor_value->extra_tensor_info() != nullptr &&
+          tensor_value->extra_tensor_info()->fully_qualified_name()->c_str() !=
+              nullptr) {
+        if (counter == index) {
+          auto t_name =
+              tensor_value->extra_tensor_info()->fully_qualified_name();
+          // Count constant returns as memory planned
+          return TensorInfo(
+              Span<const int32_t>(
+                  tensor_value->sizes()->data(), tensor_value->sizes()->size()),
+              Span<const uint8_t>(
+                  tensor_value->dim_order()->data(),
+                  tensor_value->dim_order()->size()),
+              static_cast<executorch::aten::ScalarType>(
+                  tensor_value->scalar_type()),
+              tensor_value->allocation_info() != nullptr ||
+                  tensor_value->data_buffer_idx() != 0 /* is_memory_planned */,
+              executorch::aten::string_view{t_name->c_str(), t_name->size()});
+        }
+        ++counter;
+      }
+    }
+  }
+  ET_LOG(Error, "No attribute tensor found at index %zu", index);
+  return Error::InvalidArgument;
 }
 
 size_t MethodMeta::num_memory_planned_buffers() const {
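Below is a minimal usage sketch of the attribute-introspection API this diff adds. It assumes the matching declarations (`MethodMeta::num_attributes()`, `MethodMeta::attribute_tensor_meta()`, `TensorInfo::name()`) are exposed in `method_meta.h` by the accompanying header change, and that the caller already holds a `MethodMeta` obtained from `Program::method_meta()`. The helper `dump_attribute_tensors` is hypothetical and not part of this patch.

```cpp
// Hypothetical helper (not in this patch): print the fully qualified name and
// byte size of every attribute tensor exposed by a method, using the new
// num_attributes() / attribute_tensor_meta() / name() calls from this diff.
#include <cstdio>

#include <executorch/runtime/executor/method_meta.h>

using executorch::runtime::Error;
using executorch::runtime::MethodMeta;
using executorch::runtime::Result;
using executorch::runtime::TensorInfo;

Error dump_attribute_tensors(const MethodMeta& meta) {
  for (size_t i = 0; i < meta.num_attributes(); ++i) {
    Result<TensorInfo> info = meta.attribute_tensor_meta(i);
    if (!info.ok()) {
      // Out-of-range or malformed entries surface here as an Error.
      return info.error();
    }
    // name() is the fully qualified name recorded in extra_tensor_info.
    executorch::aten::string_view name = info->name();
    std::printf(
        "attribute %zu: %.*s (%zu bytes)\n",
        i,
        static_cast<int>(name.size()),
        name.data(),
        info->nbytes());
  }
  return Error::Ok;
}
```

Since `attribute_tensor_meta()` returns `Error::InvalidArgument` for an out-of-range index, bounding the loop by `num_attributes()` keeps the sketch on the success path and reserves the error branch for malformed entries.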