@@ -69,9 +69,11 @@ TensorInfo::TensorInfo(
     Span<const int32_t> sizes,
     Span<const uint8_t> dim_order,
     executorch::aten::ScalarType scalar_type,
-    const bool is_memory_planned)
+    const bool is_memory_planned,
+    executorch::aten::string_view name)
     : sizes_(sizes),
       dim_order_(dim_order),
+      name_(name),
       scalar_type_(scalar_type),
       is_memory_planned_(is_memory_planned),
       nbytes_(calculate_nbytes(sizes_, scalar_type_)) {}
@@ -96,6 +98,10 @@ size_t TensorInfo::nbytes() const {
   return nbytes_;
 }
 
+executorch::aten::string_view TensorInfo::name() const {
+  return name_;
+}
+
 MethodMeta::MethodMeta(const executorch_flatbuffer::ExecutionPlan* s_plan)
     : s_plan_(s_plan) {}
 
@@ -149,8 +155,9 @@ Result<TensorInfo> MethodMeta::input_tensor_meta(size_t index) const {
           tensor_value->dim_order()->data(), tensor_value->dim_order()->size()),
       static_cast<executorch::aten::ScalarType>(tensor_value->scalar_type()),
       tensor_value->allocation_info() != nullptr ||
-          tensor_value->data_buffer_idx() !=
-              0); // Count constant returns as memory planned.
+          tensor_value->data_buffer_idx() != 0 /* is_memory_planned */,
+      executorch::aten::string_view{nullptr, 0}); // Count constant returns as
+                                                  // memory planned.
 }
 
 size_t MethodMeta::num_outputs() const {
@@ -200,8 +207,60 @@ Result<TensorInfo> MethodMeta::output_tensor_meta(size_t index) const {
           tensor_value->dim_order()->data(), tensor_value->dim_order()->size()),
       static_cast<executorch::aten::ScalarType>(tensor_value->scalar_type()),
       tensor_value->allocation_info() != nullptr ||
-          tensor_value->data_buffer_idx() !=
-              0); // Count constant returns as memory planned.
+          tensor_value->data_buffer_idx() != 0 /* is_memory_planned */,
+      executorch::aten::string_view{nullptr, 0}); // Count constant returns as
+                                                  // memory planned.
+}
+
+size_t MethodMeta::num_attributes() const {
+  size_t counter = 0;
+  auto values = s_plan_->values();
+  for (size_t i = 0; i < values->size(); ++i) {
+    auto value = values->Get(i);
+    if (value->val_type() == executorch_flatbuffer::KernelTypes::Tensor) {
+      auto tensor_value = value->val_as_Tensor();
+      if (tensor_value->extra_tensor_info() != nullptr &&
+          tensor_value->extra_tensor_info()->fully_qualified_name()->c_str() !=
+              nullptr) {
+        ++counter;
+      }
+    }
+  }
+  return counter;
+}
+
+Result<TensorInfo> MethodMeta::attribute_tensor_meta(size_t index) const {
+  size_t counter = 0;
+  auto values = s_plan_->values();
+  for (size_t i = 0; i < values->size(); ++i) {
+    auto value = values->Get(i);
+    if (value->val_type() == executorch_flatbuffer::KernelTypes::Tensor) {
+      auto tensor_value = value->val_as_Tensor();
+      if (tensor_value->extra_tensor_info() != nullptr &&
+          tensor_value->extra_tensor_info()->fully_qualified_name()->c_str() !=
+              nullptr) {
+        if (counter == index) {
+          auto t_name =
+              tensor_value->extra_tensor_info()->fully_qualified_name();
+          // Count constant returns as memory planned
+          return TensorInfo(
+              Span<const int32_t>(
+                  tensor_value->sizes()->data(), tensor_value->sizes()->size()),
+              Span<const uint8_t>(
+                  tensor_value->dim_order()->data(),
+                  tensor_value->dim_order()->size()),
+              static_cast<executorch::aten::ScalarType>(
+                  tensor_value->scalar_type()),
+              tensor_value->allocation_info() != nullptr ||
+                  tensor_value->data_buffer_idx() != 0 /* is_memory_planned */,
+              executorch::aten::string_view{t_name->c_str(), t_name->size()});
+        }
+        ++counter;
+      }
+    }
+  }
+  ET_LOG(Error, "No attribute tensor found at index %zu", index);
+  return Error::InvalidArgument;
 }
 
 size_t MethodMeta::num_memory_planned_buffers() const {
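For context, here is a minimal sketch of how a caller might consume the attribute metadata introduced by this change. It is not part of the commit: the header path, the helper name print_attribute_tensors, and the assumption that the caller already holds a MethodMeta (e.g. obtained via Program::method_meta) are illustrative only.

#include <cstdio>

#include <executorch/runtime/executor/method_meta.h>

using executorch::runtime::MethodMeta;
using executorch::runtime::Result;
using executorch::runtime::TensorInfo;

// Hypothetical helper: lists every named attribute tensor of a method.
void print_attribute_tensors(const MethodMeta& method_meta) {
  // num_attributes() counts Tensor values whose extra_tensor_info carries a
  // fully qualified name, per the implementation in this diff.
  const size_t n = method_meta.num_attributes();
  for (size_t i = 0; i < n; ++i) {
    Result<TensorInfo> info = method_meta.attribute_tensor_meta(i);
    if (!info.ok()) {
      continue; // Error::InvalidArgument is returned for an out-of-range index.
    }
    // name() returns a string_view that may not be null-terminated, so print
    // it with an explicit length.
    auto name = info.get().name();
    printf(
        "attribute %zu: %.*s (%zu bytes)\n",
        i,
        static_cast<int>(name.size()),
        name.data(),
        info.get().nbytes());
  }
}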