@@ -19,6 +19,41 @@ namespace torch {
 namespace executor {
 namespace deserialization {
 
+namespace {
+
+// Retrieve the buffer specified by the allocation_info
+__ET_NODISCARD Result<void*> getMemPlannedPtr(
+    const executorch_flatbuffer::AllocationDetails* allocation_info,
+    size_t nbytes,
+    HierarchicalAllocator* allocator) {
+  // Normal non-constant Tensor. Allocate data using mem_id and offset.
+
+  // TODO(T142455629): make the allocator actually id based and not indexed
+  // based. -1 is a hack to get the memory ids 0 aligned because previously
+  // 0 was reserved
+  const uint32_t memory_id = allocation_info->memory_id() - 1;
+
+  // Originally this field was a single uint32_t, but we need 64 bits for
+  // larger models. To preserve backwards compatibility, the high bits are
+  // managed in a separate uint32_t field.
+  const uint32_t memory_offset_low = allocation_info->memory_offset_low();
+  const uint32_t memory_offset_high = allocation_info->memory_offset_high();
+
+  size_t memory_offset = memory_offset_low;
+  if (memory_offset_high > 0) {
+    // The compiler should remove this always-true check on 64-bit systems.
+    ET_CHECK_OR_RETURN_ERROR(
+        sizeof(size_t) >= sizeof(uint64_t),
+        NotSupported,
+        "size_t cannot hold memory offset 0x%08" PRIx32 ".%08" PRIx32,
+        memory_offset_high,
+        memory_offset_low);
+    memory_offset |= static_cast<size_t>(memory_offset_high) << 32;
+  }
+  return allocator->get_offset_address(memory_id, memory_offset, nbytes);
+}
+} // namespace
+
 __ET_NODISCARD Result<BoxedEvalueList<exec_aten::Tensor>> parseTensorList(
     const flatbuffers::Vector<int32_t>* tensor_indices,
     EValue* values_,
@@ -53,49 +88,41 @@ __ET_NODISCARD Result<void*> getTensorDataPtr(
     const Program* program,
     size_t nbytes,
     HierarchicalAllocator* allocator) {
-  if (s_tensor->data_buffer_idx() > 0) {
-    auto data =
-        program->get_constant_buffer_data(s_tensor->data_buffer_idx(), nbytes);
-    if (!data.ok()) {
-      return data.error();
+  auto data_buffer_idx = s_tensor->data_buffer_idx();
+  const executorch_flatbuffer::AllocationDetails* allocation_info =
+      s_tensor->allocation_info();
+
+  // Memory Planned, with initial state
+  if (data_buffer_idx > 0 && allocation_info != nullptr) {
+    // Stub case for now.
+
+    // Get memory planned data pointer
+
+    // Call something like program.load_into_buffer(s_tensor->segment_idx,
+    // s_tensor->data_buffer_idx, mem_planned_buffer, nbytes)
+
+    return Error::NotImplemented;
+
+    // Constant
+  } else if (data_buffer_idx > 0 && allocation_info == nullptr) {
+    auto const_data =
+        program->get_constant_buffer_data(data_buffer_idx, nbytes);
+    if (!const_data.ok()) {
+      return const_data.error();
     }
+
     // The const_cast is 'ok' here because the program and runtime should
     // guarantee that this data is never modified.
-    return const_cast<void*>(data.get());
-  }
+    return const_cast<void*>(const_data.get());
 
-  const executorch_flatbuffer::AllocationDetails* allocation_info =
-      s_tensor->allocation_info();
-  if (allocation_info != nullptr) {
-    // Normal non-constant Tensor. Allocate data using mem_id and offset.
-
-    // TODO(T142455629): make the allocator actually id based and not indexed
-    // based. -1 is a hack to get the memory ids 0 aligned because previously
-    // 0 was reserved
-    const uint32_t memory_id = allocation_info->memory_id() - 1;
-
-    // Originally this field was a single uint32_t, but we need 64 bits for
-    // larger models. To preserve backwards compatibility, the high bits are
-    // managed in a separate uint32_t field.
-    const uint32_t memory_offset_low = allocation_info->memory_offset_low();
-    const uint32_t memory_offset_high = allocation_info->memory_offset_high();
-
-    size_t memory_offset = memory_offset_low;
-    if (memory_offset_high > 0) {
-      // The compiler should remove this always-true check on 64-bit systems.
-      ET_CHECK_OR_RETURN_ERROR(
-          sizeof(size_t) >= sizeof(uint64_t),
-          NotSupported,
-          "size_t cannot hold memory offset 0x%08" PRIx32 ".%08" PRIx32,
-          memory_offset_high,
-          memory_offset_low);
-      memory_offset |= static_cast<size_t>(memory_offset_high) << 32;
-    }
-    return allocator->get_offset_address(memory_id, memory_offset, nbytes);
-  }
+    // Memory planned, no initial state
+  } else if (data_buffer_idx == 0 && allocation_info != nullptr) {
+    return getMemPlannedPtr(allocation_info, nbytes, allocator);
 
-  // The tensor's data will be allocated as part of execution.
-  return nullptr;
+    // Pointer received at runtime
+  } else { // data_buffer_idx == 0 && allocation_info == nullptr
+    return nullptr;
+  }
 }
 
 } // namespace deserialization
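
The new `getMemPlannedPtr` rebuilds a 64-bit offset from the two 32-bit flatbuffer fields (`memory_offset_low` / `memory_offset_high`) that `AllocationDetails` keeps for backwards compatibility. Below is a minimal standalone sketch of that recombination; the `combine_offset` helper and `main` driver are illustrative only and not part of the ExecuTorch API, and the real code additionally returns a `NotSupported` error when `size_t` is narrower than 64 bits.

```cpp
#include <cassert>
#include <cstdint>

// Illustrative only: rebuild the 64-bit offset that AllocationDetails splits
// across memory_offset_low and memory_offset_high.
uint64_t combine_offset(uint32_t low, uint32_t high) {
  uint64_t offset = low;
  offset |= static_cast<uint64_t>(high) << 32;
  return offset;
}

int main() {
  // Offsets below 4 GiB keep the high word at 0, so the result is just low.
  assert(combine_offset(0x00000010u, 0u) == 0x10u);
  // Larger models spill into the high word: high=0x1, low=0x10 -> 0x100000010.
  assert(combine_offset(0x00000010u, 0x1u) == 0x100000010ull);
  return 0;
}
```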
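For review purposes, the rewritten `getTensorDataPtr` now branches on the pair (`data_buffer_idx`, `allocation_info`). The sketch below is only a reviewer's summary of that branch order; the `TensorDataSource` enum and `classify` helper are hypothetical names, not part of the ExecuTorch API.

```cpp
#include <cstdint>

// Hypothetical summary of the four cases handled by getTensorDataPtr.
enum class TensorDataSource {
  MemoryPlannedWithInitialState, // data_buffer_idx > 0, allocation_info set: NotImplemented for now
  Constant,                      // data_buffer_idx > 0, no allocation_info: Program-owned constant data
  MemoryPlanned,                 // data_buffer_idx == 0, allocation_info set: resolved by getMemPlannedPtr
  ProvidedAtRuntime,             // data_buffer_idx == 0, no allocation_info: nullptr, set during execution
};

// Mirrors the branch order in the diff; names are illustrative only.
TensorDataSource classify(uint32_t data_buffer_idx, bool has_allocation_info) {
  if (data_buffer_idx > 0 && has_allocation_info) {
    return TensorDataSource::MemoryPlannedWithInitialState;
  } else if (data_buffer_idx > 0) {
    return TensorDataSource::Constant;
  } else if (has_allocation_info) {
    return TensorDataSource::MemoryPlanned;
  } else {
    return TensorDataSource::ProvidedAtRuntime;
  }
}
```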