@@ -45,10 +45,23 @@ char* model_pte = nullptr;
 #include "model_pte.h"
 #endif
 
-using namespace exec_aten;
-using namespace std;
-using torch::executor::Error;
-using torch::executor::Result;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
+using executorch::aten::TensorImpl;
+using executorch::extension::BufferCleanup;
+using executorch::extension::BufferDataLoader;
+using executorch::runtime::Error;
+using executorch::runtime::EValue;
+using executorch::runtime::HierarchicalAllocator;
+using executorch::runtime::MemoryAllocator;
+using executorch::runtime::MemoryManager;
+using executorch::runtime::Method;
+using executorch::runtime::MethodMeta;
+using executorch::runtime::Program;
+using executorch::runtime::Result;
+using executorch::runtime::Span;
+using executorch::runtime::Tag;
+using executorch::runtime::TensorInfo;
 
 #define METHOD_ALLOCATOR_POOL_SIZE (70 * 1024 * 1024)
 unsigned char __attribute__((
@@ -86,11 +99,10 @@ void et_pal_emit_log_message(
 }
 
 namespace {
-using namespace torch::executor;
 
-Result<util::BufferCleanup> prepare_input_tensors(
+Result<BufferCleanup> prepare_input_tensors(
     Method& method,
-    torch::executor::MemoryAllocator& allocator,
+    MemoryAllocator& allocator,
     std::vector<std::pair<char*, size_t>>& input_buffers) {
   MethodMeta method_meta = method.method_meta();
   size_t num_inputs = method_meta.num_inputs();
@@ -175,18 +187,18 @@ Result<util::BufferCleanup> prepare_input_tensors(
       ET_LOG(
           Error, "Failed to prepare input %zu: 0x%" PRIx32, i, (uint32_t)err);
       // The BufferCleanup will free the inputs when it goes out of scope.
-      util::BufferCleanup cleanup({inputs, num_allocated});
+      BufferCleanup cleanup({inputs, num_allocated});
       return err;
     }
   }
-  return util::BufferCleanup({inputs, num_allocated});
+  return BufferCleanup({inputs, num_allocated});
 }
 
 #ifdef SEMIHOSTING
 
 std::pair<char*, size_t> read_binary_file(
     const char* filename,
-    torch::executor::MemoryAllocator& allocator) {
+    MemoryAllocator& allocator) {
   FILE* fp = fopen(filename, "rb");
   if (!fp) {
     ET_LOG(
@@ -238,13 +250,13 @@ int main(int argc, const char* argv[]) {
   (void)argv;
 #endif
 
-  torch::executor::runtime_init();
+  executorch::runtime::runtime_init();
   std::vector<std::pair<char*, size_t>> input_buffers;
   size_t pte_size = sizeof(model_pte);
 
 #ifdef SEMIHOSTING
   const char* output_basename = nullptr;
-  torch::executor::MemoryAllocator input_allocator(
+  MemoryAllocator input_allocator(
       input_allocation_pool_size, input_allocation_pool);
 
   /* parse input parameters */
@@ -277,10 +289,9 @@ int main(int argc, const char* argv[]) {
   }
 #endif
   ET_LOG(Info, "Model in %p %c", model_pte, model_pte[0]);
-  auto loader = torch::executor::util::BufferDataLoader(model_pte, pte_size);
+  auto loader = BufferDataLoader(model_pte, pte_size);
   ET_LOG(Info, "Model PTE file loaded. Size: %lu bytes.", pte_size);
-  Result<torch::executor::Program> program =
-      torch::executor::Program::load(&loader);
+  Result<Program> program = Program::load(&loader);
   if (!program.ok()) {
     ET_LOG(
         Info,
@@ -299,8 +310,7 @@ int main(int argc, const char* argv[]) {
   }
   ET_LOG(Info, "Running method %s", method_name);
 
-  Result<torch::executor::MethodMeta> method_meta =
-      program->method_meta(method_name);
+  Result<MethodMeta> method_meta = program->method_meta(method_name);
   if (!method_meta.ok()) {
     ET_LOG(
         Info,
@@ -309,13 +319,11 @@ int main(int argc, const char* argv[]) {
         (unsigned int)method_meta.error());
   }
 
-  torch::executor::MemoryAllocator method_allocator{
-      torch::executor::MemoryAllocator(
-          METHOD_ALLOCATOR_POOL_SIZE, method_allocation_pool)};
+  MemoryAllocator method_allocator(
+      METHOD_ALLOCATOR_POOL_SIZE, method_allocation_pool);
 
   std::vector<uint8_t*> planned_buffers; // Owns the memory
-  std::vector<torch::executor::Span<uint8_t>>
-      planned_spans; // Passed to the allocator
+  std::vector<Span<uint8_t>> planned_spans; // Passed to the allocator
   size_t num_memory_planned_buffers = method_meta->num_memory_planned_buffers();
 
   for (size_t id = 0; id < num_memory_planned_buffers; ++id) {
@@ -330,17 +338,16 @@ int main(int argc, const char* argv[]) {
     planned_spans.push_back({planned_buffers.back(), buffer_size});
   }
 
-  torch::executor::HierarchicalAllocator planned_memory(
+  HierarchicalAllocator planned_memory(
       {planned_spans.data(), planned_spans.size()});
 
-  torch::executor::MemoryAllocator temp_allocator(
+  MemoryAllocator temp_allocator(
       temp_allocation_pool_size, temp_allocation_pool);
 
-  torch::executor::MemoryManager memory_manager(
+  MemoryManager memory_manager(
       &method_allocator, &planned_memory, &temp_allocator);
 
-  Result<torch::executor::Method> method =
-      program->load_method(method_name, &memory_manager);
+  Result<Method> method = program->load_method(method_name, &memory_manager);
   if (!method.ok()) {
     ET_LOG(
         Info,
@@ -379,7 +386,7 @@ int main(int argc, const char* argv[]) {
     ET_LOG(Info, "Model executed successfully.");
   }
 
-  std::vector<torch::executor::EValue> outputs(method->outputs_size());
+  std::vector<EValue> outputs(method->outputs_size());
   ET_LOG(Info, "%zu outputs: ", outputs.size());
   status = method->get_outputs(outputs.data(), outputs.size());
   ET_CHECK(status == Error::Ok);
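
For context, this change is a pure namespace migration: the same runtime flow (load the .pte through a BufferDataLoader, set up the method/planned/temp allocators and the MemoryManager, then load and execute the Method) now lives under executorch::runtime and executorch::extension. Below is a minimal sketch of that flow with the new names, assuming the standard executorch header layout; the pool sizes, the "forward" method name, and the run_model helper are illustrative and not part of this commit.

#include <cstddef>
#include <cstdint>

// Header paths assumed from the executorch repo layout.
#include <executorch/extension/data_loader/buffer_data_loader.h>
#include <executorch/runtime/executor/memory_manager.h>
#include <executorch/runtime/executor/program.h>
#include <executorch/runtime/platform/runtime.h>

using executorch::extension::BufferDataLoader;
using executorch::runtime::Error;
using executorch::runtime::HierarchicalAllocator;
using executorch::runtime::MemoryAllocator;
using executorch::runtime::MemoryManager;
using executorch::runtime::Method;
using executorch::runtime::Program;
using executorch::runtime::Result;
using executorch::runtime::Span;

// Hypothetical helper: parses a .pte that is already in memory and runs one method.
Error run_model(const void* pte_data, size_t pte_size) {
  executorch::runtime::runtime_init();

  // Wrap the in-memory .pte and parse the program header.
  BufferDataLoader loader(pte_data, pte_size);
  Result<Program> program = Program::load(&loader);
  if (!program.ok()) {
    return program.error();
  }

  // Static pools stand in for the linker-placed pools used by the runner;
  // sizes are placeholders.
  static uint8_t method_pool[4 * 1024 * 1024];
  static uint8_t planned_pool[1 * 1024 * 1024];
  static uint8_t temp_pool[1 * 1024 * 1024];

  MemoryAllocator method_allocator(sizeof(method_pool), method_pool);
  MemoryAllocator temp_allocator(sizeof(temp_pool), temp_pool);

  // The runner sizes one span per memory-planned buffer from MethodMeta;
  // a single fixed span keeps the sketch short.
  Span<uint8_t> planned_span(planned_pool, sizeof(planned_pool));
  HierarchicalAllocator planned_memory({&planned_span, 1});

  MemoryManager memory_manager(
      &method_allocator, &planned_memory, &temp_allocator);

  // "forward" is assumed; the runner reads the method name from the program.
  Result<Method> method = program->load_method("forward", &memory_manager);
  if (!method.ok()) {
    return method.error();
  }

  return method->execute();
}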