 
 static uint8_t method_allocator_pool[18 * 1024U]; // 18 kB
 
-using namespace torch::executor;
 #include <xtensa/config/core.h>
 
 #define APP_MU MUB
@@ -48,8 +47,8 @@ using namespace torch::executor;
 /* How many messages are used to test message sending */
 #define MSG_LENGTH 32U
 
-using torch::executor::Error;
-using torch::executor::Result;
+using executorch::runtime::Error;
+using executorch::runtime::Result;
 
 void LED_INIT();
 void LED_TOGGLE();
@@ -106,13 +105,13 @@ int main(int argc, char** argv) {
   BOARD_InitDebugConsole();
   ET_LOG(Info, "Booted up in DSP.");
 
-  torch::executor::runtime_init();
+  executorch::runtime::runtime_init();
 
   auto loader =
-      torch::executor::util::BufferDataLoader(model_pte, sizeof(model_pte));
+      executorch::extension::BufferDataLoader(model_pte, sizeof(model_pte));
 
-  Result<torch::executor::Program> program =
-      torch::executor::Program::load(&loader);
+  Result<executorch::runtime::Program> program =
+      executorch::runtime::Program::load(&loader);
   if (!program.ok()) {
     ET_LOG(
         Error,
@@ -132,7 +131,7 @@ int main(int argc, char** argv) {
   }
   ET_LOG(Info, "ET: Running method %s", method_name);
 
-  Result<torch::executor::MethodMeta> method_meta =
+  Result<executorch::runtime::MethodMeta> method_meta =
       program->method_meta(method_name);
   if (!method_meta.ok()) {
     ET_LOG(
@@ -142,12 +141,12 @@ int main(int argc, char** argv) {
         (unsigned int)method_meta.error());
   }
 
-  torch::executor::MemoryAllocator method_allocator{
-      torch::executor::MemoryAllocator(
+  executorch::runtime::MemoryAllocator method_allocator{
+      executorch::runtime::MemoryAllocator(
           sizeof(method_allocator_pool), method_allocator_pool)};
 
   std::vector<std::unique_ptr<uint8_t[]>> planned_buffers; // Owns the memory
-  std::vector<torch::executor::Span<uint8_t>>
+  std::vector<executorch::runtime::Span<uint8_t>>
       planned_spans; // Passed to the allocator
   size_t num_memory_planned_buffers = method_meta->num_memory_planned_buffers();
 
@@ -161,13 +160,13 @@ int main(int argc, char** argv) {
     planned_spans.push_back({planned_buffers.back().get(), buffer_size});
   }
 
-  torch::executor::HierarchicalAllocator planned_memory(
+  executorch::runtime::HierarchicalAllocator planned_memory(
       {planned_spans.data(), planned_spans.size()});
 
-  torch::executor::MemoryManager memory_manager(
+  executorch::runtime::MemoryManager memory_manager(
       &method_allocator, &planned_memory);
 
-  Result<torch::executor::Method> method =
+  Result<executorch::runtime::Method> method =
       program->load_method(method_name, &memory_manager);
   if (!method.ok()) {
     ET_LOG(
@@ -178,7 +177,7 @@ int main(int argc, char** argv) {
   }
 
   ET_LOG(Info, "Method loaded.");
-  torch::executor::util::prepare_input_tensors(*method);
+  executorch::extension::prepare_input_tensors(*method);
   ET_LOG(Info, "Starting the model execution...");
 
   Error status = method->execute();
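
For orientation, the new-namespace side of this diff assembles into the flow sketched below. This is only an illustrative sketch, not the actual runner from the commit: the include paths reflect the usual ExecuTorch source layout (an assumption here), and run_model, model_pte_len, and the bare "return -1" error paths are placeholders invented for the sketch; the board bring-up (BOARD_InitDebugConsole, MU/LED code) is omitted.

#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

// Assumed include paths for the post-migration ExecuTorch headers.
#include <executorch/extension/data_loader/buffer_data_loader.h>
#include <executorch/extension/runner_util/inputs.h>
#include <executorch/runtime/core/hierarchical_allocator.h>
#include <executorch/runtime/core/memory_allocator.h>
#include <executorch/runtime/core/span.h>
#include <executorch/runtime/executor/program.h>
#include <executorch/runtime/platform/runtime.h>

using executorch::runtime::Error;
using executorch::runtime::Result;

// Assumed to be provided elsewhere, as in the runner above.
extern const uint8_t model_pte[];
extern const size_t model_pte_len;
static uint8_t method_allocator_pool[18 * 1024U]; // 18 kB scratch pool

int run_model(const char* method_name) {
  executorch::runtime::runtime_init();

  // Wrap the flatbuffer that is already resident in memory.
  executorch::extension::BufferDataLoader loader(model_pte, model_pte_len);
  Result<executorch::runtime::Program> program =
      executorch::runtime::Program::load(&loader);
  if (!program.ok()) {
    return -1;
  }

  Result<executorch::runtime::MethodMeta> method_meta =
      program->method_meta(method_name);
  if (!method_meta.ok()) {
    return -1;
  }

  // Non-planned (runtime bookkeeping) allocations come from the static pool.
  executorch::runtime::MemoryAllocator method_allocator(
      sizeof(method_allocator_pool), method_allocator_pool);

  // One heap-backed buffer per memory-planned arena described by the .pte.
  std::vector<std::unique_ptr<uint8_t[]>> planned_buffers;
  std::vector<executorch::runtime::Span<uint8_t>> planned_spans;
  for (size_t i = 0; i < method_meta->num_memory_planned_buffers(); ++i) {
    size_t size =
        static_cast<size_t>(method_meta->memory_planned_buffer_size(i).get());
    planned_buffers.push_back(std::make_unique<uint8_t[]>(size));
    planned_spans.push_back({planned_buffers.back().get(), size});
  }
  executorch::runtime::HierarchicalAllocator planned_memory(
      {planned_spans.data(), planned_spans.size()});
  executorch::runtime::MemoryManager memory_manager(
      &method_allocator, &planned_memory);

  Result<executorch::runtime::Method> method =
      program->load_method(method_name, &memory_manager);
  if (!method.ok()) {
    return -1;
  }

  // Fill the inputs with placeholder data, then run the method.
  auto inputs = executorch::extension::prepare_input_tensors(*method);
  Error status = method->execute();
  return status == Error::Ok ? 0 : -1;
}

The two-allocator split mirrors the diff: the static method_allocator_pool serves the runtime's own scratch needs, while the memory-planned spans back the tensor arenas whose sizes were fixed at export time in the .pte file.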