
Commit d84d902

JacobSzwejbka authored and facebook-github-bot committed
remove executor.h 7/N (#192)
Summary: Pull Request resolved: #192

Reviewed By: cccclai

Differential Revision: D48876338

fbshipit-source-id: a52373718225a01f5ff8f04de830dbc9f82bfb3c
1 parent 3200b94 commit d84d902
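
In short: the Executor/ExecutionPlan pair goes away, and callers obtain a Method directly from Program::load_method(). Below is a minimal before/after sketch condensed from the diff that follows, for orientation only; the Program and MemoryManager setup around it is elided, and the two halves are alternatives, not one program.

// Before this commit (executor.h):
auto* executor = worker_allocator.allocateInstance<Executor>();
new (executor) Executor(program, memory_manager);
Error status = executor->init_execution_plan(method_name);
auto& plan = executor->execution_plan();
Error err = plan.execute();

// After this commit (method.h / program.h):
auto method_res = program->load_method(method_name, memory_manager);
ET_CHECK(method_res.error() == Error::Ok);
Method method(std::move(method_res.get()));
Error err = method.execute();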

File tree

2 files changed: +36 -36 lines

test/relocatable_runner.cpp

Lines changed: 35 additions & 35 deletions
@@ -10,7 +10,8 @@
 #include <vector>

 #include <executorch/extension/data_loader/buffer_data_loader.h>
-#include <executorch/runtime/executor/executor.h>
+#include <executorch/runtime/executor/method.h>
+#include <executorch/runtime/executor/program.h>
 #include <executorch/runtime/platform/log.h>
 #include <executorch/runtime/platform/runtime.h>
 #include <executorch/util/read_file.h>
@@ -30,7 +31,7 @@ using namespace torch::executor;
  * For ExecuTorch to work efficiently in these environments, we want to
  * initialize the execution plan once for the model and avoid
  * re-initializing it for every inference. This can be achieved by restricting
- * the runtime contexts (torch::executor::Program and torch::executor::Executor)
+ * the runtime contexts (torch::executor::Program and torch::executor::Method)
  * to live in a pre-allocated, shared, and persistent memory.
  *
  * This tool demonstrates that the memory can be managed this way.
@@ -123,7 +124,7 @@ MemoryManager* create_memory_manager(
   ET_CHECK(temp_allocator != nullptr);
   new (temp_allocator) MemoryAllocator(0, nullptr);

-  // Assemble all of the allocators into the MemoryManager that the Executor
+  // Assemble all of the allocators into the MemoryManager that the Method
   // will use.
   auto* memory_manager = worker_allocator.allocateInstance<MemoryManager>();
   ET_CHECK(memory_manager != nullptr);
@@ -133,7 +134,7 @@ MemoryManager* create_memory_manager(
   return memory_manager;
 }

-ExecutionPlan* init_method(
+Method* init_method(
     Program* program,
     const char* method_name,
     MemoryAllocator& worker_allocator,
@@ -143,47 +144,46 @@ ExecutionPlan* init_method(
       create_memory_manager(program, method_name, worker_allocator);

   //
-  // Create an Executor and ExecutionPlan from the program, using the provided
-  // allocators. The ExecutionPlan is what actually runs the model. It is
+  // Create and load a method from the program, using the provided
+  // allocators. The Method is what actually runs the model. It is
   // mutable, so should only be used by a single thread at a time, but it can
   // be reused.
   //

-  auto* executor = worker_allocator.allocateInstance<Executor>();
-  ET_CHECK(executor != nullptr);
-  new (executor) Executor(program, memory_manager);
-
-  Error status = executor->init_execution_plan(method_name);
+  auto* method = worker_allocator.allocateInstance<Method>();
+  ET_CHECK(method != nullptr);
+  auto method_res = program->load_method(method_name, memory_manager);
   ET_CHECK_MSG(
-      status == Error::Ok,
-      "init_execution_plan('%s') failed with status 0x%" PRIx32,
+      method_res.error() == Error::Ok,
+      "loading method('%s') failed with status 0x%" PRIx32,
       method_name,
-      status);
+      method_res.error());
+  new (method) Method(std::move(method_res.get()));
+
   ET_LOG(Info, "Model method '%s' initialized.", method_name);
-  auto& plan = executor->execution_plan();

   // Gather the byte size of each input/output tensor.
-  const size_t input_size = plan.inputs_size();
+  const size_t input_size = method->inputs_size();
   for (size_t i = 0; i < input_size; i++) {
-    if (!plan.get_input(i).isTensor()) {
+    if (!method->get_input(i).isTensor()) {
       ET_LOG(Info, "input %zu is not a tensor, skipping", i);
       continue;
     }
-    const auto& t = plan.get_input(i).toTensor();
+    const auto& t = method->get_input(i).toTensor();
     input_sizes.push_back(t.nbytes());
   }

-  const size_t output_size = plan.outputs_size();
+  const size_t output_size = method->outputs_size();
   for (size_t i = 0; i < output_size; i++) {
-    const auto& t = plan.get_output(i).toTensor();
+    const auto& t = method->get_output(i).toTensor();
     output_sizes.push_back(t.nbytes());
   }

-  return &plan;
+  return method;
 }

 void inference_loop(
-    ExecutionPlan* plan,
+    Method* method,
     const std::vector<void*>& input_buffers,
     const std::vector<void*>& output_buffers) {
   ET_LOG(
@@ -194,12 +194,12 @@ void inference_loop(
   // Prepare the inputs.
   {
     size_t bufi = 0;
-    for (size_t i = 0; i < plan->inputs_size(); i++) {
-      if (!plan->get_input(i).isTensor()) {
+    for (size_t i = 0; i < method->inputs_size(); i++) {
+      if (!method->get_input(i).isTensor()) {
         ET_LOG(Info, "input %zu is not a tensor, skipping", i);
         continue;
       }
-      const auto& t = plan->get_input(i).toTensor();
+      const auto& t = method->get_input(i).toTensor();
       ET_CHECK_MSG(
           bufi < input_buffers.size(), "Not enough input buffers for model");
       t.set_data(input_buffers[bufi++]);
@@ -210,12 +210,12 @@ void inference_loop(
   // Prepare the outputs.
   {
     size_t bufi = 0;
-    for (size_t i = 0; i < plan->outputs_size(); i++) {
-      if (!plan->get_output(i).isTensor()) {
+    for (size_t i = 0; i < method->outputs_size(); i++) {
+      if (!method->get_output(i).isTensor()) {
         ET_LOG(Info, "output %zu is not a tensor, skipping", i);
         continue;
       }
-      const auto& t = plan->get_output(i).toTensor();
+      const auto& t = method->get_output(i).toTensor();
       ET_CHECK_MSG(
           bufi < output_buffers.size(), "Not enough output buffers for model");
       t.set_data(output_buffers[bufi++]);
@@ -224,7 +224,7 @@ void inference_loop(
   ET_LOG(Info, "Outputs prepared.");

   // Run the model.
-  Error status = plan->execute();
+  Error status = method->execute();
   ET_CHECK_MSG(
       status == Error::Ok,
       "plan->execute() failed with status 0x%" PRIx32,
@@ -275,10 +275,10 @@ int main(int argc, char** argv) {
   ET_CHECK(program != nullptr);

   /*
-   * Step 4: The worker core sets up the Executor and initalizes the execution
-   * plan. Here we let the control core read out the I/O info from the
-   * execution plan. This can also be done on the control core from the
-   * program flatbuffer, though there is no direct API at the moment.
+   * Step 4: The worker core sets up the Method. Here we let the control
+   * core read out the I/O info from the Method. This can also be done on
+   * the control core from the program flatbuffer, though there is no
+   * direct API at the moment.
    */

   // Get the method name to execute.
@@ -295,7 +295,7 @@ int main(int argc, char** argv) {
   std::vector<size_t> input_sizes;
   std::vector<size_t> output_sizes;

-  ExecutionPlan* plan = worker::init_method(
+  Method* method = worker::init_method(
       program, method_name, worker_allocator, input_sizes, output_sizes);

   ET_LOG(
@@ -331,7 +331,7 @@ int main(int argc, char** argv) {
   */

   // Run the inference on the inputs. CHECK-fails on error.
-  worker::inference_loop(plan, input_buffers, output_buffers);
+  worker::inference_loop(method, input_buffers, output_buffers);

   for (void* buffer : input_buffers) {
     free(buffer);
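
Read end to end, the post-commit flow of this file condenses to the sketch below. Error handling, output binding (which is symmetrical), and the Program/MemoryManager setup from earlier in the file are elided, and the direct i indexing simplifies the file's bufi counter, which only advances on tensor inputs; input_buffers stands in for the caller-provided buffers.

// Construct the Method in pre-allocated worker memory (placement new),
// so nothing is re-initialized between inferences.
auto* method = worker_allocator.allocateInstance<Method>();
ET_CHECK(method != nullptr);
auto method_res = program->load_method(method_name, memory_manager);
ET_CHECK(method_res.error() == Error::Ok);
new (method) Method(std::move(method_res.get()));

// Point each tensor input at a caller-owned buffer, then run the model.
for (size_t i = 0; i < method->inputs_size(); i++) {
  if (method->get_input(i).isTensor()) {
    method->get_input(i).toTensor().set_data(input_buffers[i]);
  }
}
ET_CHECK(method->execute() == Error::Ok);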

test/targets.bzl

Lines changed: 1 addition & 1 deletion
@@ -76,7 +76,7 @@ def define_common_targets():
         srcs = ["relocatable_runner.cpp"],
         deps = [
             "//executorch/kernels/portable:generated_lib_all_ops",
-            "//executorch/runtime/executor:executor",
+            "//executorch/runtime/executor:program",
             "//executorch/configurations:executor_cpu_optimized",
             "//executorch/extension/data_loader:buffer_data_loader",
             "//executorch/util:read_file",

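The dependency rename mirrors the include change in relocatable_runner.cpp above: with executor.h gone from the source, the test now links against //executorch/runtime/executor:program, presumably the target that exports method.h and program.h, instead of the :executor target this commit series is removing.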