Skip to content

Commit dc190f9

Browse files
authored
Revert "Update default executor runner with new optional options" (#14193)
Reverts #14017 https://www.internalfb.com/diff/D82053273 Here's the error from internal failure ``` executorch/extension/runner_util/inputs.cpp:118:13: error: format specifies type 'long' but the argument has type 'std::tuple_element<1, std::pair<char *, unsigned int>>::type' (aka 'unsigned int') [-Werror,-Wformat] [CONTEXT] 117 | "input size (%ld) and tensor size (%ld) mismatch!", [CONTEXT] | ~~~ xplat/executorch/extension/runner_util/inputs.cpp:119:13: error: format specifies type 'long' but the argument has type 'size_t' (aka 'unsigned int') [-Werror,-Wformat] [CONTEXT] 117 | "input size (%ld) and tensor size (%ld) mismatch!", [CONTEXT] | ~~~ xplat/executorch/extension/runner_util/inputs.cpp:118:13: error: format specifies type 'long' but the argument has type 'std::tuple_element<1, std::pair<char *, unsigned int>>::type' (aka 'unsigned int') [-Werror,-Wformat] [CONTEXT] 117 | "input size (%ld) and tensor size (%ld) mismatch!", [CONTEXT] | ~~~ xplat/executorch/extension/runner_util/inputs.cpp:119:13: error: format specifies type 'long' but the argument has type 'size_t' (aka 'unsigned int') [-Werror,-Wformat] [CONTEXT] 117 | "input size (%ld) and tensor size (%ld) mismatch!", [CONTEXT] | executorch/extension/runner_util/inputs.cpp:118:13: error: format specifies type 'long' but the argument has type 'std::tuple_element<1, std::pair<char *, unsigned int>>::type' (aka 'unsigned int') [-Werror,-Wformat] [CONTEXT] 117 | "input size (%ld) and tensor size (%ld) mismatch!", [CONTEXT] | ~~~ [CONTEXT] | %u [CONTEXT] 118 | buffer_size, [CONTEXT] | ^~~~~~~~~~~ [CONTEXT] xplat/executorch/runtime/platform/log.h:181:13: note: expanded from macro 'ET_LOG' [CONTEXT] 180 | _format, \ [CONTEXT] | ~~~~~~~ [CONTEXT] 181 | ##__VA_ARGS__); \ [CONTEXT] | ^~~~~~~~~~~ xplat/executorch/extension/runner_util/inputs.cpp:119:13: error: format specifies type 'long' but the argument has type 'size_t' (aka 'unsigned int') [-Werror,-Wformat] [CONTEXT] 117 | "input size (%ld) and tensor size 
(%ld) mismatch!", [CONTEXT] | ~~~ [CONTEXT] | %zu [CONTEXT] 118 | buffer_size, [CONTEXT] 119 | tensor_meta->nbytes()); [CONTEXT] | ^~~~~~~~~~~~~~~~~~~~~ [CONTEXT] xplat/executorch/runtime/platform/log.h:181:13: note: expanded from macro 'ET_LOG' [CONTEXT] 180 | _format, \ [CONTEXT] | ~~~~~~~ [CONTEXT] 181 | ##__VA_ARGS__); \ [CONTEXT] | ^~~~~~~~~~~ [CONTEXT] 2 errors generated. ```
1 parent 53b7ec5 commit dc190f9

File tree

8 files changed

+61
-233
lines changed

8 files changed

+61
-233
lines changed

backends/arm/test/ops/test_acos.py

Lines changed: 2 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44
# LICENSE file in the root directory of this source tree.
55
from typing import Tuple
66

7-
import pytest
87
import torch
98

109
from executorch.backends.arm.test import common
@@ -103,12 +102,8 @@ def test_acos_vgf_FP(test_data: Tuple):
103102
[],
104103
[],
105104
tosa_version="TOSA-1.0+FP",
106-
run_on_vulkan_runtime=True,
107105
)
108-
try:
109-
pipeline.run()
110-
except FileNotFoundError as e:
111-
pytest.skip(f"VKML executor_runner not found - not built - skip {e}")
106+
pipeline.run()
112107

113108

114109
@common.parametrize("test_data", test_data_suite)
@@ -120,9 +115,5 @@ def test_acos_vgf_INT(test_data: Tuple):
120115
[],
121116
[],
122117
tosa_version="TOSA-1.0+INT",
123-
run_on_vulkan_runtime=True,
124118
)
125-
try:
126-
pipeline.run()
127-
except FileNotFoundError as e:
128-
pytest.skip(f"VKML executor_runner not found - not built - skip {e}")
119+
pipeline.run()

backends/arm/test/ops/test_add.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -202,7 +202,12 @@ def test_add_tensor_u85_INT_2(test_data: input_t2):
202202
pipeline.run()
203203

204204

205-
@common.parametrize("test_data", Add.test_data)
205+
# TODO/MLETORCH-1282: remove once inputs are not hard coded to ones
206+
skip_keys = {"5d_float", "1d_ones", "1d_randn"}
207+
filtered_test_data = {k: v for k, v in Add.test_data.items() if k not in skip_keys}
208+
209+
210+
@common.parametrize("test_data", filtered_test_data)
206211
@common.SkipIfNoModelConverter
207212
def test_add_tensor_vgf_FP(test_data: input_t1):
208213
pipeline = VgfPipeline[input_t1](
@@ -219,7 +224,7 @@ def test_add_tensor_vgf_FP(test_data: input_t1):
219224
pytest.skip(f"VKML executor_runner not found - not built - skip {e}")
220225

221226

222-
@common.parametrize("test_data", Add.test_data)
227+
@common.parametrize("test_data", filtered_test_data)
223228
@common.SkipIfNoModelConverter
224229
def test_add_tensor_vgf_INT(test_data: input_t1):
225230
pipeline = VgfPipeline[input_t1](

backends/arm/test/runner_utils.py

Lines changed: 35 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -223,48 +223,13 @@ def run_target(
223223
elif target_board == "vkml_emulation_layer":
224224
return run_vkml_emulation_layer(
225225
executorch_program_manager,
226-
inputs,
227226
intermediate_path,
228227
elf_path,
229228
)
230229

231230

232-
def save_inputs_to_file(
233-
exported_program: ExportedProgram,
234-
inputs: Tuple[torch.Tensor],
235-
intermediate_path: str | Path,
236-
):
237-
input_file_paths = []
238-
input_names = get_input_names(exported_program)
239-
for input_name, input_ in zip(input_names, inputs):
240-
input_path = save_bytes(intermediate_path, input_, input_name)
241-
input_file_paths.append(input_path)
242-
243-
return input_file_paths
244-
245-
246-
def get_output_from_file(
247-
exported_program: ExportedProgram,
248-
intermediate_path: str | Path,
249-
output_base_name: str,
250-
):
251-
output_np = []
252-
output_node = exported_program.graph_module.graph.output_node()
253-
for i, node in enumerate(output_node.args[0]):
254-
output_shape = node.meta["val"].shape
255-
output_dtype = node.meta["val"].dtype
256-
tosa_ref_output = np.fromfile(
257-
os.path.join(intermediate_path, f"{output_base_name}-{i}.bin"),
258-
_torch_to_numpy_dtype_dict[output_dtype],
259-
)
260-
261-
output_np.append(torch.from_numpy(tosa_ref_output).reshape(output_shape))
262-
return tuple(output_np)
263-
264-
265231
def run_vkml_emulation_layer(
266232
executorch_program_manager: ExecutorchProgramManager,
267-
inputs: Tuple[torch.Tensor],
268233
intermediate_path: str | Path,
269234
elf_path: str | Path,
270235
):
@@ -274,7 +239,7 @@ def run_vkml_emulation_layer(
274239
`intermediate_path`: Directory to save the .pte and capture outputs.
275240
`elf_path`: Path to the Vulkan-capable executor_runner binary.
276241
"""
277-
exported_program = executorch_program_manager.exported_program()
242+
278243
intermediate_path = Path(intermediate_path)
279244
intermediate_path.mkdir(exist_ok=True)
280245
elf_path = Path(elf_path)
@@ -286,29 +251,26 @@ def run_vkml_emulation_layer(
286251
with open(pte_path, "wb") as f:
287252
f.write(executorch_program_manager.buffer)
288253

289-
output_base_name = "out"
290-
out_path = os.path.join(intermediate_path, output_base_name)
291-
292-
cmd_line = f"{elf_path} -model_path {pte_path} -output_file {out_path}"
293-
294-
input_string = None
295-
input_paths = save_inputs_to_file(exported_program, inputs, intermediate_path)
296-
for input_path in input_paths:
297-
if input_string is None:
298-
input_string = f" -inputs={input_path}"
299-
else:
300-
input_string += f",{input_path}"
301-
if input_string is not None:
302-
cmd_line += input_string
303-
cmd_line = cmd_line.split()
304-
254+
cmd_line = [str(elf_path), "-model_path", pte_path]
305255
result = _run_cmd(cmd_line)
306256

257+
result_stdout = result.stdout.decode() # noqa: F841
307258
# TODO: MLETORCH-1234: Support VGF e2e tests in VgfPipeline
308259
# TODO: Add regex to check for error or fault messages in stdout from Emulation Layer
309-
result_stdout = result.stdout.decode() # noqa: F841
260+
# Regex to extract tensor values from stdout
261+
output_np = []
262+
matches = re.findall(
263+
r"Output\s+\d+:\s+tensor\(sizes=\[(.*?)\],\s+\[(.*?)\]\)",
264+
result_stdout,
265+
re.DOTALL,
266+
)
310267

311-
return get_output_from_file(exported_program, intermediate_path, output_base_name)
268+
for shape_str, values_str in matches:
269+
shape = list(map(int, shape_str.split(",")))
270+
values = list(map(float, re.findall(r"[-+]?\d*\.\d+|\d+", values_str)))
271+
output_np.append(torch.tensor(values).reshape(shape))
272+
273+
return tuple(output_np)
312274

313275

314276
def run_corstone(
@@ -350,10 +312,14 @@ def run_corstone(
350312
with open(pte_path, "wb") as f:
351313
f.write(executorch_program_manager.buffer)
352314

353-
input_paths = save_inputs_to_file(exported_program, inputs, intermediate_path)
315+
# Save inputs to file
316+
input_names = get_input_names(exported_program)
317+
input_paths = []
318+
for input_name, input_ in zip(input_names, inputs):
319+
input_path = save_bytes(intermediate_path, input_, input_name)
320+
input_paths.append(input_path)
354321

355-
output_base_name = "out"
356-
out_path = os.path.join(intermediate_path, output_base_name)
322+
out_path = os.path.join(intermediate_path, "out")
357323

358324
cmd_line = f"executor_runner -m {pte_path} -o {out_path}"
359325
for input_path in input_paths:
@@ -435,7 +401,18 @@ def run_corstone(
435401
f"Corstone simulation failed:\ncmd: {' '.join(command_args)}\nlog: \n {result_stdout}\n{result.stderr.decode()}"
436402
)
437403

438-
return get_output_from_file(exported_program, intermediate_path, output_base_name)
404+
output_np = []
405+
output_node = exported_program.graph_module.graph.output_node()
406+
for i, node in enumerate(output_node.args[0]):
407+
output_shape = node.meta["val"].shape
408+
output_dtype = node.meta["val"].dtype
409+
tosa_ref_output = np.fromfile(
410+
os.path.join(intermediate_path, f"out-{i}.bin"),
411+
_torch_to_numpy_dtype_dict[output_dtype],
412+
)
413+
414+
output_np.append(torch.from_numpy(tosa_ref_output).reshape(output_shape))
415+
return tuple(output_np)
439416

440417

441418
def prep_data_for_save(

examples/portable/executor_runner/executor_runner.cpp

Lines changed: 7 additions & 106 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
/*
22
* Copyright (c) Meta Platforms, Inc. and affiliates.
3-
* All rights reserved.
43
* Copyright 2024-2025 Arm Limited and/or its affiliates.
4+
* All rights reserved.
55
*
66
* This source code is licensed under the BSD-style license found in the
77
* LICENSE file in the root directory of this source tree.
@@ -18,7 +18,6 @@
1818
* all fp32 tensors.
1919
*/
2020

21-
#include <fstream>
2221
#include <iostream>
2322
#include <memory>
2423

@@ -50,16 +49,6 @@ DEFINE_string(
5049
model_path,
5150
"model.pte",
5251
"Model serialized in flatbuffer format.");
53-
DEFINE_string(inputs, "", "Comma-separated list of input files");
54-
DEFINE_string(
55-
output_file,
56-
"",
57-
"Base name of output file. If not empty output will be written to the file(s).");
58-
59-
DEFINE_bool(
60-
print_all_output,
61-
false,
62-
"Prints all output. By default only first and last 100 elements are printed.");
6352
DEFINE_uint32(num_executions, 1, "Number of times to run the model.");
6453
#ifdef ET_EVENT_TRACER_ENABLED
6554
DEFINE_string(etdump_path, "model.etdump", "Write ETDump data to this path.");
@@ -69,8 +58,6 @@ DEFINE_int32(
6958
-1,
7059
"Number of CPU threads for inference. Defaults to -1, which implies we'll use a heuristic to derive the # of performant cores for a specific device.");
7160

72-
using executorch::aten::ScalarType;
73-
using executorch::aten::Tensor;
7461
using executorch::extension::FileDataLoader;
7562
using executorch::runtime::Error;
7663
using executorch::runtime::EValue;
@@ -83,8 +70,6 @@ using executorch::runtime::MethodMeta;
8370
using executorch::runtime::Program;
8471
using executorch::runtime::Result;
8572
using executorch::runtime::Span;
86-
using executorch::runtime::Tag;
87-
using executorch::runtime::TensorInfo;
8873

8974
/// Helper to manage resources for ETDump generation
9075
class EventTraceManager {
@@ -171,31 +156,6 @@ int main(int argc, char** argv) {
171156
"FileDataLoader::from() failed: 0x%" PRIx32,
172157
(uint32_t)loader.error());
173158

174-
std::vector<std::string> inputs_storage;
175-
std::vector<std::pair<char*, size_t>> input_buffers;
176-
177-
std::stringstream list_of_input_files(FLAGS_inputs);
178-
std::string token;
179-
180-
while (std::getline(list_of_input_files, token, ',')) {
181-
std::ifstream input_file_handle(token, std::ios::binary | std::ios::ate);
182-
if (!input_file_handle) {
183-
ET_LOG(Error, "Failed to open input file: %s\n", token.c_str());
184-
return 1;
185-
}
186-
187-
std::streamsize file_size = input_file_handle.tellg();
188-
input_file_handle.seekg(0, std::ios::beg);
189-
190-
inputs_storage.emplace_back(file_size, '\0');
191-
if (!input_file_handle.read(&inputs_storage.back()[0], file_size)) {
192-
ET_LOG(Error, "Failed to read input file: %s\n", token.c_str());
193-
return 1;
194-
}
195-
196-
input_buffers.emplace_back(&inputs_storage.back()[0], file_size);
197-
}
198-
199159
// Parse the program file. This is immutable, and can also be reused between
200160
// multiple execution invocations across multiple threads.
201161
Result<Program> program = Program::load(&loader.get());
@@ -294,17 +254,15 @@ int main(int argc, char** argv) {
294254
// Run the model.
295255
for (uint32_t i = 0; i < FLAGS_num_executions; i++) {
296256
ET_LOG(Debug, "Preparing inputs.");
297-
// Allocate input tensors and set all of their elements to 1 or to the
298-
// contents of input_buffers if available. The `inputs`
257+
// Allocate input tensors and set all of their elements to 1. The `inputs`
299258
// variable owns the allocated memory and must live past the last call to
300259
// `execute()`.
301260
//
302261
// NOTE: we have to re-prepare input tensors on every execution
303262
// because inputs whose space gets reused by memory planning (if
304263
// any such inputs exist) will not be preserved for the next
305264
// execution.
306-
auto inputs = executorch::extension::prepare_input_tensors(
307-
*method, {}, input_buffers);
265+
auto inputs = executorch::extension::prepare_input_tensors(*method);
308266
ET_CHECK_MSG(
309267
inputs.ok(),
310268
"Could not prepare inputs: 0x%" PRIx32,
@@ -337,67 +295,10 @@ int main(int argc, char** argv) {
337295
ET_LOG(Info, "%zu outputs: ", outputs.size());
338296
Error status = method->get_outputs(outputs.data(), outputs.size());
339297
ET_CHECK(status == Error::Ok);
340-
341-
if (FLAGS_output_file.size() > 0) {
342-
for (int i = 0; i < outputs.size(); ++i) {
343-
if (outputs[i].isTensor()) {
344-
Tensor tensor = outputs[i].toTensor();
345-
346-
char out_filename[255];
347-
snprintf(out_filename, 255, "%s-%d.bin", FLAGS_output_file.c_str(), i);
348-
ET_LOG(Info, "Writing output to file: %s", out_filename);
349-
FILE* out_file = fopen(out_filename, "wb");
350-
auto written_size =
351-
fwrite(tensor.const_data_ptr<char>(), 1, tensor.nbytes(), out_file);
352-
fclose(out_file);
353-
}
354-
}
355-
}
356-
357-
if (FLAGS_print_all_output) {
358-
for (int i = 0; i < outputs.size(); ++i) {
359-
if (outputs[i].isTensor()) {
360-
Tensor tensor = outputs[i].toTensor();
361-
362-
for (int j = 0; j < tensor.numel(); ++j) {
363-
if (tensor.scalar_type() == ScalarType::Int) {
364-
printf(
365-
"Output[%d][%d]: (int) %d\n",
366-
i,
367-
j,
368-
tensor.const_data_ptr<int>()[j]);
369-
} else if (tensor.scalar_type() == ScalarType::Float) {
370-
printf(
371-
"Output[%d][%d]: (float) %f\n",
372-
i,
373-
j,
374-
tensor.const_data_ptr<float>()[j]);
375-
} else if (tensor.scalar_type() == ScalarType::Char) {
376-
printf(
377-
"Output[%d][%d]: (char) %d\n",
378-
i,
379-
j,
380-
tensor.const_data_ptr<int8_t>()[j]);
381-
} else if (tensor.scalar_type() == ScalarType::Bool) {
382-
printf(
383-
"Output[%d][%d]: (bool) %s (0x%x)\n",
384-
i,
385-
j,
386-
tensor.const_data_ptr<int8_t>()[j] ? "true " : "false",
387-
tensor.const_data_ptr<int8_t>()[j]);
388-
}
389-
}
390-
} else {
391-
printf("Output[%d]: Not Tensor\n", i);
392-
}
393-
}
394-
} else {
395-
// Print the first and last 100 elements of long lists of scalars.
396-
std::cout << executorch::extension::evalue_edge_items(100);
397-
398-
for (int i = 0; i < outputs.size(); ++i) {
399-
std::cout << "OutputX " << i << ": " << outputs[i] << std::endl;
400-
}
298+
// Print the first and last 100 elements of long lists of scalars.
299+
std::cout << executorch::extension::evalue_edge_items(100);
300+
for (int i = 0; i < outputs.size(); ++i) {
301+
std::cout << "Output " << i << ": " << outputs[i] << std::endl;
401302
}
402303

403304
if (tracer.get_event_tracer()) {

0 commit comments

Comments (0)