Commit ac4a118

[Backend Tester] Add initial reporting skeleton
1 parent ea773b1 commit ac4a118

File tree

6 files changed: +654 -37 lines changed

Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
add_executable(executorch-test-runner
  test_runner.cpp
  # TODO
  ../../../runtime/platform/runtime.cpp
)

target_link_libraries(
  executorch-test-runner
  PRIVATE executorch
          gflags
          extension_flat_tensor
          extension_flat_tensor_serialize
          extension_module
          extension_tensor
          optimized_native_cpu_ops_lib
          xnnpack_backend)
Lines changed: 260 additions & 0 deletions
@@ -0,0 +1,260 @@
#include <executorch/extension/data_loader/file_data_loader.h>
#include <executorch/extension/flat_tensor/flat_tensor_data_map.h>
#include <executorch/extension/flat_tensor/serialize/serialize.h>
#include <executorch/extension/module/module.h>
#include <executorch/extension/tensor/tensor.h>
#include <executorch/runtime/platform/runtime.h>

#include <cstdlib>
#include <cstring>
#include <iostream>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <vector>

#include <gflags/gflags.h>

/*
 * This runner is intended to be built and run as part of the backend test flow. It takes a
 * set of inputs from a flat_tensor-format file, runs each case, and then serializes the
 * outputs to a file, also in flat_tensor format.
 */

DEFINE_string(
    model_path,
    "model.pte",
    "Model serialized in flatbuffer format.");

DEFINE_string(
    input_path,
    "inputs.ptd",
    "Input tensors in flat tensor (.ptd) format.");

DEFINE_string(
    output_path,
    "outputs.ptd",
    "Path to write output tensors in flat tensor (.ptd) format.");

DEFINE_string(
    method,
    "forward",
    "The model method to run.");
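
// Example invocation (paths are illustrative; the values shown are the flag
// defaults defined above):
//   executorch-test-runner --model_path=model.pte --input_path=inputs.ptd \
//       --output_path=outputs.ptd --method=forward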

using executorch::aten::Tensor;
using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::Result;
using executorch::extension::FileDataLoader;
using executorch::extension::FlatTensorDataMap;
using executorch::extension::Module;
using executorch::extension::TensorPtr;
using executorch::ET_RUNTIME_NAMESPACE::TensorLayout;

// Contains method inputs for a single run.
struct TestCase {
  std::map<int, TensorPtr> inputs;
};

std::map<std::string, TestCase> collect_test_cases(FlatTensorDataMap& input_map);
TensorPtr create_tensor(TensorLayout& layout, std::unique_ptr<char[], decltype(&free)> buffer);
Result<FlatTensorDataMap> load_input_data(FileDataLoader& loader);
std::optional<std::tuple<std::string, int>> parse_key(const std::string& key);
Result<std::vector<EValue>> run_test_case(Module& module, TestCase& test_case);
void store_outputs(
    std::map<std::string, TensorPtr>& output_map,
    const std::string& case_name,
    const std::vector<EValue>& outputs);

// Tensor data alignment, in bytes, used when serializing the output .ptd file.
const int TensorAlignment = 16;

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  executorch::runtime::runtime_init();

  // Load the model.
  Module model(FLAGS_model_path.c_str());
  auto load_method_error = model.load_method(FLAGS_method.c_str());
  if (load_method_error != Error::Ok) {
    std::cerr << "Failed to load method \"" << FLAGS_method << "\": "
              << static_cast<int>(load_method_error) << std::endl;
    return -1;
  }

  // Load the input tensor data. Note that the data loader has to live as long
  // as the flat tensor data map does.
  auto input_loader_result = FileDataLoader::from(FLAGS_input_path.c_str());
  if (!input_loader_result.ok()) {
    std::cerr << "Failed to open input file: error "
              << static_cast<int>(input_loader_result.error()) << std::endl;
    return -1;
  }

  auto load_result = load_input_data(*input_loader_result);
  if (!load_result.ok()) {
    return -1;
  }
  auto input_map = std::move(load_result.get());

  auto cases = collect_test_cases(input_map);
  std::map<std::string, TensorPtr> output_map;

  // Run each case and store the outputs.
  for (auto& [name, test_case] : cases) {
    auto result = run_test_case(model, test_case);
    if (!result.ok()) {
      std::cerr << "Failed to run test case \"" << name << "\": "
                << static_cast<int>(result.error()) << std::endl;
      return -1;
    }

    store_outputs(output_map, name, result.get());
  }

  // Create a map of Tensor (unowned), rather than TensorPtr (owned).
  std::map<std::string, Tensor> output_map_tensors;
  for (auto& [key, value] : output_map) {
    output_map_tensors.emplace(key, *value);
  }

  // Write the output data in .ptd format.
  auto save_result = executorch::extension::flat_tensor::save_ptd(
      FLAGS_output_path.c_str(),
      output_map_tensors,
      TensorAlignment);

  if (save_result != Error::Ok) {
    std::cerr << "Failed to save outputs: " << static_cast<int>(save_result)
              << std::endl;
    return -1;
  }

  std::cout << "Successfully wrote output tensors to " << FLAGS_output_path
            << "." << std::endl;
  return 0;
}

// Group inputs by test case and build tensors.
std::map<std::string, TestCase> collect_test_cases(FlatTensorDataMap& input_map) {
  std::map<std::string, TestCase> cases;

  for (auto i = 0u; i < input_map.get_num_keys().get(); i++) {
    auto key = input_map.get_key(i).get();

    // Split the key into test case name and input index.
    auto parsed_key = parse_key(key);
    if (!parsed_key) {
      std::cerr << "Malformed input key: \"" << key << "\"." << std::endl;
      exit(-1);
    }
    auto [test_case_name, input_index] = *parsed_key;

    // Get or create the test case instance.
    auto& test_case = cases[test_case_name];

    // Create a tensor from the layout and data.
    auto tensor_layout = input_map.get_tensor_layout(key).get();
    auto tensor_data = std::unique_ptr<char[], decltype(&free)>(
        (char*)malloc(tensor_layout.nbytes()), free);
    auto load_result = input_map.load_data_into(key, tensor_data.get(), tensor_layout.nbytes());
    if (load_result != Error::Ok) {
      std::cerr << "Load failed: " << static_cast<int>(load_result) << std::endl;
      exit(-1);
    }

    auto input_tensor = create_tensor(tensor_layout, std::move(tensor_data));
    test_case.inputs[input_index] = std::move(input_tensor);
  }

  return cases;
}

// Create a tensor from a layout and data blob.
TensorPtr create_tensor(TensorLayout& layout, std::unique_ptr<char[], decltype(&free)> buffer) {
  // Sizes and dim order have different element types in TensorLayout vs. Tensor.
  std::vector<executorch::aten::SizesType> sizes;
  for (auto x : layout.sizes()) {
    sizes.push_back(x);
  }
  std::vector<executorch::aten::DimOrderType> dim_order;
  for (auto x : layout.dim_order()) {
    dim_order.push_back(x);
  }

  // Transfer ownership of the buffer to the tensor via its deleter.
  auto raw_data = buffer.release();

  return executorch::extension::make_tensor_ptr(
      sizes,
      raw_data,
      dim_order,
      {}, // Strides - infer from sizes + dim order.
      layout.scalar_type(),
      exec_aten::TensorShapeDynamism::STATIC,
      [](void* ptr) {
        free(ptr);
      });
}

// Load the input data (in .ptd file format) from the given loader.
Result<FlatTensorDataMap> load_input_data(FileDataLoader& loader) {
  auto input_data_map_load_result = FlatTensorDataMap::load(&loader);
  if (!input_data_map_load_result.ok()) {
    std::cerr << "Failed to load input data map: error "
              << static_cast<int>(input_data_map_load_result.error()) << std::endl;
  }

  return input_data_map_load_result;
}

// Parse a string key of the form "test_case:input index". Returns a tuple of
// the test case name and input index, or std::nullopt if the key is malformed.
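// Example: parse_key("case0:1") yields {"case0", 1} (the case name "case0" is
// illustrative).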
std::optional<std::tuple<std::string, int>> parse_key(const std::string& key) {
  auto delimiter = key.find(":");
  if (delimiter == std::string::npos) {
    return std::nullopt;
  }

  auto test_case = key.substr(0, delimiter);
  auto index_str = key.substr(delimiter + 1);
  auto index = std::stoi(index_str);

  return {{test_case, index}};
}

// Run a given test case and return the resulting output values.
Result<std::vector<EValue>> run_test_case(Module& module, TestCase& test_case) {
  for (auto& [index, value] : test_case.inputs) {
    auto set_input_error = module.set_input(FLAGS_method, value, index);
    if (set_input_error != Error::Ok) {
      std::cerr << "Failed to set input " << index << ": "
                << static_cast<int>(set_input_error) << "." << std::endl;
      return set_input_error;
    }
  }

  return module.execute(FLAGS_method.c_str());
}

// Store output tensors into the named data map.
void store_outputs(
    std::map<std::string, TensorPtr>& output_map,
    const std::string& case_name,
    const std::vector<EValue>& outputs) {
  // Because the outputs are likely memory planned, we need to clone the tensor
  // here to avoid having the data clobbered by the next run.
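  // (Presumably "memory planned" means the output EValues point into
  // preallocated buffers owned by the method, which are reused and overwritten
  // on the next execute() call; hence the deep copy below.)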

  for (auto i = 0u; i < outputs.size(); i++) {
    if (!outputs[i].isTensor()) {
      continue;
    }

    auto key_name = case_name + ":" + std::to_string(i);
    auto& tensor = outputs[i].toTensor();

    // Copy tensor storage.
    auto tensor_memory = malloc(tensor.nbytes());
    memcpy(tensor_memory, tensor.const_data_ptr(), tensor.nbytes());

    // Copy tensor metadata.
    std::vector<executorch::aten::SizesType> sizes(
        tensor.sizes().begin(),
        tensor.sizes().end());

    std::vector<executorch::aten::DimOrderType> dim_order(
        tensor.dim_order().begin(),
        tensor.dim_order().end());

    output_map.emplace(key_name, executorch::extension::make_tensor_ptr(
        sizes,
        tensor_memory,
        dim_order,
        {}, // Strides - implicit
        tensor.scalar_type(),
        exec_aten::TensorShapeDynamism::STATIC,
        [](void* ptr) {
          free(ptr);
        }));
  }
}
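
For reference, here is a minimal sketch (not part of this commit) of how an inputs.ptd file using the "test_case:input index" key convention consumed above might be produced, using the same make_tensor_ptr and save_ptd APIs the runner itself uses for its outputs. The standalone main(), the case name "case0", and the 2x2 float contents are illustrative assumptions; the actual test flow may generate these files elsewhere.

#include <executorch/extension/flat_tensor/serialize/serialize.h>
#include <executorch/extension/tensor/tensor.h>

#include <map>
#include <string>
#include <vector>

int main() {
  // One 2x2 float input for a hypothetical test case named "case0".
  static float data[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  std::vector<executorch::aten::SizesType> sizes = {2, 2};
  std::vector<executorch::aten::DimOrderType> dim_order = {0, 1};

  auto tensor = executorch::extension::make_tensor_ptr(
      sizes,
      data,
      dim_order,
      {}, // Strides - infer from sizes + dim order.
      executorch::aten::ScalarType::Float);

  // Key convention expected by the runner: "<test case name>:<input index>".
  std::map<std::string, executorch::aten::Tensor> tensors;
  tensors.emplace("case0:0", *tensor);

  auto err = executorch::extension::flat_tensor::save_ptd(
      "inputs.ptd", tensors, /* tensor_alignment = */ 16);
  return err == executorch::runtime::Error::Ok ? 0 : 1;
}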
