Skip to content

Commit 6f4cb1a

Browse files
committed
Update on "[ExecuTorch][#10447] Extend PyBundledModule with extension.BundledModule"
# Context This issue is a step of #9638. In #9638, we want to have `extension.Module` as the single source of implementation in `pybindings`, which means that `pybindings.PyModule` should use `extension.Module` rather than its own `pybindings.Module`. # Proposal Now that we have `extension.BundledModule` ready, we want to test it out by having our existing `PyBundledModule` extend it, and letting `verify_result_with_bundled_expected_output` use it, so that we can test out the whole thing with https://github.com/pytorch/executorch/blob/fb45e19055a92d2a91a4d4b7008e135232cbb14b/devtools/bundled_program/test/test_end2end.py Differential Revision: [D73564127](https://our.internmc.facebook.com/intern/diff/D73564127/) [ghstack-poisoned]
2 parents 3cfca44 + b036ff0 commit 6f4cb1a

File tree

5 files changed

+39
-42
lines changed

5 files changed

+39
-42
lines changed

extension/module/bundled_module.cpp

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
#include <executorch/extension/data_loader/buffer_data_loader.h>
1414
#include <executorch/extension/data_loader/file_data_loader.h>
1515

16-
1716
namespace executorch {
1817
namespace extension {
1918

@@ -28,7 +27,6 @@ std::unique_ptr<BufferDataLoader> program_data_loader(
2827
}
2928
} // namespace
3029

31-
3230
namespace ET_BUNDLED_MODULE_NAMESPACE {
3331

3432
BundledModule::BundledModule(

extension/module/bundled_module.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@
1010

1111
#include <executorch/extension/module/module.h>
1212

13-
1413
#ifdef USE_ATEN_LIB
1514
#define ET_BUNDLED_MODULE_NAMESPACE bundled_module::aten
1615
#else // !USE_ATEN_LIB

extension/module/module.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,6 @@ namespace executorch {
3838
namespace extension {
3939
namespace ET_MODULE_NAMESPACE {
4040

41-
4241
using ET_RUNTIME_NAMESPACE::MethodMeta;
4342
using ET_RUNTIME_NAMESPACE::Program;
4443

extension/module/module.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@
1616

1717
#include <executorch/runtime/executor/program.h>
1818

19-
2019
#ifdef USE_ATEN_LIB
2120
#define ET_MODULE_NAMESPACE module::aten
2221
#else // !USE_ATEN_LIB
@@ -515,7 +514,7 @@ namespace executor {
515514
// TODO(T197294990): Remove these deprecated aliases once all users have moved
516515
// to the new `::executorch` namespaces.
517516
using ::executorch::extension::ET_MODULE_NAMESPACE::Module;
518-
} // namespace executorch
517+
} // namespace executor
519518
} // namespace torch
520519

521520
namespace executorch {

extension/pybindings/pybindings.cpp

Lines changed: 38 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -94,10 +94,10 @@ using ::executorch::ET_RUNTIME_NAMESPACE::get_registered_kernels;
9494
using ::executorch::ET_RUNTIME_NAMESPACE::Kernel;
9595
using ::executorch::ET_RUNTIME_NAMESPACE::Method;
9696
using ::executorch::ET_RUNTIME_NAMESPACE::Program;
97-
using ::executorch::extension::ET_BUNDLED_MODULE_NAMESPACE::BundledModule;
9897
using ::executorch::extension::BufferDataLoader;
9998
using ::executorch::extension::MallocMemoryAllocator;
10099
using ::executorch::extension::MmapDataLoader;
100+
using ::executorch::extension::ET_BUNDLED_MODULE_NAMESPACE::BundledModule;
101101
using ::executorch::runtime::ArrayRef;
102102
using ::executorch::runtime::DataLoader;
103103
using ::executorch::runtime::Error;
@@ -443,43 +443,43 @@ inline std::unique_ptr<Module> load_module_from_file(
443443
}
444444

445445
inline py::list get_outputs_as_py_list(
446-
const std::vector<EValue>& outputs,
447-
bool clone_outputs = true) {
448-
const auto outputs_size = outputs.size();
449-
py::list list(outputs_size);
450-
for (size_t i = 0; i < outputs_size; ++i) {
451-
auto& v = outputs[i];
452-
if (Tag::None == v.tag) {
453-
list[i] = py::none();
454-
} else if (Tag::Int == v.tag) {
455-
list[i] = py::cast(v.toInt());
456-
} else if (Tag::Double == v.tag) {
457-
list[i] = py::cast(v.toDouble());
458-
} else if (Tag::Bool == v.tag) {
459-
list[i] = py::cast(v.toBool());
460-
} else if (Tag::String == v.tag) {
461-
list[i] = py::cast(std::string(v.toString().data()));
462-
} else if (Tag::Tensor == v.tag) {
446+
const std::vector<EValue>& outputs,
447+
bool clone_outputs = true) {
448+
const auto outputs_size = outputs.size();
449+
py::list list(outputs_size);
450+
for (size_t i = 0; i < outputs_size; ++i) {
451+
auto& v = outputs[i];
452+
if (Tag::None == v.tag) {
453+
list[i] = py::none();
454+
} else if (Tag::Int == v.tag) {
455+
list[i] = py::cast(v.toInt());
456+
} else if (Tag::Double == v.tag) {
457+
list[i] = py::cast(v.toDouble());
458+
} else if (Tag::Bool == v.tag) {
459+
list[i] = py::cast(v.toBool());
460+
} else if (Tag::String == v.tag) {
461+
list[i] = py::cast(std::string(v.toString().data()));
462+
} else if (Tag::Tensor == v.tag) {
463463
#ifdef USE_ATEN_LIB
464-
// Clone so the outputs in python do not share a lifetime with the
465-
// module object
466-
if (clone_outputs) {
467-
list[i] = py::cast(v.toTensor().clone());
468-
} else {
469-
list[i] = py::cast(v.toTensor());
470-
}
464+
// Clone so the outputs in python do not share a lifetime with the
465+
// module object
466+
if (clone_outputs) {
467+
list[i] = py::cast(v.toTensor().clone());
468+
} else {
469+
list[i] = py::cast(v.toTensor());
470+
}
471471
#else
472-
if (clone_outputs) {
473-
list[i] = py::cast(alias_attensor_to_etensor(v.toTensor()).clone());
472+
if (clone_outputs) {
473+
list[i] = py::cast(alias_attensor_to_etensor(v.toTensor()).clone());
474+
} else {
475+
list[i] = py::cast(alias_attensor_to_etensor(v.toTensor()));
476+
}
477+
#endif
474478
} else {
475-
list[i] = py::cast(alias_attensor_to_etensor(v.toTensor()));
479+
ET_ASSERT_UNREACHABLE_MSG("Invalid model output type");
476480
}
477-
#endif
478-
} else {
479-
ET_ASSERT_UNREACHABLE_MSG("Invalid model output type");
480481
}
481-
}
482-
return list;
482+
return list;
483483
}
484484

485485
static constexpr size_t kDEFAULT_BUNDLED_INPUT_POOL_SIZE = 16 * 1024U;
@@ -536,7 +536,8 @@ struct PyBundledModule : public BundledModule {
536536
const auto& outputs = result.get();
537537
py::list py_outputs = get_outputs_as_py_list(outputs);
538538

539-
Error status = BundledModule::verify_method_outputs(method_name, testset_idx, rtol, atol);
539+
Error status = BundledModule::verify_method_outputs(
540+
method_name, testset_idx, rtol, atol);
540541
THROW_IF_ERROR(
541542
status,
542543
"Result verification failed with status %" PRIu32,
@@ -860,7 +861,7 @@ struct PyModule final {
860861
}
861862

862863
py::list forward_single_input(
863-
const torch::Tensor& inputTensor,
864+
const torch::Tensor& inputTensor,
864865
bool clone_outputs = true) {
865866
py::list py_list;
866867
py_list.append(py::cast(inputTensor));
@@ -1126,7 +1127,8 @@ PYBIND11_MODULE(EXECUTORCH_PYTHON_MODULE_NAME, m) {
11261127
py::arg("clone_outputs") = true,
11271128
call_guard);
11281129

1129-
py::class_<PyBundledModule>(m, "BundledModule").def(
1130+
py::class_<PyBundledModule>(m, "BundledModule")
1131+
.def(
11301132
"verify_result_with_bundled_expected_output",
11311133
&PyBundledModule::verify_result_with_bundled_expected_output,
11321134
py::arg("method_name"),

0 commit comments

Comments (0)