From 398ba750a47377b9216e02ccb6473c1952ea6f65 Mon Sep 17 00:00:00 2001 From: gasoonjia Date: Wed, 11 Jun 2025 15:49:50 -0700 Subject: [PATCH 1/2] hacky solution to solve D73564127's duplicate dependency issues to unblock pybinding backend migration Pull Request resolved: https://github.com/pytorch/executorch/pull/11566 This diff creates different namespaces for Module and BundledModule under different modes (aten or portable) to hackily solve the duplicated symbol issues in D73564127, since there are too many complex dependency issues to solve and lots of users are asking for pybinding backend migration. In the future this implementation should be replaced by shim-like layers. ghstack-source-id: 289817083 Differential Revision: [D76405701](https://our.internmc.facebook.com/intern/diff/D76405701/) --- extension/module/bundled_module.cpp | 4 +++- extension/module/bundled_module.h | 11 ++++++++++ extension/module/module.cpp | 2 ++ extension/module/module.h | 21 +++++++++++++++++-- extension/module/test/bundled_module_test.cpp | 2 +- 5 files changed, 36 insertions(+), 4 deletions(-) diff --git a/extension/module/bundled_module.cpp b/extension/module/bundled_module.cpp index 083aef141a0..50dbfe69237 100644 --- a/extension/module/bundled_module.cpp +++ b/extension/module/bundled_module.cpp @@ -27,6 +27,8 @@ std::unique_ptr program_data_loader( } } // namespace +namespace ET_BUNDLED_MODULE_NAMESPACE { + BundledModule::BundledModule( const void* bundled_program_ptr, std::unique_ptr memory_allocator, @@ -107,6 +109,6 @@ runtime::Error BundledModule::verify_method_outputs( return executorch::BUNDLED_PROGRAM_NAMESPACE::verify_method_outputs( *method, bundled_program_ptr_, testset_idx, rtol, atol); } - +} // namespace ET_BUNDLED_MODULE_NAMESPACE } // namespace extension } // namespace executorch diff --git a/extension/module/bundled_module.h b/extension/module/bundled_module.h index d254a2cdcb5..c75d83bcbad 100644 --- a/extension/module/bundled_module.h +++ 
b/extension/module/bundled_module.h @@ -10,9 +10,19 @@ #include +#ifdef USE_ATEN_LIB +#define ET_BUNDLED_MODULE_NAMESPACE bundled_module::aten +#else // !USE_ATEN_LIB +#define ET_BUNDLED_MODULE_NAMESPACE bundled_module +#endif // USE_ATEN_LIB + namespace executorch { namespace extension { +using ET_MODULE_NAMESPACE::Module; + +namespace ET_BUNDLED_MODULE_NAMESPACE { + /** * A facade class for loading bundled programs and executing methods within * them. @@ -119,5 +129,6 @@ class BundledModule : public Module { bool is_loaded_from_file_ = false; }; +} // namespace ET_BUNDLED_MODULE_NAMESPACE } // namespace extension } // namespace executorch diff --git a/extension/module/module.cpp b/extension/module/module.cpp index 721e9e5b89e..3212077d2ee 100644 --- a/extension/module/module.cpp +++ b/extension/module/module.cpp @@ -36,6 +36,7 @@ namespace executorch { namespace extension { +namespace ET_MODULE_NAMESPACE { using ET_RUNTIME_NAMESPACE::MethodMeta; using ET_RUNTIME_NAMESPACE::Program; @@ -308,5 +309,6 @@ runtime::Error Module::set_output( output_tensor.mutable_data_ptr(), output_tensor.nbytes(), output_index); } +} // namespace ET_MODULE_NAMESPACE } // namespace extension } // namespace executorch diff --git a/extension/module/module.h b/extension/module/module.h index e75d8383115..080ae53f43a 100644 --- a/extension/module/module.h +++ b/extension/module/module.h @@ -16,6 +16,12 @@ #include +#ifdef USE_ATEN_LIB +#define ET_MODULE_NAMESPACE module::aten +#else // !USE_ATEN_LIB +#define ET_MODULE_NAMESPACE module +#endif // USE_ATEN_LIB + namespace executorch { namespace extension { @@ -24,6 +30,9 @@ using ET_RUNTIME_NAMESPACE::MethodMeta; using ET_RUNTIME_NAMESPACE::NamedDataMap; using ET_RUNTIME_NAMESPACE::Program; +class ExecuTorchJni; + +namespace ET_MODULE_NAMESPACE { /** * A facade class for loading programs and executing methods within them. 
*/ @@ -493,9 +502,10 @@ class Module { protected: std::unordered_map methods_; - friend class ExecuTorchJni; + friend class executorch::extension::ExecuTorchJni; }; +} // namespace ET_MODULE_NAMESPACE } // namespace extension } // namespace executorch @@ -503,6 +513,13 @@ namespace torch { namespace executor { // TODO(T197294990): Remove these deprecated aliases once all users have moved // to the new `::executorch` namespaces. -using ::executorch::extension::Module; +using ::executorch::extension::ET_MODULE_NAMESPACE::Module; } // namespace executor } // namespace torch + +namespace executorch { +namespace extension { +// backward compatible namespace alias +using ::executorch::extension::ET_MODULE_NAMESPACE::Module; +} // namespace extension +} // namespace executorch diff --git a/extension/module/test/bundled_module_test.cpp b/extension/module/test/bundled_module_test.cpp index a07c5dd5486..0009c15d69e 100644 --- a/extension/module/test/bundled_module_test.cpp +++ b/extension/module/test/bundled_module_test.cpp @@ -9,7 +9,7 @@ #include #include -using namespace ::executorch::extension; +using namespace ::executorch::extension::ET_BUNDLED_MODULE_NAMESPACE; using namespace ::executorch::runtime; class BundledModuleTest : public ::testing::Test { From d62f01f80e0235796e81e9fde5b8f7ed5b9f5653 Mon Sep 17 00:00:00 2001 From: gasoonjia Date: Wed, 11 Jun 2025 21:17:05 -0700 Subject: [PATCH 2/2] [ExecuTorch][#10447] Extend `PyBundledModule` with `extension.BundledModule` Pull Request resolved: https://github.com/pytorch/executorch/pull/11565 # Context This issue is a step of https://github.com/pytorch/executorch/discussions/9638. In https://github.com/pytorch/executorch/discussions/9638, we want to have `extension.Module` as the single source of implementation in `pybindings`, which means that `pybindings.PyModule` should use `extension.Module` rather than its own `pybindings.Module`. 
# Proposal Now that we have `extension.BundledModule` ready, we want to test it out by having our existing `PyBundledModule` to extend it, and let `verify_result_with_bundled_expected_output` to use it, so that we can test out the whole thing with https://github.com/pytorch/executorch/blob/fb45e19055a92d2a91a4d4b7008e135232cbb14b/devtools/bundled_program/test/test_end2end.py ghstack-source-id: 289817714 ghstack-source-id: 289817714 @exported-using-ghexport Differential Revision: [D73564127](https://our.internmc.facebook.com/intern/diff/D73564127/) --- CMakeLists.txt | 8 + devtools/bundled_program/test/test_end2end.py | 31 +--- extension/pybindings/README.md | 3 +- extension/pybindings/pybindings.cpp | 172 +++++++++--------- .../extension/pybindings/pybindings.bzl | 4 + 5 files changed, 96 insertions(+), 122 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c7ed7b1fcb1..3a9bb0f4027 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -579,6 +579,14 @@ if(EXECUTORCH_BUILD_PYBIND) torch ) + if(EXECUTORCH_BUILD_EXTENSION_MODULE) + if(CMAKE_TOOLCHAIN_IOS OR CMAKE_TOOLCHAIN_ANDROID OR APPLE) + list(APPEND _dep_libs extension_module_static) + else() + list(APPEND _dep_libs extension_module) + endif() + endif() + if(EXECUTORCH_BUILD_TESTS) list(APPEND _dep_libs test_backend_compiler_lib) endif() diff --git a/devtools/bundled_program/test/test_end2end.py b/devtools/bundled_program/test/test_end2end.py index 7cee073be0e..3268a0df19a 100644 --- a/devtools/bundled_program/test/test_end2end.py +++ b/devtools/bundled_program/test/test_end2end.py @@ -5,21 +5,7 @@ # LICENSE file in the root directory of this source tree. 
# flake8: noqa: F401 -import functools -import inspect -import os -import random import unittest -from typing import Callable, Dict, Optional, Tuple, Type - -import executorch.exir as exir - -import executorch.exir.control_flow as control_flow - -# @manual=//executorch/extension/pytree:pybindings -import executorch.extension.pytree as pytree - -import torch from executorch.devtools.bundled_program.core import BundledProgram from executorch.devtools.bundled_program.serialize import ( @@ -35,8 +21,6 @@ try: from executorch.extension.pybindings.portable_lib import ( _load_bundled_program_from_buffer, - _load_for_executorch_from_buffer, - _load_for_executorch_from_bundled_program, ) kernel_mode = "lean" @@ -47,8 +31,6 @@ try: from executorch.extension.pybindings.aten_lib import ( # @manual=//executorch/extension/pybindings:aten_lib _load_bundled_program_from_buffer, - _load_for_executorch_from_buffer, - _load_for_executorch_from_bundled_program, ) assert kernel_mode is None @@ -75,19 +57,8 @@ def test_sample_model_e2e(self): bundled_program_buffer ) - executorch_module = _load_for_executorch_from_bundled_program( - executorch_bundled_program - ) - for method_name in eager_model.method_names: - executorch_module.load_bundled_input( - executorch_bundled_program, - method_name, - 0, - ) - executorch_module.plan_execute(method_name) - executorch_module.verify_result_with_bundled_expected_output( - executorch_bundled_program, + executorch_bundled_program.verify_result_with_bundled_expected_output( method_name, 0, ) diff --git a/extension/pybindings/README.md b/extension/pybindings/README.md index 2cd680e7bb9..4a663a69b49 100644 --- a/extension/pybindings/README.md +++ b/extension/pybindings/README.md @@ -27,8 +27,6 @@ CMAKE_ARGS="-DEXECUTORCH_BUILD_MPS=ON" ./install_executorch.sh - `_reset_profile_results()`: Reset profile results. ## Classes ### ExecuTorchModule -- `load_bundled_input()`: Load bundled input. 
-- `verify_result_with_bundled_expected_output(bundle: str, method_name: str, testset_idx: int, rtol: float = 1e-5, atol: float = 1e-8)`: Verify result with bundled expected output. - `plan_execute()`: Plan and execute. - `run_method()`: Run method. - `forward()`: Forward. This takes a pytree-flattend PyTorch-tensor-based input. @@ -37,5 +35,6 @@ CMAKE_ARGS="-DEXECUTORCH_BUILD_MPS=ON" ./install_executorch.sh - `__call__()`: Call method. ### BundledModule This class is currently empty and serves as a placeholder for future methods and attributes. +- `verify_result_with_bundled_expected_output(method_name: str, testset_idx: int, rtol: float = 1e-5, atol: float = 1e-8)`: Verify result with bundled expected output. ## Note All functions and methods are guarded by a call guard that redirects `cout` and `cerr` to the Python environment. diff --git a/extension/pybindings/pybindings.cpp b/extension/pybindings/pybindings.cpp index 9cd5adae679..542c7339314 100644 --- a/extension/pybindings/pybindings.cpp +++ b/extension/pybindings/pybindings.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -96,6 +97,7 @@ using ::executorch::ET_RUNTIME_NAMESPACE::Program; using ::executorch::extension::BufferDataLoader; using ::executorch::extension::MallocMemoryAllocator; using ::executorch::extension::MmapDataLoader; +using ::executorch::extension::ET_BUNDLED_MODULE_NAMESPACE::BundledModule; using ::executorch::runtime::ArrayRef; using ::executorch::runtime::DataLoader; using ::executorch::runtime::Error; @@ -440,13 +442,54 @@ inline std::unique_ptr load_module_from_file( program_verification); } +inline py::list get_outputs_as_py_list( + const std::vector& outputs, + bool clone_outputs = true) { + const auto outputs_size = outputs.size(); + py::list list(outputs_size); + for (size_t i = 0; i < outputs_size; ++i) { + auto& v = outputs[i]; + if (Tag::None == v.tag) { + list[i] = py::none(); + } else if (Tag::Int == v.tag) { + list[i] = 
py::cast(v.toInt()); + } else if (Tag::Double == v.tag) { + list[i] = py::cast(v.toDouble()); + } else if (Tag::Bool == v.tag) { + list[i] = py::cast(v.toBool()); + } else if (Tag::String == v.tag) { + list[i] = py::cast(std::string(v.toString().data())); + } else if (Tag::Tensor == v.tag) { +#ifdef USE_ATEN_LIB + // Clone so the outputs in python do not share a lifetime with the + // module object + if (clone_outputs) { + list[i] = py::cast(v.toTensor().clone()); + } else { + list[i] = py::cast(v.toTensor()); + } +#else + if (clone_outputs) { + list[i] = py::cast(alias_attensor_to_etensor(v.toTensor()).clone()); + } else { + list[i] = py::cast(alias_attensor_to_etensor(v.toTensor())); + } +#endif + } else { + ET_ASSERT_UNREACHABLE_MSG("Invalid model output type"); + } + } + return list; +} + static constexpr size_t kDEFAULT_BUNDLED_INPUT_POOL_SIZE = 16 * 1024U; -struct PyBundledModule final { +struct PyBundledModule : public BundledModule { explicit PyBundledModule( const py::bytes& buffer, uint32_t bundled_input_pool_size) - : bundled_program_ptr_(buffer), + : BundledModule(buffer.cast().data()), + bundled_program_ptr_(buffer), program_ptr_(static_cast( bundled_program_flatbuffer::GetBundledProgram( get_bundled_program_ptr()) @@ -475,6 +518,33 @@ struct PyBundledModule final { return program_len_; } + py::list verify_result_with_bundled_expected_output( + const std::string& method_name, + size_t testset_idx, + double rtol = 1e-5, + double atol = 1e-8) { + // Execute the method + auto result = BundledModule::execute(method_name, testset_idx); + if (!result.ok()) { + THROW_IF_ERROR( + result.error(), + "Method execution failed with status 0x%" PRIx32, + static_cast(result.error())); + } + + // Convert outputs to py::list + const auto& outputs = result.get(); + py::list py_outputs = get_outputs_as_py_list(outputs); + + Error status = BundledModule::verify_method_outputs( + method_name, testset_idx, rtol, atol); + THROW_IF_ERROR( + status, + "Result verification 
failed with status %" PRIu32, + static_cast(status)); + return py_outputs; + } + private: // Store the bytes object instead of a raw pointer so that this module will // keep the bytes alive. @@ -831,43 +901,6 @@ struct PyModule final { } } - void load_bundled_input( - PyBundledModule& m, - const std::string method_name, - size_t testset_idx) { - const void* bundled_program_ptr = m.get_bundled_program_ptr(); - Error status = executorch::BUNDLED_PROGRAM_NAMESPACE::load_bundled_input( - module_->get_method(method_name), bundled_program_ptr, testset_idx); - THROW_IF_ERROR( - status, - "load_bundled_input failed with status 0x%" PRIx32, - static_cast(status)); - } - - py::list verify_result_with_bundled_expected_output( - PyBundledModule& m, - const std::string method_name, - size_t testset_idx, - double rtol = 1e-5, - double atol = 1e-8) { - const void* bundled_program_ptr = m.get_bundled_program_ptr(); - auto& method = module_->get_method(method_name); - Error status = executorch::BUNDLED_PROGRAM_NAMESPACE::load_bundled_input( - method, bundled_program_ptr, testset_idx); - THROW_IF_ERROR( - status, - "load_bundled_input failed with status 0x%" PRIx32, - static_cast(status)); - py::list outputs = plan_execute(method_name); - status = executorch::BUNDLED_PROGRAM_NAMESPACE::verify_method_outputs( - method, bundled_program_ptr, testset_idx, rtol, atol); - THROW_IF_ERROR( - status, - "Result verification failed with status %" PRIu32, - static_cast(status)); - return outputs; - } - py::list plan_execute( const std::string method_name, bool clone_outputs = true) { @@ -890,46 +923,6 @@ struct PyModule final { return get_outputs_as_py_list(outputs, clone_outputs); } - py::list get_outputs_as_py_list( - const std::vector& outputs, - bool clone_outputs = true) { - const auto outputs_size = outputs.size(); - py::list list(outputs_size); - for (size_t i = 0; i < outputs_size; ++i) { - auto& v = outputs[i]; - if (Tag::None == v.tag) { - list[i] = py::none(); - } else if (Tag::Int 
== v.tag) { - list[i] = py::cast(v.toInt()); - } else if (Tag::Double == v.tag) { - list[i] = py::cast(v.toDouble()); - } else if (Tag::Bool == v.tag) { - list[i] = py::cast(v.toBool()); - } else if (Tag::String == v.tag) { - list[i] = py::cast(std::string(v.toString().data())); - } else if (Tag::Tensor == v.tag) { -#ifdef USE_ATEN_LIB - // Clone so the outputs in python do not share a lifetime with the - // module object - if (clone_outputs) { - list[i] = py::cast(v.toTensor().clone()); - } else { - list[i] = py::cast(v.toTensor()); - } -#else - if (clone_outputs) { - list[i] = py::cast(alias_attensor_to_etensor(v.toTensor()).clone()); - } else { - list[i] = py::cast(alias_attensor_to_etensor(v.toTensor())); - } -#endif - } else { - ET_ASSERT_UNREACHABLE_MSG("Invalid model output type"); - } - } - return list; - } - std::unique_ptr method_meta(const std::string method_name) { auto& method = module_->get_method(method_name); return std::make_unique(module_, method.method_meta()); @@ -1089,16 +1082,6 @@ PYBIND11_MODULE(EXECUTORCH_PYTHON_MODULE_NAME, m) { call_guard); py::class_(m, "ExecuTorchModule") - .def("load_bundled_input", &PyModule::load_bundled_input, call_guard) - .def( - "verify_result_with_bundled_expected_output", - &PyModule::verify_result_with_bundled_expected_output, - py::arg("bundle"), - py::arg("method_name"), - py::arg("testset_idx"), - py::arg("rtol") = 1e-5, - py::arg("atol") = 1e-8, - call_guard) .def( "plan_execute", &PyModule::plan_execute, @@ -1144,7 +1127,16 @@ PYBIND11_MODULE(EXECUTORCH_PYTHON_MODULE_NAME, m) { py::arg("clone_outputs") = true, call_guard); - py::class_(m, "BundledModule"); + py::class_(m, "BundledModule") + .def( + "verify_result_with_bundled_expected_output", + &PyBundledModule::verify_result_with_bundled_expected_output, + py::arg("method_name"), + py::arg("testset_idx"), + py::arg("rtol") = 1e-5, + py::arg("atol") = 1e-8, + call_guard); + py::class_(m, "TensorInfo") .def("sizes", &PyTensorInfo::sizes, call_guard) 
.def("dtype", &PyTensorInfo::dtype, call_guard) diff --git a/shim_et/xplat/executorch/extension/pybindings/pybindings.bzl b/shim_et/xplat/executorch/extension/pybindings/pybindings.bzl index 1616304c3ea..62020a2c22d 100644 --- a/shim_et/xplat/executorch/extension/pybindings/pybindings.bzl +++ b/shim_et/xplat/executorch/extension/pybindings/pybindings.bzl @@ -16,6 +16,8 @@ PORTABLE_MODULE_DEPS = [ "//executorch/extension/data_loader:buffer_data_loader", "//executorch/extension/data_loader:mmap_data_loader", "//executorch/extension/memory_allocator:malloc_memory_allocator", + "//executorch/extension/module:module", + "//executorch/extension/module:bundled_module", "//executorch/runtime/executor/test:test_backend_compiler_lib", "//executorch/devtools/etdump:etdump_flatcc", ] + get_all_cpu_backend_targets() @@ -28,6 +30,8 @@ ATEN_MODULE_DEPS = [ "//executorch/extension/data_loader:buffer_data_loader", "//executorch/extension/data_loader:mmap_data_loader", "//executorch/extension/memory_allocator:malloc_memory_allocator", + "//executorch/extension/module:module_aten", + "//executorch/extension/module:bundled_module_aten", "//executorch/devtools/bundled_program:runtime_aten", "//executorch/runtime/executor/test:test_backend_compiler_lib_aten", "//executorch/devtools/etdump:etdump_flatcc",