Skip to content
Merged
31 changes: 1 addition & 30 deletions devtools/bundled_program/test/test_end2end.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,21 +5,7 @@
# LICENSE file in the root directory of this source tree.

# flake8: noqa: F401
import functools
import inspect
import os
import random
import unittest
from typing import Callable, Dict, Optional, Tuple, Type

import executorch.exir as exir

import executorch.exir.control_flow as control_flow

# @manual=//executorch/extension/pytree:pybindings
import executorch.extension.pytree as pytree

import torch

from executorch.devtools.bundled_program.core import BundledProgram
from executorch.devtools.bundled_program.serialize import (
Expand All @@ -35,8 +21,6 @@
try:
from executorch.extension.pybindings.portable_lib import (
_load_bundled_program_from_buffer,
_load_for_executorch_from_buffer,
_load_for_executorch_from_bundled_program,
)

kernel_mode = "lean"
Expand All @@ -47,8 +31,6 @@
try:
from executorch.extension.pybindings.aten_lib import ( # @manual=//executorch/extension/pybindings:aten_lib
_load_bundled_program_from_buffer,
_load_for_executorch_from_buffer,
_load_for_executorch_from_bundled_program,
)

assert kernel_mode is None
Expand All @@ -75,19 +57,8 @@ def test_sample_model_e2e(self):
bundled_program_buffer
)

executorch_module = _load_for_executorch_from_bundled_program(
executorch_bundled_program
)

for method_name in eager_model.method_names:
executorch_module.load_bundled_input(
executorch_bundled_program,
method_name,
0,
)
executorch_module.plan_execute(method_name)
executorch_module.verify_result_with_bundled_expected_output(
executorch_bundled_program,
executorch_bundled_program.verify_result_with_bundled_expected_output(
method_name,
0,
)
171 changes: 80 additions & 91 deletions extension/pybindings/pybindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
#include <executorch/extension/data_loader/buffer_data_loader.h>
#include <executorch/extension/data_loader/mmap_data_loader.h>
#include <executorch/extension/memory_allocator/malloc_memory_allocator.h>
#include <executorch/extension/module/bundled_module.h>
#include <executorch/extension/threadpool/threadpool.h>
#include <executorch/runtime/backend/interface.h>
#include <executorch/runtime/core/data_loader.h>
Expand Down Expand Up @@ -441,13 +442,54 @@ inline std::unique_ptr<Module> load_module_from_file(
program_verification);
}

// Convert a vector of runtime EValues into a Python list.
//
// Scalars (Int/Double/Bool/String) are copied by value and None maps to
// py::none(). Tensor entries are cloned by default so the Python-side
// objects do not share a lifetime with the owning module; pass
// clone_outputs=false to alias the module-owned storage instead.
inline py::list get_outputs_as_py_list(
    const std::vector<EValue>& outputs,
    bool clone_outputs = true) {
  const size_t count = outputs.size();
  py::list result(count);
  for (size_t idx = 0; idx < count; ++idx) {
    const auto& value = outputs[idx];
    switch (value.tag) {
      case Tag::None:
        result[idx] = py::none();
        break;
      case Tag::Int:
        result[idx] = py::cast(value.toInt());
        break;
      case Tag::Double:
        result[idx] = py::cast(value.toDouble());
        break;
      case Tag::Bool:
        result[idx] = py::cast(value.toBool());
        break;
      case Tag::String:
        result[idx] = py::cast(std::string(value.toString().data()));
        break;
      case Tag::Tensor:
#ifdef USE_ATEN_LIB
        // Clone so the outputs in python do not share a lifetime with the
        // module object.
        result[idx] = clone_outputs ? py::cast(value.toTensor().clone())
                                    : py::cast(value.toTensor());
#else
        result[idx] = clone_outputs
            ? py::cast(alias_attensor_to_etensor(value.toTensor()).clone())
            : py::cast(alias_attensor_to_etensor(value.toTensor()));
#endif
        break;
      default:
        ET_ASSERT_UNREACHABLE_MSG("Invalid model output type");
    }
  }
  return result;
}

static constexpr size_t kDEFAULT_BUNDLED_INPUT_POOL_SIZE = 16 * 1024U;

struct PyBundledModule final {
struct PyBundledModule : public BundledModule {
explicit PyBundledModule(
const py::bytes& buffer,
uint32_t bundled_input_pool_size)
: bundled_program_ptr_(buffer),
: BundledModule(buffer.cast<std::string_view>().data()),
bundled_program_ptr_(buffer),
program_ptr_(static_cast<const void*>(
bundled_program_flatbuffer::GetBundledProgram(
get_bundled_program_ptr())
Expand Down Expand Up @@ -476,6 +518,32 @@ struct PyBundledModule final {
return program_len_;
}

// Run bundled test case `testset_idx` of `method_name`, then verify the
// produced outputs against the bundled expected outputs within the given
// tolerances. Returns the method's outputs as a Python list on success;
// throws a Python exception (via THROW_IF_ERROR) if either execution or
// verification fails.
//
// NOTE(review): outputs are converted to Python objects BEFORE
// verification runs — presumably so the converted (cloned) values are not
// invalidated by the verification pass; confirm against BundledModule.
py::list verify_result_with_bundled_expected_output(
const std::string& method_name,
size_t testset_idx,
double rtol = 1e-5,
double atol = 1e-8) {
// Execute the method
auto result = BundledModule::execute(method_name, testset_idx);
if (!result.ok()) {
THROW_IF_ERROR(
result.error(),
"Method execution failed with status 0x%" PRIx32,
static_cast<uint32_t>(result.error()));
}

// Convert outputs to py::list
const auto& outputs = result.get();
py::list py_outputs = get_outputs_as_py_list(outputs);

// Compare actual vs. bundled expected outputs; THROW_IF_ERROR raises on
// any non-Ok status so a mismatch surfaces as a Python exception.
Error status = BundledModule::verify_method_outputs(method_name, testset_idx, rtol, atol);
THROW_IF_ERROR(
status,
"Result verification failed with status %" PRIu32,
static_cast<uint32_t>(status));
return py_outputs;
}

private:
// Store the bytes object instead of a raw pointer so that this module will
// keep the bytes alive.
Expand Down Expand Up @@ -792,7 +860,7 @@ struct PyModule final {
}

py::list forward_single_input(
const torch::Tensor& inputTensor,
const torch::Tensor& inputTensor,
bool clone_outputs = true) {
py::list py_list;
py_list.append(py::cast(inputTensor));
Expand Down Expand Up @@ -832,43 +900,6 @@ struct PyModule final {
}
}

void load_bundled_input(
PyBundledModule& m,
const std::string method_name,
size_t testset_idx) {
const void* bundled_program_ptr = m.get_bundled_program_ptr();
Error status = executorch::BUNDLED_PROGRAM_NAMESPACE::load_bundled_input(
module_->get_method(method_name), bundled_program_ptr, testset_idx);
THROW_IF_ERROR(
status,
"load_bundled_input failed with status 0x%" PRIx32,
static_cast<uint32_t>(status));
}

py::list verify_result_with_bundled_expected_output(
PyBundledModule& m,
const std::string method_name,
size_t testset_idx,
double rtol = 1e-5,
double atol = 1e-8) {
const void* bundled_program_ptr = m.get_bundled_program_ptr();
auto& method = module_->get_method(method_name);
Error status = executorch::BUNDLED_PROGRAM_NAMESPACE::load_bundled_input(
method, bundled_program_ptr, testset_idx);
THROW_IF_ERROR(
status,
"load_bundled_input failed with status 0x%" PRIx32,
static_cast<uint32_t>(status));
py::list outputs = plan_execute(method_name);
status = executorch::BUNDLED_PROGRAM_NAMESPACE::verify_method_outputs(
method, bundled_program_ptr, testset_idx, rtol, atol);
THROW_IF_ERROR(
status,
"Result verification failed with status %" PRIu32,
static_cast<uint32_t>(status));
return outputs;
}

py::list plan_execute(
const std::string method_name,
bool clone_outputs = true) {
Expand All @@ -891,46 +922,6 @@ struct PyModule final {
return get_outputs_as_py_list(outputs, clone_outputs);
}

py::list get_outputs_as_py_list(
const std::vector<EValue>& outputs,
bool clone_outputs = true) {
const auto outputs_size = outputs.size();
py::list list(outputs_size);
for (size_t i = 0; i < outputs_size; ++i) {
auto& v = outputs[i];
if (Tag::None == v.tag) {
list[i] = py::none();
} else if (Tag::Int == v.tag) {
list[i] = py::cast(v.toInt());
} else if (Tag::Double == v.tag) {
list[i] = py::cast(v.toDouble());
} else if (Tag::Bool == v.tag) {
list[i] = py::cast(v.toBool());
} else if (Tag::String == v.tag) {
list[i] = py::cast(std::string(v.toString().data()));
} else if (Tag::Tensor == v.tag) {
#ifdef USE_ATEN_LIB
// Clone so the outputs in python do not share a lifetime with the
// module object
if (clone_outputs) {
list[i] = py::cast(v.toTensor().clone());
} else {
list[i] = py::cast(v.toTensor());
}
#else
if (clone_outputs) {
list[i] = py::cast(alias_attensor_to_etensor(v.toTensor()).clone());
} else {
list[i] = py::cast(alias_attensor_to_etensor(v.toTensor()));
}
#endif
} else {
ET_ASSERT_UNREACHABLE_MSG("Invalid model output type");
}
}
return list;
}

std::unique_ptr<PyMethodMeta> method_meta(const std::string method_name) {
auto& method = module_->get_method(method_name);
return std::make_unique<PyMethodMeta>(module_, method.method_meta());
Expand Down Expand Up @@ -1090,16 +1081,6 @@ PYBIND11_MODULE(EXECUTORCH_PYTHON_MODULE_NAME, m) {
call_guard);

py::class_<PyModule>(m, "ExecuTorchModule")
.def("load_bundled_input", &PyModule::load_bundled_input, call_guard)
.def(
"verify_result_with_bundled_expected_output",
&PyModule::verify_result_with_bundled_expected_output,
py::arg("bundle"),
py::arg("method_name"),
py::arg("testset_idx"),
py::arg("rtol") = 1e-5,
py::arg("atol") = 1e-8,
call_guard)
.def(
"plan_execute",
&PyModule::plan_execute,
Expand Down Expand Up @@ -1145,7 +1126,15 @@ PYBIND11_MODULE(EXECUTORCH_PYTHON_MODULE_NAME, m) {
py::arg("clone_outputs") = true,
call_guard);

py::class_<PyBundledModule>(m, "BundledModule");
py::class_<PyBundledModule>(m, "BundledModule").def(
"verify_result_with_bundled_expected_output",
&PyBundledModule::verify_result_with_bundled_expected_output,
py::arg("method_name"),
py::arg("testset_idx"),
py::arg("rtol") = 1e-5,
py::arg("atol") = 1e-8,
call_guard);

py::class_<PyTensorInfo>(m, "TensorInfo")
.def("sizes", &PyTensorInfo::sizes, call_guard)
.def("dtype", &PyTensorInfo::dtype, call_guard)
Expand Down
2 changes: 2 additions & 0 deletions shim_et/xplat/executorch/extension/pybindings/pybindings.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ PORTABLE_MODULE_DEPS = [
"//executorch/extension/data_loader:buffer_data_loader",
"//executorch/extension/data_loader:mmap_data_loader",
"//executorch/extension/memory_allocator:malloc_memory_allocator",
"//executorch/extension/module:bundled_module",
"//executorch/runtime/executor/test:test_backend_compiler_lib",
"//executorch/devtools/etdump:etdump_flatcc",
] + get_all_cpu_backend_targets()
Expand All @@ -28,6 +29,7 @@ ATEN_MODULE_DEPS = [
"//executorch/extension/data_loader:buffer_data_loader",
"//executorch/extension/data_loader:mmap_data_loader",
"//executorch/extension/memory_allocator:malloc_memory_allocator",
"//executorch/extension/module:bundled_module_aten",
"//executorch/devtools/bundled_program:runtime_aten",
"//executorch/runtime/executor/test:test_backend_compiler_lib_aten",
"//executorch/devtools/etdump:etdump_flatcc",
Expand Down
Loading