@@ -46,6 +46,7 @@
using executorch::runtime::get_backend_class;
using executorch::runtime::Result;
using executorch::aten::SizesType;
+using executorch::runtime::Span;
using executorch::aten::Tensor;
using executorch::runtime::kTensorDimensionLimit;

@@ -197,7 +198,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {

Error CoreMLBackendDelegate::execute(BackendExecutionContext& context,
DelegateHandle* handle,
-EValue** args) const {
+Span<EValue*> args) const {
const auto& nArgs = impl_->get_num_arguments(handle);
std::vector<MultiArray> delegate_args;
size_t nInputs = nArgs.first;
@@ -48,7 +48,7 @@ class CoreMLBackendDelegate final : public ::executorch::runtime::BackendInterface
/// @retval On success, `Error::Ok` otherwise any other `Error` case.
executorch::runtime::Error execute(executorch::runtime::BackendExecutionContext& context,
executorch::runtime::DelegateHandle* handle,
-executorch::runtime::EValue** args) const override;
+executorch::runtime::Span<executorch::runtime::EValue*> args) const override;

/// Returns `true` if the delegate is available otherwise `false`.
bool is_available() const override;
3 changes: 2 additions & 1 deletion backends/apple/mps/runtime/MPSBackend.mm
@@ -30,6 +30,7 @@
using executorch::runtime::Error;
using executorch::runtime::FreeableBuffer;
using executorch::runtime::Result;
+using executorch::runtime::Span;

class MPSBackend final : public ::executorch::runtime::BackendInterface {
public:
@@ -72,7 +73,7 @@ bool is_available() const override {
Error execute(
ET_UNUSED BackendExecutionContext& context,
DelegateHandle* handle,
-EValue** args) const override {
+Span<EValue*> args) const override {
auto executor = static_cast<mps::delegate::MPSExecutor*>(handle);
std::vector<const Tensor*> input_pointers;
std::vector<const Tensor*> output_pointers;
3 changes: 2 additions & 1 deletion backends/arm/runtime/EthosUBackend.cpp
@@ -70,6 +70,7 @@ using executorch::runtime::EValue;
using executorch::runtime::FreeableBuffer;
using executorch::runtime::MemoryAllocator;
using executorch::runtime::Result;
+using executorch::runtime::Span;

#define ETHOSU_NUM_BASE_ADDRS 3

@@ -140,7 +141,7 @@ class EthosUBackend final : public ::executorch::runtime::BackendInterface {
Error execute(
BackendExecutionContext& context,
DelegateHandle* input_handle,
-EValue** args) const override {
+Span<EValue*> args) const override {
#if defined(ET_EVENT_TRACER_ENABLED)
EventTracer* event_tracer = context.event_tracer();
EventTracerEntry event_tracer_local_scope;
2 changes: 1 addition & 1 deletion backends/arm/runtime/VGFBackend.cpp
@@ -152,7 +152,7 @@ class VGFBackend final : public ::executorch::runtime::BackendInterface {
Error execute(
ET_UNUSED BackendExecutionContext& context,
DelegateHandle* handle,
-EValue** args) const override {
+Span<EValue*> args) const override {
VgfRepr* repr = static_cast<VgfRepr*>(handle);

// Copy all inputs from EValue to VkDeviceMemory
9 changes: 5 additions & 4 deletions backends/mediatek/runtime/NeuronBackend.cpp
@@ -34,6 +34,7 @@ using executorch::runtime::EValue;
using executorch::runtime::FreeableBuffer;
using executorch::runtime::MemoryAllocator;
using executorch::runtime::Result;
+using executorch::runtime::Span;

const char kHighAddrKey[] = "HighAddr";
const char kImportForeverKey[] = "ImportForever";
@@ -86,7 +87,7 @@ Result<DelegateHandle*> NeuronBackend::init(
Error NeuronBackend::execute(
ET_UNUSED BackendExecutionContext& context,
DelegateHandle* handle,
-EValue** args) const {
+Span<EValue*> args) const {
NeuronExecuTorchDelegate* delegate =
reinterpret_cast<NeuronExecuTorchDelegate*>(handle);
return delegate->execute(context, args);
@@ -106,7 +107,7 @@ bool NeuronBackend::is_available() const {

Error NeuronExecuTorchDelegate::execute(
BackendExecutionContext& context,
-EValue** args) const {
+Span<EValue*> args) const {
if (HintNeuronBackend(args) != NEURON_NO_ERROR) {
return Error::InvalidState;
};
@@ -163,8 +164,8 @@ Error NeuronExecuTorchDelegate::execute(
: Error::InvalidState;
};

-int NeuronExecuTorchDelegate::HintNeuronBackend(EValue** args) const {
-auto HintImportForever = [this](EValue** args) -> int {
+int NeuronExecuTorchDelegate::HintNeuronBackend(Span<EValue*> args) const {
+auto HintImportForever = [this](Span<EValue*> args) -> int {
auto& allocator = GET_NEURON_ALLOCATOR;
size_t inputCount = mInputSizes.size(), outputCount = mOutputSizes.size();
for (int i = 0; i < inputCount; i++) {
8 changes: 5 additions & 3 deletions backends/mediatek/runtime/include/NeuronBackend.h
@@ -38,7 +38,8 @@ class NeuronBackend final : public ::executorch::runtime::BackendInterface {
::executorch::runtime::Error execute(
ET_UNUSED ::executorch::runtime::BackendExecutionContext& context,
::executorch::runtime::DelegateHandle* handle,
-::executorch::runtime::EValue** args) const override;
+::executorch::runtime::Span<::executorch::runtime::EValue*> args)
+const override;

void destroy(::executorch::runtime::DelegateHandle* handle) const override;

@@ -115,7 +116,7 @@ class NeuronExecuTorchDelegate {

::executorch::runtime::Error execute(
ET_UNUSED ::executorch::runtime::BackendExecutionContext& context,
-::executorch::runtime::EValue** args) const;
+::executorch::runtime::Span<::executorch::runtime::EValue*> args) const;

private:
template <bool isInput>
@@ -148,7 +149,8 @@ class NeuronExecuTorchDelegate {
return NEURON_NO_ERROR;
}

-int HintNeuronBackend(::executorch::runtime::EValue** args) const;
+int HintNeuronBackend(
+::executorch::runtime::Span<::executorch::runtime::EValue*> args) const;

private:
std::vector<size_t> mInputSizes;
2 changes: 1 addition & 1 deletion backends/nxp/runtime/NeutronBackend.cpp
@@ -330,7 +330,7 @@ class NeutronBackend final : public PyTorchBackendInterface {
Error execute(
BackendExecutionContext& context,
DelegateHandle* input_handle,
-EValue** args) const override {
+Span<EValue*> args) const override {
NeutronConfig* cfg = static_cast<NeutronConfig*>(input_handle);

// Allocate place for input and output pointers.
2 changes: 1 addition & 1 deletion backends/openvino/runtime/OpenvinoBackend.cpp
@@ -93,7 +93,7 @@ exr::Result<exr::DelegateHandle*> OpenvinoBackend::init(
exr::Error OpenvinoBackend::execute(
exr::BackendExecutionContext& context,
exr::DelegateHandle* input_handle,
-exr::EValue** args) const {
+exr::Span<exr::EValue*> args) const {
ExecutionHandle* execution_handle = (ExecutionHandle*)input_handle;

auto infer_request = execution_handle->infer_request;
2 changes: 1 addition & 1 deletion backends/openvino/runtime/OpenvinoBackend.h
@@ -45,7 +45,7 @@ class OpenvinoBackend final : public ::exr::BackendInterface {
exr::Error execute(
exr::BackendExecutionContext& context,
exr::DelegateHandle* input_handle,
-exr::EValue** args) const override;
+exr::Span<exr::EValue*> args) const override;
void destroy(exr::DelegateHandle* handle) const override;

private:
3 changes: 2 additions & 1 deletion backends/qualcomm/runtime/QnnExecuTorchBackend.cpp
@@ -28,6 +28,7 @@ using executorch::runtime::EValue;
using executorch::runtime::FreeableBuffer;
using executorch::runtime::MemoryAllocator;
using executorch::runtime::Result;
+using executorch::runtime::Span;

// ========== Public method implementations =========================
constexpr const char* QNN_COMPILE_SPEC = "qnn_compile_spec";
@@ -116,7 +117,7 @@ Result<DelegateHandle*> QnnExecuTorchBackend::init(
Error QnnExecuTorchBackend::execute(
BackendExecutionContext& context,
DelegateHandle* handle,
-EValue** args) const {
+Span<EValue*> args) const {
ET_CHECK_OR_RETURN_ERROR(
delegate_map_rev_.count(handle) != 0,
Internal,
3 changes: 2 additions & 1 deletion backends/qualcomm/runtime/QnnExecuTorchBackend.h
@@ -33,7 +33,8 @@ class QnnExecuTorchBackend final
executorch::runtime::Error execute(
ET_UNUSED executorch::runtime::BackendExecutionContext& context,
executorch::runtime::DelegateHandle* handle,
-executorch::runtime::EValue** args) const override;
+executorch::runtime::Span<executorch::runtime::EValue*> args)
+const override;

ET_NODISCARD executorch::runtime::Error set_option(
executorch::runtime::BackendOptionContext& context,
3 changes: 2 additions & 1 deletion backends/vulkan/runtime/VulkanBackend.cpp
@@ -48,6 +48,7 @@ using executorch::runtime::EValue;
using executorch::runtime::FreeableBuffer;
using executorch::runtime::kTensorDimensionLimit;
using executorch::runtime::Result;
+using executorch::runtime::Span;

using namespace vkcompute;

@@ -547,7 +548,7 @@ class VulkanBackend final : public ::executorch::runtime::BackendInterface {
Error execute(
ET_UNUSED BackendExecutionContext& context,
DelegateHandle* handle,
-EValue** args) const override {
+Span<EValue*> args) const override {
EXECUTORCH_SCOPE_PROF("VulkanBackend::execute");

ComputeGraph* compute_graph = static_cast<ComputeGraph*>(handle);
5 changes: 3 additions & 2 deletions backends/xnnpack/runtime/XNNExecutor.cpp
@@ -21,6 +21,7 @@ using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::is_contiguous_dim_order;
using executorch::runtime::kTensorDimensionLimit;
+using executorch::runtime::Span;

/**
* Initializes the XNNExecutor with the runtime and given number of
@@ -69,7 +70,7 @@ ET_NODISCARD Error XNNExecutor::initialize(
* runtime correspond to their index in the list of arg passed into
* delegate->execute()
*/
-ET_NODISCARD Error XNNExecutor::prepare_args(EValue** args) {
+ET_NODISCARD Error XNNExecutor::prepare_args(Span<EValue*> args) {
ET_CHECK_OR_RETURN_ERROR(
runtime_ != nullptr,
Internal,
@@ -196,7 +197,7 @@ ET_NODISCARD Error XNNExecutor::forward(BackendExecutionContext& context) {
* XNNPACK gives the index tensor to us as int32, we need to convert it
* back to int64 for ExecuTorch.
*/
-ET_NODISCARD Error XNNExecutor::resize_outputs(EValue** args) const {
+ET_NODISCARD Error XNNExecutor::resize_outputs(Span<EValue*> args) const {
size_t output_idx_start = input_ids_.size();
for (size_t i = output_idx_start; i < externals_.size(); ++i) {
uint32_t ext_id = externals_[i].id;
4 changes: 2 additions & 2 deletions backends/xnnpack/runtime/XNNExecutor.h
@@ -69,7 +69,7 @@ class XNNExecutor {
* any additional memory planning as needed
*/
ET_NODISCARD executorch::runtime::Error prepare_args(
-executorch::runtime::EValue** args);
+executorch::runtime::Span<executorch::runtime::EValue*> args);

/**
* Executes the graph using the args prepared at prepare_args().
@@ -83,7 +83,7 @@
* Performs any post processing of outputs like tensor resizing
*/
ET_NODISCARD executorch::runtime::Error resize_outputs(
-executorch::runtime::EValue** args) const;
+executorch::runtime::Span<executorch::runtime::EValue*> args) const;

friend class XNNCompiler;
};
3 changes: 2 additions & 1 deletion backends/xnnpack/runtime/XNNPACKBackend.cpp
@@ -33,6 +33,7 @@ using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::FreeableBuffer;
using executorch::runtime::Result;
+using executorch::runtime::Span;

class XnnpackBackend final
: public ::executorch::ET_RUNTIME_NAMESPACE::BackendInterface {
@@ -126,7 +127,7 @@ class XnnpackBackend final
Error execute(
BackendExecutionContext& context,
DelegateHandle* handle,
-EValue** args) const override {
+Span<EValue*> args) const override {
auto executor = static_cast<xnnpack::delegate::XNNExecutor*>(handle);

#ifdef ENABLE_XNNPACK_SHARED_WORKSPACE
4 changes: 3 additions & 1 deletion backends/xnnpack/test/runtime/test_xnnexecutor.cpp
@@ -14,6 +14,7 @@
using executorch::backends::xnnpack::delegate::XNNExecutor;
using executorch::runtime::Error;
using executorch::runtime::EValue;
+using executorch::runtime::Span;
using executorch::runtime::testing::TensorFactory;

TEST(XNNExecutorTest, ArgumentWithTooManyDimensions) {
@@ -90,6 +91,7 @@ TEST(XNNExecutorTest, ArgumentWithTooManyDimensions) {
EValue input_ev(input_tensor);
EValue output_ev(output_tensor);
std::array<EValue*, 2> args = {&input_ev, &output_ev};
+Span<EValue*> stack_args(args.data(), 2);
// Check for invalid number of dimensions should fail without stack overflow.
-EXPECT_EQ(executor.prepare_args(args.data()), Error::InvalidArgument);
+EXPECT_EQ(executor.prepare_args(stack_args), Error::InvalidArgument);
}
2 changes: 1 addition & 1 deletion codegen/api/unboxing.py
@@ -34,7 +34,7 @@ class Unboxing:
Takes a sequence of Bindings and unbox EValues to these Bindings. Return generated code that performs correct unboxing.
A sample generated code:
// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-void mul_out(EValue** stack) {
+void mul_out(Span<EValue*> stack) {
EValue& self = *stack[0];
EValue& other = *stack[1];
EValue& out = *stack[2];
2 changes: 1 addition & 1 deletion docs/source/compiler-delegate-and-partitioner.md
@@ -99,7 +99,7 @@ ET_NODISCARD virtual Result<DelegateHandle*> init(
ET_NODISCARD virtual Error execute(
BackendExecutionContext& context,
DelegateHandle* handle,
-EValue** args);
+Span<EValue*> args);

// [optional] Runtime destroy. Destroy the resource held by the backend
virtual void destroy(ET_UNUSED DelegateHandle* handle);
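As context for the documented interface change above, here is a minimal sketch (not part of this PR) of how a delegate's execute body can use the Span-based argument list; the echo_execute helper and its checks are illustrative assumptions, and the sketch assumes Span exposes size() and operator[] as the rest of this diff suggests.

#include <executorch/runtime/backend/interface.h>

using executorch::runtime::BackendExecutionContext;
using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::Span;

// Illustrative helper, not ExecuTorch API: validates the argument list a
// backend's execute() now receives, since the count travels with the pointer.
Error echo_execute(BackendExecutionContext& /*context*/, Span<EValue*> args) {
  // A delegate expecting at least one input and one output can bounds-check
  // up front instead of trusting an out-of-band argument count.
  if (args.size() < 2) {
    return Error::InvalidArgument;
  }
  for (size_t i = 0; i < args.size(); ++i) {
    if (args[i] == nullptr || !args[i]->isTensor()) {
      return Error::InvalidArgument;
    }
  }
  // ... hand the leading tensors to the accelerator and write results into
  // the trailing output tensors ...
  return Error::Ok;
}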
2 changes: 1 addition & 1 deletion exir/backend/test/demos/rpc/ExecutorBackend.cpp
@@ -188,7 +188,7 @@ class ExecutorBackend final : public ::executorch::runtime::BackendInterface {
Error execute(
ET_UNUSED BackendExecutionContext& context,
DelegateHandle* handle,
-EValue** args) const override {
+Span<EValue*> args) const override {
Method* client_method = static_cast<Method*>(handle);
auto num_inputs = client_method->inputs_size();
Error status = Error::Ok;
2 changes: 1 addition & 1 deletion runtime/backend/interface.h
@@ -99,7 +99,7 @@ class BackendInterface {
ET_NODISCARD virtual Error execute(
BackendExecutionContext& context,
DelegateHandle* handle,
-EValue** args) const = 0;
+Span<EValue*> args) const = 0;

/**
* Responsible update the backend status, if any. The backend options are
9 changes: 5 additions & 4 deletions runtime/backend/test/backend_interface_update_test.cpp
@@ -30,6 +30,7 @@ using executorch::runtime::FreeableBuffer;
using executorch::runtime::get_backend_class;
using executorch::runtime::MemoryAllocator;
using executorch::runtime::Result;
+using executorch::runtime::Span;

class MockBackend : public BackendInterface {
public:
@@ -50,7 +51,7 @@ class MockBackend : public BackendInterface {
Error execute(
__ET_UNUSED BackendExecutionContext& context,
__ET_UNUSED DelegateHandle* handle,
-__ET_UNUSED EValue** args) const override {
+__ET_UNUSED Span<EValue*> args) const override {
execute_count++;
return Error::Ok;
}
@@ -243,7 +244,7 @@ TEST_F(BackendInterfaceUpdateTest, UpdateAfterInitBeforeExecute) {

// Now execute
DelegateHandle* handle = handle_or_error.get();
-EValue** args = nullptr; // Not used in mock
+Span<EValue*> args((EValue**)nullptr, (size_t)0); // Not used in mock
err = mock_backend->execute(execute_context, handle, args);
EXPECT_EQ(err, Error::Ok);

@@ -269,7 +270,7 @@ TEST_F(BackendInterfaceUpdateTest, UpdateBetweenExecutes) {
DelegateHandle* handle = handle_or_error.get();

// First execute
-EValue** args = nullptr;
+Span<EValue*> args((EValue**)nullptr, (size_t)0); // Not used in mock
Error err = mock_backend->execute(execute_context, handle, args);
EXPECT_EQ(err, Error::Ok);

@@ -308,7 +309,7 @@ class StubBackend : public BackendInterface {
Error execute(
BackendExecutionContext& context,
DelegateHandle* handle,
-EValue** args) const override {
+Span<EValue*> args) const override {
return Error::Ok;
}

4 changes: 2 additions & 2 deletions runtime/executor/method.cpp
@@ -127,7 +127,7 @@ class BackendDelegate final {

Error Execute(
BackendExecutionContext& backend_execution_context,
-EValue** args) const {
+Span<EValue*> args) const {
EXECUTORCH_SCOPE_PROF("delegate_execute");
return backend_->execute(backend_execution_context, handle_, args);
}
@@ -1366,7 +1366,7 @@ Error Method::execute_instruction() {
/*method_name=*/serialization_plan_->name()->c_str());
err = delegates_[delegate_idx].Execute(
backend_execution_context,
-chain.argument_lists_[step_state_.instr_idx].data());
+chain.argument_lists_[step_state_.instr_idx]);
if (err != Error::Ok) {
ET_LOG(
Error,
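A closing migration note, sketched under assumptions: the as_span adapter and the header paths below are hypothetical, not from this PR. A call site that still holds a raw EValue** plus a separate count can wrap the pair in a Span at the boundary, the same pattern the updated XNNExecutor test uses with Span<EValue*>(args.data(), 2).

#include <cstddef>
#include <executorch/runtime/core/evalue.h>
#include <executorch/runtime/core/span.h>

using executorch::runtime::EValue;
using executorch::runtime::Span;

// Hypothetical adapter: wraps an existing pointer/length pair in a Span view
// (no copy) so it can be passed to the new execute(..., Span<EValue*>) shape.
Span<EValue*> as_span(EValue** raw_args, size_t n_args) {
  return Span<EValue*>(raw_args, n_args);
}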