@@ -34,6 +34,7 @@ using executorch::runtime::EValue;
 using executorch::runtime::FreeableBuffer;
 using executorch::runtime::MemoryAllocator;
 using executorch::runtime::Result;
+using executorch::runtime::Span;
 
 const char kHighAddrKey[] = "HighAddr";
 const char kImportForeverKey[] = "ImportForever";
@@ -86,7 +87,7 @@ Result<DelegateHandle*> NeuronBackend::init(
 Error NeuronBackend::execute(
     ET_UNUSED BackendExecutionContext& context,
     DelegateHandle* handle,
-    EValue** args) const {
+    Span<EValue*> args) const {
   NeuronExecuTorchDelegate* delegate =
       reinterpret_cast<NeuronExecuTorchDelegate*>(handle);
   return delegate->execute(context, args);
@@ -106,7 +107,7 @@ bool NeuronBackend::is_available() const {
 
 Error NeuronExecuTorchDelegate::execute(
     BackendExecutionContext& context,
-    EValue** args) const {
+    Span<EValue*> args) const {
   if (HintNeuronBackend(args) != NEURON_NO_ERROR) {
     return Error::InvalidState;
   };
@@ -163,8 +164,8 @@ Error NeuronExecuTorchDelegate::execute(
       : Error::InvalidState;
 };
 
-int NeuronExecuTorchDelegate::HintNeuronBackend(EValue** args) const {
-  auto HintImportForever = [this](EValue** args) -> int {
+int NeuronExecuTorchDelegate::HintNeuronBackend(Span<EValue*> args) const {
+  auto HintImportForever = [this](Span<EValue*> args) -> int {
     auto& allocator = GET_NEURON_ALLOCATOR;
     size_t inputCount = mInputSizes.size(), outputCount = mOutputSizes.size();
    for (int i = 0; i < inputCount; i++) {
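Note for reviewers: after this change the argument list is received as an executorch::runtime::Span<EValue*> rather than a raw EValue** array, so call sites can rely on the span's bounds instead of a separately tracked count. A minimal sketch of the pattern (countTensorArgs is a hypothetical helper, not part of this patch; it only assumes Span exposes size() and operator[] and that EValue provides isTensor()):

// Hypothetical sketch, not part of this change: walking a Span<EValue*>
// argument list the same way the old EValue** array was walked.
#include <executorch/runtime/core/evalue.h>
#include <executorch/runtime/core/span.h>

using executorch::runtime::EValue;
using executorch::runtime::Span;

// Counts how many of the delegate's arguments are tensors.
size_t countTensorArgs(Span<EValue*> args) {
  size_t tensors = 0;
  for (size_t i = 0; i < args.size(); ++i) {  // the span carries its own length
    if (args[i]->isTensor()) {
      ++tensors;
    }
  }
  return tensors;
}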