@@ -34,6 +34,7 @@ using executorch::runtime::EValue;
 using executorch::runtime::FreeableBuffer;
 using executorch::runtime::MemoryAllocator;
 using executorch::runtime::Result;
+using executorch::runtime::Span;
 
 const char kHighAddrKey[] = "HighAddr";
 const char kImportForeverKey[] = "ImportForever";
@@ -86,7 +87,7 @@ Result<DelegateHandle*> NeuronBackend::init(
 Error NeuronBackend::execute(
     ET_UNUSED BackendExecutionContext& context,
     DelegateHandle* handle,
-    EValue** args) const {
+    Span<EValue*> args) const {
   NeuronExecuTorchDelegate* delegate =
       reinterpret_cast<NeuronExecuTorchDelegate*>(handle);
   return delegate->execute(context, args);
@@ -106,7 +107,7 @@ bool NeuronBackend::is_available() const {
 
 Error NeuronExecuTorchDelegate::execute(
     BackendExecutionContext& context,
-    EValue** args) const {
+    Span<EValue*> args) const {
   if (HintNeuronBackend(args) != NEURON_NO_ERROR) {
     return Error::InvalidState;
   };
@@ -163,8 +164,8 @@ Error NeuronExecuTorchDelegate::execute(
       : Error::InvalidState;
 };
 
-int NeuronExecuTorchDelegate::HintNeuronBackend(EValue** args) const {
-  auto HintImportForever = [this](EValue** args) -> int {
+int NeuronExecuTorchDelegate::HintNeuronBackend(Span<EValue*> args) const {
+  auto HintImportForever = [this](Span<EValue*> args) -> int {
     auto& allocator = GET_NEURON_ALLOCATOR;
     size_t inputCount = mInputSizes.size(), outputCount = mOutputSizes.size();
    for (int i = 0; i < inputCount; i++) {
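
For reference, the new Span<EValue*> parameter carries its own length, whereas the old EValue** signature was a bare pointer whose element count had to be tracked separately. Below is a minimal, illustrative sketch (not code from the MediaTek backend; the helper name countTensorArgs is hypothetical) of how a delegate might walk its arguments through executorch::runtime::Span:

#include <executorch/runtime/core/evalue.h>
#include <executorch/runtime/core/span.h>

using executorch::runtime::EValue;
using executorch::runtime::Span;

// Hypothetical helper: count how many delegate arguments are tensors.
// Span<EValue*> exposes size() and operator[], so the argument count no
// longer needs to be passed out of band as it did with a raw EValue**.
size_t countTensorArgs(Span<EValue*> args) {
  size_t tensors = 0;
  for (size_t i = 0; i < args.size(); ++i) {
    if (args[i] != nullptr && args[i]->isTensor()) {
      ++tensors;
    }
  }
  return tensors;
}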