We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 1c533e5 · commit 48b63a6 — Copy full SHA for 48b63a6
examples/xnnpack/aot_compiler.py
@@ -142,8 +142,10 @@
142
143
# Import quantized ops. This requires portable_lib to be loaded first.
144
from executorch.kernels import quantized # usort: skip # noqa: F401, F403
145
+ from torch.utils._pytree import tree_flatten
146
147
m = _load_for_executorch_from_buffer(exec_prog.buffer)
148
logging.info("Successfully loaded the model")
- res = m.run_method("forward", *example_inputs)
149
+ flattened = tree_flatten(example_inputs)[0]
150
+ res = m.run_method("forward", flattened)
151
logging.info("Successfully ran the model")
0 commit comments