@@ -223,13 +223,48 @@ def run_target(
     elif target_board == "vkml_emulation_layer":
         return run_vkml_emulation_layer(
             executorch_program_manager,
+            inputs,
             intermediate_path,
             elf_path,
         )
 
 
+def save_inputs_to_file(
+    exported_program: ExportedProgram,
+    inputs: Tuple[torch.Tensor],
+    intermediate_path: str | Path,
+):
+    input_file_paths = []
+    input_names = get_input_names(exported_program)
+    for input_name, input_ in zip(input_names, inputs):
+        input_path = save_bytes(intermediate_path, input_, input_name)
+        input_file_paths.append(input_path)
+
+    return input_file_paths
+
+
+def get_output_from_file(
+    exported_program: ExportedProgram,
+    intermediate_path: str | Path,
+    output_base_name: str,
+):
+    output_np = []
+    output_node = exported_program.graph_module.graph.output_node()
+    for i, node in enumerate(output_node.args[0]):
+        output_shape = node.meta["val"].shape
+        output_dtype = node.meta["val"].dtype
+        tosa_ref_output = np.fromfile(
+            os.path.join(intermediate_path, f"{output_base_name}-{i}.bin"),
+            _torch_to_numpy_dtype_dict[output_dtype],
+        )
+
+        output_np.append(torch.from_numpy(tosa_ref_output).reshape(output_shape))
+    return tuple(output_np)
+
+
 def run_vkml_emulation_layer(
     executorch_program_manager: ExecutorchProgramManager,
+    inputs: Tuple[torch.Tensor],
     intermediate_path: str | Path,
     elf_path: str | Path,
 ):
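The two new helpers factor out input/output file handling that run_corstone previously did inline: save_inputs_to_file writes one `.bin` file per graph input (named by `save_bytes` from the input's placeholder name), and get_output_from_file reads `<output_base_name>-<i>.bin` back into tensors using the dtype and shape stored in the graph's output metadata. A rough usage sketch, assuming the helpers are imported from this module, an already-lowered `exported_program`, and a placeholder runner call:

```python
# Sketch only: run_some_runner stands in for the real runner invocations
# (run_vkml_emulation_layer / run_corstone below) and is not a real function.
from pathlib import Path

import torch

workdir = Path("/tmp/intermediates")  # hypothetical scratch directory

# One .bin file per graph input, in graph-input order.
input_paths = save_inputs_to_file(exported_program, (torch.randn(1, 10),), workdir)

# The runner is expected to write out-0.bin, out-1.bin, ... into workdir.
run_some_runner(input_paths, workdir / "out")  # placeholder

# Raw bytes are re-typed and reshaped from the graph's output node metadata.
outputs = get_output_from_file(exported_program, workdir, "out")
```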
@@ -239,7 +274,7 @@ def run_vkml_emulation_layer(
     `intermediate_path`: Directory to save the .pte and capture outputs.
     `elf_path`: Path to the Vulkan-capable executor_runner binary.
     """
-
+    exported_program = executorch_program_manager.exported_program()
     intermediate_path = Path(intermediate_path)
     intermediate_path.mkdir(exist_ok=True)
     elf_path = Path(elf_path)
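With the signature change, a call site now looks roughly like this; `epm` and both paths are hypothetical stand-ins, and the new `inputs` tuple is what ends up serialized for the runner:

```python
# Hypothetical call of the updated entry point; epm is an
# ExecutorchProgramManager produced earlier in the lowering pipeline.
outputs = run_vkml_emulation_layer(
    executorch_program_manager=epm,
    inputs=(torch.randn(1, 10),),                 # forwarded to the runner as .bin files
    intermediate_path="/tmp/vkml_intermediates",  # .pte and out-<i>.bin land here
    elf_path="/build/executor_runner_vkml",       # hypothetical Vulkan-capable runner
)
```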
@@ -251,26 +286,29 @@ def run_vkml_emulation_layer(
     with open(pte_path, "wb") as f:
         f.write(executorch_program_manager.buffer)
 
-    cmd_line = [str(elf_path), "-model_path", pte_path]
+    output_base_name = "out"
+    out_path = os.path.join(intermediate_path, output_base_name)
+
+    cmd_line = f"{elf_path} -model_path {pte_path} -output_file {out_path}"
+
+    input_string = None
+    input_paths = save_inputs_to_file(exported_program, inputs, intermediate_path)
+    for input_path in input_paths:
+        if input_string is None:
+            input_string = f" -inputs={input_path}"
+        else:
+            input_string += f",{input_path}"
+    if input_string is not None:
+        cmd_line += input_string
+    cmd_line = cmd_line.split()
+
     result = _run_cmd(cmd_line)
 
-    result_stdout = result.stdout.decode()  # noqa: F841
     # TODO: MLETORCH-1234: Support VGF e2e tests in VgfPipeline
     # TODO: Add regex to check for error or fault messages in stdout from Emulation Layer
-    # Regex to extract tensor values from stdout
-    output_np = []
-    matches = re.findall(
-        r"Output\s+\d+:\s+tensor\(sizes=\[(.*?)\],\s+\[(.*?)\]\)",
-        result_stdout,
-        re.DOTALL,
-    )
-
-    for shape_str, values_str in matches:
-        shape = list(map(int, shape_str.split(",")))
-        values = list(map(float, re.findall(r"[-+]?\d*\.\d+|\d+", values_str)))
-        output_np.append(torch.tensor(values).reshape(shape))
+    result_stdout = result.stdout.decode()  # noqa: F841
 
-    return tuple(output_np)
+    return get_output_from_file(exported_program, intermediate_path, output_base_name)
 
 
 def run_corstone(
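Instead of scraping tensors out of stdout with a regex, the runner is now told where to write its outputs and which input files to load. Assuming two saved inputs and hypothetical paths, the assembled command line looks roughly as follows (the `-model_path`, `-output_file`, and `-inputs` flag names are the ones used in the diff):

```python
# Condensed, equivalent construction of the argv handed to _run_cmd above;
# all paths are hypothetical.
elf_path = "/build/executor_runner_vkml"
pte_path = "/tmp/intermediates/delegated_program.pte"
out_path = "/tmp/intermediates/out"
input_paths = ["/tmp/intermediates/input_0.bin", "/tmp/intermediates/input_1.bin"]

cmd_line = f"{elf_path} -model_path {pte_path} -output_file {out_path}"
if input_paths:
    cmd_line += " -inputs=" + ",".join(input_paths)  # one flag, comma-separated paths
print(cmd_line.split())
# ['/build/executor_runner_vkml',
#  '-model_path', '/tmp/intermediates/delegated_program.pte',
#  '-output_file', '/tmp/intermediates/out',
#  '-inputs=/tmp/intermediates/input_0.bin,/tmp/intermediates/input_1.bin']
```

Since the final string is tokenised with `str.split()`, this assumes none of the involved paths contain whitespace.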
@@ -312,14 +350,10 @@ def run_corstone(
     with open(pte_path, "wb") as f:
         f.write(executorch_program_manager.buffer)
 
-    # Save inputs to file
-    input_names = get_input_names(exported_program)
-    input_paths = []
-    for input_name, input_ in zip(input_names, inputs):
-        input_path = save_bytes(intermediate_path, input_, input_name)
-        input_paths.append(input_path)
+    input_paths = save_inputs_to_file(exported_program, inputs, intermediate_path)
 
-    out_path = os.path.join(intermediate_path, "out")
+    output_base_name = "out"
+    out_path = os.path.join(intermediate_path, output_base_name)
 
     cmd_line = f"executor_runner -m {pte_path} -o {out_path}"
     for input_path in input_paths:
@@ -401,18 +435,7 @@ def run_corstone(
             f"Corstone simulation failed:\ncmd: {' '.join(command_args)}\nlog: \n{result_stdout}\n{result.stderr.decode()}"
         )
 
-    output_np = []
-    output_node = exported_program.graph_module.graph.output_node()
-    for i, node in enumerate(output_node.args[0]):
-        output_shape = node.meta["val"].shape
-        output_dtype = node.meta["val"].dtype
-        tosa_ref_output = np.fromfile(
-            os.path.join(intermediate_path, f"out-{i}.bin"),
-            _torch_to_numpy_dtype_dict[output_dtype],
-        )
-
-        output_np.append(torch.from_numpy(tosa_ref_output).reshape(output_shape))
-    return tuple(output_np)
+    return get_output_from_file(exported_program, intermediate_path, output_base_name)
 
 
 def prep_data_for_save(
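Both runners now converge on the same file-based read-back. A minimal, self-contained round trip showing the format `get_output_from_file` assumes for each output file (the shape and numpy dtype here stand in for what is normally taken from `node.meta["val"]` and `_torch_to_numpy_dtype_dict`):

```python
# Self-contained sketch: flat raw bytes in out-<i>.bin are reinterpreted with
# the recorded dtype and reshaped to the recorded output shape.
import tempfile
from pathlib import Path

import numpy as np
import torch

shape, np_dtype = (2, 3), np.float32          # stand-ins for the graph metadata
expected = torch.arange(6, dtype=torch.float32).reshape(shape)

with tempfile.TemporaryDirectory() as tmp:
    out_file = Path(tmp) / "out-0.bin"
    expected.numpy().tofile(out_file)         # roughly what the runner writes

    raw = np.fromfile(out_file, dtype=np_dtype)
    restored = torch.from_numpy(raw).reshape(shape)

assert torch.equal(restored, expected)
```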