@@ -310,6 +310,7 @@ def transform(
     workflow_run_attempt: int,
     job_name: str,
     job_id: int,
+    schema_version: str,
 ) -> List:
     """
     Transform the benchmark results into the format writable into the benchmark database
@@ -319,45 +320,91 @@ def transform(
     for r in benchmark_results:
         r["deviceInfo"]["device"] = job_name

-    # TODO (huydhn): This is the current schema of the database oss_ci_benchmark_v2,
-    # and I'm trying to fit ET benchmark results into it, which is kind of awkward.
-    # However, the schema is going to be updated soon
-    return [
-        {
-            # GH-info to identify where the benchmark is run
-            "repo": repo,
-            "head_branch": head_branch,
-            "workflow_id": workflow_run_id,
-            "run_attempt": workflow_run_attempt,
-            "job_id": job_id,
-            # The model
-            "name": f"{r['benchmarkModel']['name']} {r['benchmarkModel'].get('backend', '')}".strip(),
-            "dtype": (
-                r["benchmarkModel"]["quantization"]
-                if r["benchmarkModel"]["quantization"]
-                else "unknown"
-            ),
-            # The metric value
-            "metric": r["metric"],
-            "actual": r["actualValue"],
-            "target": r["targetValue"],
-            # The device
-            "device": r["deviceInfo"]["device"],
-            "arch": r["deviceInfo"].get("os", ""),
-            # Not used here, just set it to something unique here
-            "filename": workflow_name,
-            "test_name": app_type,
-            "runner": job_name,
-        }
-        for r in benchmark_results
-    ]
+    if schema_version == "v2":
+        # TODO (huydhn): Clean up this branch after ExecuTorch dashboard migrates to v3
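+        # v2 is a flat, one-row-per-metric format; the v3 branch below nests the
+        # same information under benchmark / model / metric / runners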
+        return [
+            {
+                # GH-info to identify where the benchmark is run
+                "repo": repo,
+                "head_branch": head_branch,
+                "workflow_id": workflow_run_id,
+                "run_attempt": workflow_run_attempt,
+                "job_id": job_id,
+                # The model
+                "name": f"{r['benchmarkModel']['name']} {r['benchmarkModel'].get('backend', '')}".strip(),
+                "dtype": (
+                    r["benchmarkModel"]["quantization"]
+                    if r["benchmarkModel"]["quantization"]
+                    else "unknown"
+                ),
+                # The metric value
+                "metric": r["metric"],
+                "actual": r["actualValue"],
+                "target": r["targetValue"],
+                # The device
+                "device": r["deviceInfo"]["device"],
+                "arch": r["deviceInfo"].get("os", ""),
+                # Not used here, just set it to something unique here
+                "filename": workflow_name,
+                "test_name": app_type,
+                "runner": job_name,
+            }
+            for r in benchmark_results
+        ]
+    elif schema_version == "v3":
+        # NB: quantization has to be resolved per record inside the comprehension
+        # below; hoisting it out here would reuse the leftover loop variable `r`
+        # from the loop at the top of this function for every record
+        # From https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
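+        # One transformed record looks roughly like this (an illustrative
+        # sketch; the field values here are made up):
+        #   {
+        #       "benchmark": {"name": "ExecuTorch", "mode": "inference", "dtype": "int8", ...},
+        #       "model": {"name": "llama2", "type": "OSS model", "backend": "xnnpack", ...},
+        #       "metric": {"name": "token_per_sec", "benchmark_values": [42.0], ...},
+        #       "runners": [{"name": "Apple iPhone 15", "type": "iOS 17", ...}],
+        #   }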
+        return [
+            {
+                "benchmark": {
+                    "name": "ExecuTorch",
+                    "mode": "inference",
+                    "dtype": r["benchmarkModel"]["quantization"] or "unknown",
+                    "extra_info": {
+                        "app_type": app_type,
+                    },
+                },
+                "model": {
+                    "name": r["benchmarkModel"]["name"],
+                    "type": "OSS model",
+                    "backend": r["benchmarkModel"].get("backend", ""),
+                    "extra_info": {
+                        "quantization": r["benchmarkModel"]["quantization"] or "unknown",
+                    },
+                },
+                "metric": {
+                    "name": r["metric"],
+                    "benchmark_values": [r["actualValue"]],
+                    "target_value": r["targetValue"],
+                    "extra_info": {
+                        "method": r.get("method", ""),
+                    },
+                },
+                "runners": [
+                    {
+                        "name": r["deviceInfo"]["device"],
+                        "type": r["deviceInfo"]["os"],
+                        "avail_mem_in_gb": r["deviceInfo"].get("availMem", ""),
+                        "total_mem_in_gb": r["deviceInfo"].get("totalMem", ""),
+                    }
+                ],
+            }
+            for r in benchmark_results
+        ]
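+    else:
+        # Defensive guard (an addition for safety; the only caller, main(),
+        # currently passes just "v2" or "v3"): fail loudly rather than fall
+        # through and implicitly return None for an unrecognized schema version
+        raise ValueError(f"Unknown schema version: {schema_version}")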


 def main() -> None:
     args = parse_args()

-    # Across all devices
-    all_benchmark_results = []
+    # Across all devices, keeping both schemas for now until ExecuTorch dashboard migrates to v3
+    all_benchmark_results = {
+        "v2": [],
+        "v3": [],
+    }

     with open(args.artifacts) as f:
         for artifact in json.load(f):
@@ -384,23 +431,31 @@ def main() -> None:
                 )

             if benchmark_results:
-                benchmark_results = transform(
-                    app_type,
-                    benchmark_results,
-                    args.repo,
-                    args.head_branch,
-                    args.workflow_name,
-                    args.workflow_run_id,
-                    args.workflow_run_attempt,
-                    job_name,
-                    extract_job_id(args.artifacts),
-                )
-                all_benchmark_results.extend(benchmark_results)
+                for schema in all_benchmark_results.keys():
+                    results = transform(
+                        app_type,
+                        benchmark_results,
+                        args.repo,
+                        args.head_branch,
+                        args.workflow_name,
+                        args.workflow_run_id,
+                        args.workflow_run_attempt,
+                        job_name,
+                        extract_job_id(args.artifacts),
+                        schema,
+                    )
+                    all_benchmark_results[schema].extend(results)
+
+    for schema in all_benchmark_results.keys():
+        if not all_benchmark_results.get(schema):
+            continue
+
+        output_dir = os.path.join(args.output_dir, schema)
+        # makedirs with exist_ok so a rerun or a pre-existing directory doesn't crash
+        os.makedirs(output_dir, exist_ok=True)
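+        # Each schema's results land in their own subdirectory, i.e.
+        # <output-dir>/v2/<artifact name> and <output-dir>/v3/<artifact name>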

-    if all_benchmark_results:
         output_file = os.path.basename(args.artifacts)
-        with open(f"{args.output_dir}/{output_file}", "w") as f:
-            json.dump(all_benchmark_results, f)
+        with open(f"{output_dir}/{output_file}", "w") as f:
+            json.dump(all_benchmark_results[schema], f)


 if __name__ == "__main__":