-import os
-import json
-import sys
-from pathlib import Path
+import argparse
 import jinja2
+import json
+import os
 import string
+import sys
 
 # Schema: performance_models[data_mode][tps] = [model]
 performance_models = {}
@@ -36,7 +36,7 @@ def add_performance_model(model):
     performance_models[data_mode][data_rate].append(model)
 
 
-def flatten_performance_models(models):
+def flatten_performance_models():
     """
     Flattens the performance models into a list of grouped models, where each
     group corresponds to a table in the report.
@@ -58,13 +58,36 @@ def flatten_performance_models(models):
         x["data_mode"], x["data_rate"]))
     return models_list
 
+def get_benchmark_entry(model, data_mode, data_rate, value_field, unit, subgroup):
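+    """
+    Builds one benchmark record (name/value/unit/extra) for performance-data.json.
+    """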
+    benchmark_entry = {}
+    benchmark_entry["name"] = model["testcase"]
+    benchmark_entry["value"] = model[value_field]
+    benchmark_entry["unit"] = unit
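+    # grouping label, e.g. "<data_mode> (TPS: <data_rate>) - Average CPU Usage"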
+    benchmark_entry["extra"] = f"{data_mode} (TPS: {data_rate}) - {subgroup}"
+    return benchmark_entry
+
+def get_benchmark_data():
+    """
+    Splits models by testcase and groups by data mode, data rate, and field type.
+    """
+    benchmark_data = []
+
+    for data_mode, data_rates in performance_models.items():
+        for data_rate, models in data_rates.items():
+            for model in models:
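+                # emit one CPU entry and one memory entry per model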
+                benchmark_data.append(get_benchmark_entry(model, data_mode, data_rate, "avgCpu", "%", "Average CPU Usage"))
+                benchmark_data.append(get_benchmark_entry(model, data_mode, data_rate, "avgMem", "MB", "Average Memory Usage"))
+
+    return benchmark_data
 
 if __name__ == "__main__":
-    aoc_version = Path('VERSION').read_text()
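+    # the report version now comes from the CLI instead of the VERSION file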
+    parser = argparse.ArgumentParser(description="Generate performance-report.md and performance-data.json from artifacts")
+    parser.add_argument('-v', '--version', help="version to tag the report with", required=True)
+    args = parser.parse_args()
+    aoc_version = args.version
 
-    from jinja2 import Environment, PackageLoader, select_autoescape
     templateLoader = jinja2.FileSystemLoader(searchpath="e2etest/templates/")
-    env = Environment(autoescape=select_autoescape(['html', 'xml', 'tpl', 'yaml', 'yml']), loader=templateLoader)
+    env = jinja2.Environment(autoescape=jinja2.select_autoescape(['html', 'xml', 'tpl', 'yaml', 'yml']), loader=templateLoader)
 
     # get performance models from artifacts
     artifacts_path = "artifacts/"
@@ -79,7 +102,7 @@ def flatten_performance_models(models):
         testing_ami = model["testingAmi"]
         add_performance_model(model)
 
-    models_list = flatten_performance_models(performance_models)
+    models_list = flatten_performance_models()
 
     # render performance models into markdown
     template = env.get_template('performance_model.tpl')
@@ -92,6 +115,10 @@ def flatten_performance_models(models):
     })
     print(rendered_result)
 
-    # write rendered result to docs/performance_model.md
-    with open("docs/performance_model.md", "w") as f:
+    # write rendered result to performance-report.md
+    with open("performance-report.md", "w+") as f:
         f.write(rendered_result)
+
+    # write benchmark data to performance-data.json
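+    # each entry: {"name": <testcase>, "value": <metric>, "unit": "%" or "MB", "extra": <grouping label>}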
+    with open("performance-data.json", "w+") as f:
+        json.dump(get_benchmark_data(), f, indent=4)
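
For reference, a minimal sketch of what performance-data.json would contain for one
model, assuming a hypothetical testcase "otlp_mock" run in a "metric" data mode at
100 TPS (all names and values are illustrative, not real measurements):

    [
        {
            "name": "otlp_mock",
            "value": 0.5,
            "unit": "%",
            "extra": "metric (TPS: 100) - Average CPU Usage"
        },
        {
            "name": "otlp_mock",
            "value": 75.0,
            "unit": "MB",
            "extra": "metric (TPS: 100) - Average Memory Usage"
        }
    ]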