@@ -949,9 +949,15 @@ def map_command(self, cmd):
         if self.bmSuite:
             bench_name = f"{self.bmSuite.name()}-{bench_name}"
         ts = datetime.now().strftime("%Y%m%d-%H%M%S")
-        jmap_command = mx.get_jdk().exe_path('jmap')
+        vm = self.bmSuite.execution_context.virtual_machine
+        if isinstance(vm, GraalPythonVm) and vm.launcher_type == "jvm":
+            jmap_command = mx.get_jdk().exe_path('jmap')
+        else:
+            jmap_command = ""
         self.out_file = os.path.join(os.getcwd(), f"heap_tracker_{bench_name}_{ts}.txt")
         iterations = 3
+        if "-i" in cmd:
+            cmd[cmd.index("-i") + 1] = "1"
         return [sys.executable, str(DIR / 'live_heap_tracker.py'), self.out_file, str(iterations), jmap_command, *cmd]
 
     def get_rules(self, bmSuiteArgs):
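The wrapper command hands live_heap_tracker.py the output file, an iteration count, the (possibly empty) jmap path, and the original benchmark command. The tracker script itself is not part of this hunk; the parse() rule further down only requires that it write one sample per line as two whitespace-separated byte counts (managed heap, then USS). A minimal sketch of such a sampler under that assumption — psutil for USS, jmap -histo:live for the heap when a JVM launcher is used, RSS as a fallback otherwise — could look like this (hypothetical, not the script added by this change):

# Hypothetical sketch of live_heap_tracker.py, not the script from this change.
# Assumed argv layout (mirrors map_command above): out_file iterations jmap_command cmd...
# Assumed output format (what parse() below expects): "<heap_bytes> <uss_bytes>" per line.
import subprocess
import sys
import time

import psutil  # assumed dependency for USS sampling


def heap_bytes(jmap_command, pid):
    if not jmap_command:
        # Native launcher: no jmap available, use RSS as a rough stand-in.
        return psutil.Process(pid).memory_info().rss
    # JVM launcher: total bytes from the class histogram's "Total" line.
    out = subprocess.run([jmap_command, "-histo:live", str(pid)],
                         capture_output=True, text=True, check=True).stdout
    total = next(l for l in reversed(out.splitlines()) if l.startswith("Total"))
    return int(total.split()[2])


def main():
    out_file, iterations, jmap_command, *cmd = sys.argv[1:]
    with open(out_file, "w") as f:
        for _ in range(int(iterations)):
            proc = subprocess.Popen(cmd)
            while proc.poll() is None:
                f.write(f"{heap_bytes(jmap_command, proc.pid)} "
                        f"{psutil.Process(proc.pid).memory_full_info().uss}\n")
                f.flush()
                time.sleep(1)


if __name__ == "__main__":
    main()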
@@ -964,18 +970,41 @@ def __init__(self, tracker, bmSuiteArgs):
 
     def parse(self, text):
         with open(self.tracker.out_file) as f:
-            heap_mb = [int(line.strip()) / (1024 ** 2) for line in f if line]
+            heap_mb, uss_mb = zip(*(map(lambda i: int(i) / (1024 ** 2), line.split()) for line in f if line))
         os.unlink(self.tracker.out_file)
-        self.tracker.out_file = None
-        deciles = statistics.quantiles(heap_mb, n=10)
-        print(f"Heap size deciles (MiB): {deciles}")
+        heap_deciles = statistics.quantiles(heap_mb, n=10)
+        uss_deciles = statistics.quantiles(uss_mb, n=10)
+        print(f"Heap size deciles (MiB): {heap_deciles}")
+        print(f"USS size deciles (MiB): {uss_deciles}")
+        # The heap benchmarks are a separate suite, because they are run
+        # very differently, but we want to be able to conveniently query
+        # all data about the same suites that we have. So, if this suite
+        # name ends with "-heap", we drop that so it gets attributed to the
+        # base suite.
+        suite = self.tracker.bmSuite.benchSuiteName(self.bmSuiteArgs)
+        if suite.endswith("-heap"):
+            suite = suite[:-len("-heap")]
+        benchmark = f"{suite}.{self.tracker.bmSuite.currently_running_benchmark()}"
+        vm_flags = ' '.join(self.tracker.bmSuite.vmArgs(self.bmSuiteArgs))
         return [
             PythonBaseBenchmarkSuite.with_branch_and_commit_dict({
-                "benchmark": self.tracker.bmSuite.currently_running_benchmark(),
-                "bench-suite": self.tracker.bmSuite.benchSuiteName(self.bmSuiteArgs),
-                "config.vm-flags": ' '.join(self.tracker.bmSuite.vmArgs(self.bmSuiteArgs)),
+                "benchmark": benchmark,
+                "bench-suite": suite,
+                "config.vm-flags": vm_flags,
                 "metric.name": "allocated-memory",
-                "metric.value": deciles[-1],
+                "metric.value": heap_deciles[-1],
+                "metric.unit": "MB",
+                "metric.type": "numeric",
+                "metric.score-function": "id",
+                "metric.better": "lower",
+                "metric.iteration": 0
+            }),
+            PythonBaseBenchmarkSuite.with_branch_and_commit_dict({
+                "benchmark": benchmark,
+                "bench-suite": suite,
+                "config.vm-flags": vm_flags,
+                "metric.name": "memory",
+                "metric.value": uss_deciles[-1],
                 "metric.unit": "MB",
                 "metric.type": "numeric",
                 "metric.score-function": "id",
@@ -1004,7 +1033,17 @@ def register_tracker(self, name, tracker_type):
     def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
         benchmark = benchmarks[0]
         bench_path = os.path.join(self._bench_path, f'{benchmark}.py')
-        return [*self.vmArgs(bmSuiteArgs), bench_path, *self.runArgs(bmSuiteArgs)]
+        bench_args = self._benchmarks[benchmark]
+        run_args = self.runArgs(bmSuiteArgs)
+        cmd_args = []
+        if "-i" in bench_args:
+            # Need to use the harness to parse
+            cmd_args.append(HARNESS_PATH)
+            if "-i" not in run_args:
+                # Explicit iteration count overrides default
+                run_args += bench_args
+        cmd_args.append(bench_path)
+        return [*self.vmArgs(bmSuiteArgs), *cmd_args, *run_args]
 
     def successPatterns(self):
         return []
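To illustrate the argument assembly (standalone and simplified, assuming the nesting shown above; HARNESS_PATH, the benchmark path, and the declared bench args are stand-in values, not taken from this change):

# Standalone rendition of the logic above, illustrative only.
def build_args(vm_args, bench_path, bench_args, run_args, harness_path):
    cmd_args = []
    if "-i" in bench_args:
        cmd_args.append(harness_path)          # the harness knows how to parse -i
        if "-i" not in run_args:
            run_args = run_args + bench_args   # fall back to the declared iterations
    cmd_args.append(bench_path)
    return [*vm_args, *cmd_args, *run_args]

# Benchmark declared with "-i 10", no user run args: harness plus the default iterations.
print(build_args([], "bench/foo.py", ["-i", "10"], [], "harness.py"))
# -> ['harness.py', 'bench/foo.py', '-i', '10']

# User passes an explicit "-i 2": the declared default is not appended.
print(build_args([], "bench/foo.py", ["-i", "10"], ["-i", "2"], "harness.py"))
# -> ['harness.py', 'bench/foo.py', '-i', '2']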