@@ -516,15 +516,39 @@ def benchSuiteName(self, bmSuiteArgs):
     def subgroup(self):
         return SUBGROUP_GRAAL_PYTHON
 
+    def with_branch_and_commit_dict(self, d):
+        """
+        We run our benchmarks from the graalpython directories, but with other
+        suites as primary suites in the CI, so we potentially want to update
+        branch and commit info.
+        """
+        if mx.primary_suite().dir != os.getcwd():
+            if any(os.path.isdir(d) and d.startswith("mx.graalpython") for d in os.listdir()):
+                vc = SUITE.vc
+                if vc is None:
+                    return d
+                branch = vc.active_branch(SUITE.dir, abortOnError=False) or "<unknown>"
+                info = vc.parent_info(SUITE.dir)
+                url = vc.default_pull(SUITE.dir, abortOnError=False) or "unknown"
+                d.update({
+                    "branch": branch,
+                    "commit.rev": vc.parent(SUITE.dir),
+                    "commit.repo-url": url,
+                    "commit.author": info["author"],
+                    "commit.author-ts": info["author-ts"],
+                    "commit.committer": info["committer"],
+                    "commit.committer-ts": info["committer-ts"],
+                })
+        return d
+
     def rules(self, output, benchmarks, bm_suite_args):
         bench_name = self.get_bench_name(benchmarks)
         arg = self.get_arg(bench_name)
-
         return [
             # warmup curves
             StdOutRule(
                 r"^### iteration=(?P<iteration>[0-9]+), name=(?P<benchmark>[a-zA-Z0-9._\-]+), duration=(?P<time>[0-9]+(\.[0-9]+)?$)",  # pylint: disable=line-too-long
-                {
+                self.with_branch_and_commit_dict({
                     "benchmark": '{}.{}'.format(self._name, bench_name),
                     "metric.name": "warmup",
                     "metric.iteration": ("<iteration>", int),
@@ -534,12 +558,12 @@ def rules(self, output, benchmarks, bm_suite_args):
                     "metric.score-function": "id",
                     "metric.better": "lower",
                     "config.run-flags": "".join(arg),
-                }
+                })
             ),
             # secondary metric(s)
             StdOutRule(
                 r"### WARMUP detected at iteration: (?P<endOfWarmup>[0-9]+$)",
-                {
+                self.with_branch_and_commit_dict({
                     "benchmark": '{}.{}'.format(self._name, bench_name),
                     "metric.name": "end-of-warmup",
                     "metric.iteration": 0,
@@ -549,13 +573,13 @@ def rules(self, output, benchmarks, bm_suite_args):
                     "metric.score-function": "id",
                     "metric.better": "lower",
                     "config.run-flags": "".join(arg),
-                }
+                })
             ),
 
             # no warmups
             StdOutRule(
                 r"^@@@ name=(?P<benchmark>[a-zA-Z0-9._\-]+), duration=(?P<time>[0-9]+(\.[0-9]+)?$)",  # pylint: disable=line-too-long
-                {
+                self.with_branch_and_commit_dict({
                     "benchmark": '{}.{}'.format(self._name, bench_name),
                     "metric.name": "time",
                     "metric.iteration": 0,
@@ -565,7 +589,7 @@ def rules(self, output, benchmarks, bm_suite_args):
                     "metric.score-function": "id",
                     "metric.better": "lower",
                     "config.run-flags": "".join(arg),
-                }
+                })
             ),
         ]
 
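Reviewer note (not part of the commit): the generator expression in the new helper reuses the name `d` for directory entries, but in Python 3 a generator expression has its own scope, so the dict parameter of `with_branch_and_commit_dict` is never rebound. A minimal, standalone sketch demonstrating this:

# Illustration only, not part of the commit: the loop variable `d` inside
# any(... for d in os.listdir()) does not leak into the enclosing scope,
# so a dict bound to `d` outside the expression stays intact.
import os

d = {"metric.name": "warmup"}  # stands in for the rule's replacement dict
any(os.path.isdir(d) and d.startswith("mx.graalpython") for d in os.listdir())
print(d)  # still {'metric.name': 'warmup'}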
@@ -731,12 +755,11 @@ class PythonVmWarmupBenchmarkSuite(PythonBenchmarkSuite):
     def rules(self, output, benchmarks, bm_suite_args):
         bench_name = self.get_bench_name(benchmarks)
         arg = self.get_arg(bench_name)
-
         return [
             # startup (difference between start of VM to end of first iteration)
             StdOutRule(
                 r"### STARTUP +at iteration: (?P<iteration>[0-9]+), +duration: (?P<time>[0-9]+(\.[0-9]+)?$)",
-                {
+                self.with_branch_and_commit_dict({
                     "benchmark": '{}.{}'.format(self._name, bench_name),
                     "metric.name": "startup",
                     "metric.iteration": ("<iteration>", int),
@@ -746,12 +769,12 @@ def rules(self, output, benchmarks, bm_suite_args):
                     "metric.score-function": "id",
                     "metric.better": "lower",
                     "config.run-flags": "".join(arg),
-                }
+                })
             ),
 
             StdOutRule(
                 r"### EARLY WARMUP +at iteration: (?P<iteration>[0-9]+), +duration: (?P<time>[0-9]+(\.[0-9]+)?$)",
-                {
+                self.with_branch_and_commit_dict({
                     "benchmark": '{}.{}'.format(self._name, bench_name),
                     "metric.name": "early-warmup",
                     "metric.iteration": ("<iteration>", int),
@@ -761,12 +784,12 @@ def rules(self, output, benchmarks, bm_suite_args):
                     "metric.score-function": "id",
                     "metric.better": "lower",
                     "config.run-flags": "".join(arg),
-                }
+                })
             ),
 
             StdOutRule(
                 r"### LATE WARMUP +at iteration: (?P<iteration>[0-9]+), +duration: (?P<time>[0-9]+(\.[0-9]+)?$)",
-                {
+                self.with_branch_and_commit_dict({
                     "benchmark": '{}.{}'.format(self._name, bench_name),
                     "metric.name": "late-warmup",
                     "metric.iteration": ("<iteration>", int),
@@ -776,7 +799,7 @@ def rules(self, output, benchmarks, bm_suite_args):
                     "metric.score-function": "id",
                     "metric.better": "lower",
                     "config.run-flags": "".join(arg),
-                }
+                })
             ),
         ]
 
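For illustration (not part of the commit): every `StdOutRule` replacement dict above is now routed through `with_branch_and_commit_dict`, so when the primary suite is not graalpython and version-control info is available, the dict a rule works with carries extra VCS fields alongside the metric fields. A sketch of the resulting shape, with all concrete values hypothetical:

# Hypothetical example of a wrapped replacement dict (subset of fields shown);
# every concrete value below is made up for illustration.
wrapped_rule_dict = {
    # fields already present in the rule (see the diff above)
    "benchmark": "python-suite.some-benchmark",  # '{}.{}'.format(self._name, bench_name)
    "metric.name": "warmup",
    "metric.iteration": ("<iteration>", int),
    "metric.score-function": "id",
    "metric.better": "lower",
    "config.run-flags": "",
    # fields added by with_branch_and_commit_dict
    "branch": "master",
    "commit.rev": "0123456789abcdef0123456789abcdef01234567",
    "commit.repo-url": "https://example.org/graalpython.git",
    "commit.author": "Jane Doe <jane@example.org>",
    "commit.author-ts": "1540000000",
    "commit.committer": "Jane Doe <jane@example.org>",
    "commit.committer-ts": "1540000000",
}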