Commit 915fa29

Support running from apptests
1 parent: ca3306a

File tree

1 file changed: +29 −28 lines

mx.graalpython/mx_graalpython_bisect.py

Lines changed: 29 additions & 28 deletions
@@ -9,7 +9,25 @@
 import mx


-SUITE = mx.suite('graalpython')
+def get_suite(name):
+    suite_name = name.lstrip('/')
+    suite = mx.suite(suite_name, fatalIfMissing=False)
+    if not suite:
+        suite = mx.primary_suite().import_suite(suite_name, version=None, urlinfos=None, in_subdir=name.startswith('/'))
+    assert suite
+    return suite
+
+
+def get_downstream_suite(suite):
+    downstreams = {
+        'graalpython-apptests': 'graalpython',
+        'graalpython-extensions': 'graalpython',
+        'graalpython': '/vm',
+        'vm': '/vm-enterprise',
+    }
+    downstream = downstreams.get(suite.name)
+    if downstream:
+        return get_suite(downstream)


 def get_commit(suite, ref='HEAD'):
@@ -22,16 +40,16 @@ def get_message(suite, commit):
     return suite.vc.git_command(suite.vc_dir, ['log', '--format=%s', '-n', '1', commit]).strip()


-def run_bisect_benchmark(suite, bad, good, callback, downstreams, threshold=None):
+def run_bisect_benchmark(suite, bad, good, callback, threshold=None):
     git_dir = suite.vc_dir
-    commits = SUITE.vc.git_command(
+    commits = suite.vc.git_command(
         git_dir,
         ['log', '--merges', '--format=format:%H', f'{good}^..{bad}'],
         abortOnError=True,
     ).splitlines()
     if not commits:
         sys.exit("No merge commits found in the range. Did you swap good and bad?")
-    downstream_suite = downstreams.get(suite)
+    downstream_suite = get_downstream_suite(suite)
     values = [None] * len(commits)
     if threshold is None:
         bad_index = 0
@@ -66,7 +84,7 @@ def run_bisect_benchmark(suite, bad, good, callback, downstreams, threshold=None
         downstream_bad = get_commit(downstream_suite)
     subresults = {}
     if downstream_bad and downstream_good and downstream_bad != downstream_good:
-        subresult = run_bisect_benchmark(downstream_suite, downstream_bad, downstream_good, callback, downstreams, threshold)
+        subresult = run_bisect_benchmark(downstream_suite, downstream_bad, downstream_good, callback, threshold)
         subresults[bad_index] = subresult
     return BisectResult(suite, commits, values, good_index, bad_index, subresults)

@@ -117,18 +135,6 @@ def summarize(self):
         return False


-def get_suite_py(commit):
-    suite_py = SUITE.vc.git_command(['show', f'{commit}:mx.graalpython/suite.py'], abortOnError=True)
-    namespace = {}
-    exec(suite_py, namespace, namespace)
-    return namespace['suite']
-
-
-def get_graal_commit(commit):
-    suite_py = get_suite_py(commit)
-    return [imp for imp in suite_py['imports'] if imp['name'] == 'sulong'][0]['version']
-
-
 def bisect_benchmark(argv):
     if 'BISECT_BENCHMARK_CONFIG' in os.environ:
         cp = configparser.ConfigParser()
@@ -152,12 +158,7 @@ def bisect_benchmark(argv):
     parser.add_argument('--enterprise', action='store_true')
     args = parser.parse_args(argv)

-    vm_suite = mx.suite('vm')
-    downstreams = {
-        SUITE: vm_suite,
-    }
-    if args.enterprise:
-        downstreams[vm_suite] = mx.suite('vm-enterprise')
+    primary_suite = mx.primary_suite()

     fetched_enterprise = False

@@ -170,10 +171,10 @@ def benchmark_callback(suite, commit):
             if fetched_enterprise:
                 checkout_args.append('--no-fetch')
             mx.run_mx(checkout_args, out=mx.OutputCapture())
-            mx.run_mx(['--env', 'ee', 'sforceimports'], suite=mx.suite('vm-enterprise'))
+            mx.run_mx(['--env', 'ee', 'sforceimports'], suite=get_suite('/vm-enterprise'))
             fetched_enterprise = True
         elif suite.name != 'vm':
-            mx.run_mx(['--env', 'ce', 'sforceimports'], suite=vm_suite)
+            mx.run_mx(['--env', 'ce', 'sforceimports'], suite=get_suite('/vm'))
         suite.vc.update_to_branch(suite.vc_dir, commit)
         mx.run_mx(['sforceimports'], suite=suite)
         env = os.environ.copy()
@@ -191,9 +192,9 @@ def benchmark_callback(suite, commit):
             sys.exit(f"Failed to get result from the benchmark")
         return float(match.group(1))

-    bad = get_commit(SUITE, args.bad)
-    good = get_commit(SUITE, args.good)
-    result = run_bisect_benchmark(SUITE, bad, good, benchmark_callback, downstreams)
+    bad = get_commit(primary_suite, args.bad)
+    good = get_commit(primary_suite, args.good)
+    result = run_bisect_benchmark(primary_suite, bad, good, benchmark_callback)
     print()
     result.visualize()
     print()
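
For context, the change makes the module suite-agnostic: get_suite resolves a suite by name and imports it into the primary suite on demand (a leading '/' marks a suite that lives in a subdirectory of its repository), and get_downstream_suite replaces the old dict keyed on suite objects with a fixed name-based mapping, so the bisect can be started from graalpython-apptests or graalpython-extensions as well as from graalpython itself. A minimal sketch of how the helpers chain together, assuming an mx environment where the mx_graalpython_bisect module is importable and the downstream repositories are reachable (illustrative only, not part of the commit):

    import mx
    import mx_graalpython_bisect as bisect_mod  # assumption: the module is on mx's extension path

    # Walk the downstream chain encoded in get_downstream_suite, starting from
    # whichever suite mx considers primary (e.g. graalpython-apptests when the
    # bisect is launched from the apptests checkout).
    suite = mx.primary_suite()
    while suite is not None:
        print(suite.name)  # e.g. graalpython-apptests -> graalpython -> vm -> vm-enterprise
        # Returns None once the suite has no downstream entry; resolving a
        # downstream may call import_suite, so the corresponding repository
        # needs to be reachable (vm-enterprise typically is not, publicly).
        suite = bisect_mod.get_downstream_suite(suite)

Because run_bisect_benchmark now derives its downstream suite from get_downstream_suite(suite) rather than a caller-supplied dict, the same recursion works regardless of which suite the bisect starts from; the vm → vm-enterprise step is part of the fixed mapping instead of being added only when --enterprise is passed.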
