
Commit e0b4dd1

Get the Full Test Suite Running on PRs for Windows (#174)
(fixes #168)
1 parent 35c04ae commit e0b4dd1

6 files changed: +155 -94 lines

.github/workflows/main.yml
Lines changed: 1 addition & 1 deletion

@@ -45,4 +45,4 @@ jobs:
       - name: Display Python version
         run: python -c "import sys; print(sys.version)"
       - name: Run Tests
-        run: python runtests.py
+        run: python -u -m pyperformance.tests

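The workflow change above swaps the old runtests.py wrapper for running the test package directly with unbuffered output (python -u -m pyperformance.tests). As a rough illustration only, a package entry point along the following lines would support that invocation; the actual pyperformance/tests/__main__.py is not shown in this diff, so treat the sketch as an assumption about how such an entry point is typically written:

    # Hypothetical sketch of a tests/__main__.py driving unittest discovery.
    import os
    import sys
    import unittest

    if __name__ == '__main__':
        # Discover and run every test_*.py module in this package.
        here = os.path.dirname(os.path.abspath(__file__))
        suite = unittest.defaultTestLoader.discover(here, pattern='test_*.py')
        result = unittest.TextTestRunner(verbosity=2).run(suite)
        sys.exit(0 if result.wasSuccessful() else 1)

The -u flag only keeps stdout/stderr unbuffered so test output interleaves correctly in the CI log.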
pyperformance/commands.py
Lines changed: 11 additions & 7 deletions

@@ -45,9 +45,9 @@ def cmd_venv_create(options, root, python, benchmarks):
     venv.ensure_pip()
     try:
         venv.install_pyperformance()
+        venv.ensure_reqs(requirements)
     except _venv.RequirementsInstallationFailedError:
         sys.exit(1)
-    venv.ensure_reqs(requirements, exitonerror=True)
     print("The virtual environment %s has been created" % root)
 
 
@@ -67,10 +67,10 @@ def cmd_venv_recreate(options, root, python, benchmarks):
             )
             venv.ensure_pip()
             try:
-                venv.install_pyperformance()
+                venv.ensure_reqs(requirements)
+                venv.ensure_reqs(requirements)
             except _venv.RequirementsInstallationFailedError:
                 sys.exit(1)
-            venv.ensure_reqs(requirements, exitonerror=True)
         else:
             print("The virtual environment %s already exists" % root)
             _utils.safe_rmtree(root)
@@ -84,9 +84,9 @@ def cmd_venv_recreate(options, root, python, benchmarks):
             venv.ensure_pip()
             try:
                 venv.install_pyperformance()
+                venv.ensure_reqs(requirements)
             except _venv.RequirementsInstallationFailedError:
                 sys.exit(1)
-            venv.ensure_reqs(requirements, exitonerror=True)
             print("The virtual environment %s has been recreated" % root)
     else:
         venv = VenvForBenchmarks.create(
@@ -97,9 +97,9 @@ def cmd_venv_recreate(options, root, python, benchmarks):
         venv.ensure_pip()
         try:
             venv.install_pyperformance()
+            venv.ensure_reqs(requirements)
         except _venv.RequirementsInstallationFailedError:
             sys.exit(1)
-        venv.ensure_reqs(requirements, exitonerror=True)
         print("The virtual environment %s has been created" % root)
 
 
@@ -225,9 +225,13 @@ def cmd_show(options):
 
 
 def cmd_compare(options):
-    from .compare import compare_results, write_csv
+    from .compare import compare_results, write_csv, VersionMismatchError
 
-    results = compare_results(options)
+    try:
+        results = compare_results(options)
+    except VersionMismatchError as exc:
+        print(f'ERROR: {exc}')
+        sys.exit(1)
 
     if options.csv:
         write_csv(results, options.csv)

pyperformance/compare.py
Lines changed: 11 additions & 4 deletions

@@ -10,6 +10,16 @@
 NO_VERSION = "<not set>"
 
 
+class VersionMismatchError(Exception):
+
+    def __init__(self, version1, version2):
+        super().__init__(
+            f"Performance versions are different ({version1} != {version2})",
+        )
+        self.version1 = version1
+        self.version2 = version2
+
+
 def format_result(bench):
     mean = bench.mean()
     if bench.get_nvalue() >= 2:
@@ -372,10 +382,7 @@ def compare_results(options):
     version1 = base_suite.get_metadata().get('performance_version', NO_VERSION)
     version2 = changed_suite.get_metadata().get('performance_version', NO_VERSION)
     if version1 != version2 or (version1 == version2 == NO_VERSION):
-        print()
-        print("ERROR: Performance versions are different: %s != %s"
-              % (version1, version2))
-        sys.exit(1)
+        raise VersionMismatchError(version1, version2)
 
     return results
 

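With this change, compare_results() no longer prints and calls sys.exit(1) itself; it raises VersionMismatchError and leaves the exit decision to cmd_compare(). A minimal sketch of what that buys callers other than the CLI, using a hypothetical wrapper (compare_or_none is not part of the commit):

    from pyperformance.compare import compare_results, VersionMismatchError

    def compare_or_none(options):
        # `options` is the same argparse namespace that cmd_compare() receives.
        try:
            return compare_results(options)
        except VersionMismatchError as exc:
            # The exception keeps both version strings for programmatic use.
            print(f'not comparable: {exc.version1} != {exc.version2}')
            return None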
pyperformance/tests/__init__.py
Lines changed: 42 additions & 1 deletion

@@ -7,6 +7,7 @@
 import subprocess
 import sys
 import tempfile
+import unittest
 
 
 TESTS_ROOT = os.path.realpath(os.path.dirname(__file__))
@@ -99,7 +100,35 @@ def cleanup(self):
 
 
 #############################
-# fixtures and mixins
+# testing fixtures, mixins, and helpers
+
+def apply_to_test_methods(cls, decorator):
+    for name, func in vars(cls).items():
+        if not name.startswith('test_'):
+            continue
+        func = decorator(func)
+        setattr(cls, name, func)
+
+
+def mark(label, func=None):
+    """Mark the function/class with the given label.
+
+    This may be used as a decorator.
+    """
+    if func is None:
+        def decorator(func):
+            return mark(label, func)
+        return decorator
+    if isinstance(func, type):
+        cls = func
+        apply_to_test_methods(cls, mark(label))
+        return cls
+    try:
+        func._pyperformance_test_labels.append(label)
+    except AttributeError:
+        func._pyperformance_test_labels = [label]
+    return func
+
 
 class Compat:
     """A mixin that lets older Pythons use newer unittest features."""
@@ -124,6 +153,18 @@ def addClassCleanup(cls, cleanup):
 #############################
 # functional tests
 
+CPYTHON_ONLY = unittest.skipIf(
+    sys.implementation.name != 'cpython',
+    'CPython-only',
+)
+NON_WINDOWS_ONLY = unittest.skipIf(os.name == 'nt', 'skipping Windows')
+
+# XXX Provide a way to run slow tests.
+SLOW = (lambda f:
+            unittest.skip('way too slow')(
+                mark('slow', f)))
+
+
 class Functional(Compat):
     """A mixin for functional tests.
 

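To make the intent of the new helpers concrete, here is a hedged usage sketch. The test class below is hypothetical and not part of the commit, but the decorators compose exactly as mark() and the skipIf constants above define them: mark() on a class labels every test_* method, while CPYTHON_ONLY, NON_WINDOWS_ONLY, and SLOW are ordinary method decorators.

    import unittest

    from pyperformance import tests


    @tests.mark('functional')       # labels every test_* method on the class
    class ExampleTests(unittest.TestCase):

        @tests.CPYTHON_ONLY         # skipped on non-CPython implementations
        @tests.NON_WINDOWS_ONLY     # skipped when os.name == 'nt'
        def test_posix_cpython_only(self):
            self.assertTrue(True)

        @tests.SLOW                 # labeled 'slow', then skipped ('way too slow')
        def test_expensive(self):
            self.assertTrue(True)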
pyperformance/tests/test_commands.py
Lines changed: 32 additions & 51 deletions

@@ -9,12 +9,6 @@
 from pyperformance import tests
 
 
-CPYTHON_ONLY = unittest.skipIf(
-    sys.implementation.name != 'cpython',
-    'CPython-only',
-)
-
-
 class FullStackTests(tests.Functional, unittest.TestCase):
 
     maxDiff = 80 * 100
@@ -108,56 +102,41 @@ def div():
             print('---')
             print()
 
+        def expect_success(*args):
+            text = self.run_pyperformance(
+                *args,
+                capture=None,
+            )
+
+        def expect_failure(*args):
+            text = self.run_pyperformance(
+                *args,
+                capture=None,
+                exitcode=1,
+            )
+
         # It doesn't exist yet.
-        self.run_pyperformance(
-            'venv', 'show', '--venv', root,
-            capture=None,
-        )
+        expect_success('venv', 'show', '--venv', root)
         div()
         # It gets created.
-        self.run_pyperformance(
-            'venv', 'create', '--venv', root,
-            capture=None,
-        )
+        expect_success('venv', 'create', '--venv', root)
         div()
-        self.run_pyperformance(
-            'venv', 'show', '--venv', root,
-            capture=None,
-        )
+        expect_success('venv', 'show', '--venv', root)
         div()
         # It alraedy exists.
-        self.run_pyperformance(
-            'venv', 'create', '--venv', root,
-            capture=None,
-            exitcode=1,
-        )
+        expect_failure('venv', 'create', '--venv', root)
         div()
-        self.run_pyperformance(
-            'venv', 'show', '--venv', root,
-            capture=None,
-        )
+        expect_success('venv', 'show', '--venv', root)
         div()
         # It gets re-created.
-        self.run_pyperformance(
-            'venv', 'recreate', '--venv', root,
-            capture=None,
-        )
+        expect_success('venv', 'recreate', '--venv', root)
        div()
-        self.run_pyperformance(
-            'venv', 'show', '--venv', root,
-            capture=None,
-        )
+        expect_success('venv', 'show', '--venv', root)
         div()
         # It get deleted.
-        self.run_pyperformance(
-            'venv', 'remove', '--venv', root,
-            capture=None,
-        )
+        expect_success('venv', 'remove', '--venv', root)
         div()
-        self.run_pyperformance(
-            'venv', 'show', '--venv', root,
-            capture=None,
-        )
+        expect_success('venv', 'show', '--venv', root)
 
     ###################################
     # run
@@ -170,7 +149,7 @@ def test_run_and_show(self):
         # --debug-single-value: benchmark results don't matter, we only
         # check that running benchmarks don't fail.
         # XXX Capture and check the output.
-        self.run_pyperformance(
+        text = self.run_pyperformance(
             'run',
             '-b', 'all',
             '--debug-single-value',
@@ -259,8 +238,9 @@ def create_compile_config(self, *revisions,
             outfile.write(text)
         return cfgfile
 
-    @CPYTHON_ONLY
-    @unittest.skip('way too slow')
+    @tests.CPYTHON_ONLY
+    @tests.NON_WINDOWS_ONLY
+    @tests.SLOW
     def test_compile(self):
         cfgfile = self.create_compile_config()
         revision = 'a58ebcc701dd' # tag: v3.10.2
@@ -271,8 +251,9 @@ def test_compile(self):
             capture=None,
         )
 
-    @CPYTHON_ONLY
-    @unittest.skip('way too slow')
+    @tests.CPYTHON_ONLY
+    @tests.NON_WINDOWS_ONLY
+    @tests.SLOW
     def test_compile_all(self):
         rev1 = '2cd268a3a934' # tag: v3.10.1
         rev2 = 'a58ebcc701dd' # tag: v3.10.2
@@ -284,7 +265,8 @@ def test_compile_all(self):
             capture=None,
         )
 
-    @CPYTHON_ONLY
+    @tests.CPYTHON_ONLY
+    @tests.NON_WINDOWS_ONLY
     @unittest.expectedFailure
     def test_upload(self):
         url = '<bogus>'
@@ -387,8 +369,7 @@ def test_compare_wrong_version(self):
             Skipped 1 benchmarks only in py36.json: telco
 
             Skipped 1 benchmarks only in py3_performance03.json: call_simple
-
-            ERROR: Performance versions are different: 1.0.1 != 0.3
+            ERROR: Performance versions are different (1.0.1 != 0.3)
             ''').lstrip())
 
     def test_compare_single_value(self):

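The venv test above collapses nine nearly identical self.run_pyperformance(...) call blocks into two local closures. The same pattern in a self-contained form, with hypothetical names (run_cli, CliSmokeTests) that are not from this commit:

    import subprocess
    import sys
    import unittest


    class CliSmokeTests(unittest.TestCase):

        def run_cli(self, *args, exitcode=0):
            # Run a child process and assert on its exit code,
            # analogous to run_pyperformance() in the real test.
            proc = subprocess.run([sys.executable, *args],
                                  capture_output=True, text=True)
            self.assertEqual(proc.returncode, exitcode)
            return proc.stdout

        def test_repeated_invocations(self):
            # The local helpers close over `self`, so each call site is one line.
            def expect_success(*args):
                return self.run_cli(*args, exitcode=0)

            def expect_failure(*args):
                return self.run_cli(*args, exitcode=1)

            expect_success('-c', 'pass')
            expect_failure('-c', 'raise SystemExit(1)')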