Skip to content

Commit 0191e29

Browse files
authored
Add performance profiling (#923)
1 parent 70d3434 commit 0191e29

File tree

9 files changed

+240
-5
lines changed

9 files changed

+240
-5
lines changed

.github/CONTRIBUTING.md

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -140,6 +140,16 @@ just bench encodings
140140

141141
Omit the benchmark name to list all available benchmarks.
142142

143+
## Profiling
144+
145+
You can run all performance profiling tests with the `test-perf` recipe, for example:
146+
147+
```
148+
just test-perf basic
149+
```
150+
151+
Omit the profiling test name to list all available profiling tests.
152+
143153
## Python Version Management
144154

145155
You can use the `python` variable to specify the version of Python to use for each environment. For example, to run tests with Python 3.12:

.github/workflows/ci.yml

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -168,6 +168,37 @@ jobs:
168168
use_oidc: true
169169
directory: coverage-files
170170

171+
profile:
  name: Run profiling tests
  # Profiling is comparatively expensive; only run it on commits that
  # already pass the unit-test job.
  needs:
    - test
  runs-on: ubuntu-latest

  steps:
    - name: Checkout code
      uses: actions/checkout@v5

    - name: Load environment file
      uses: ./.github/actions/load-env

    - name: Install Python
      uses: actions/setup-python@v6
      with:
        # Pin the interpreter to the version declared in pyproject.toml.
        python-version-file: pyproject.toml

    - name: Install uv
      uses: astral-sh/setup-uv@v7

    - name: Install command runner
      run: uv tool install rust-just

    - name: Install profiling dependencies
      # Syncs the `prof` dependency group defined in pyproject.toml.
      run: just env-sync prof

    # TODO: set up infra to commit results after each merge and then make tests compare
    - name: Run profiling tests
      run: just test-perf all --calibrate --rounds 10000
201+
171202
build-wheels:
172203
name: Build wheels on ${{ matrix.os }} for ${{ matrix.archs }}
173204
needs:

.justfile

Lines changed: 38 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ alias pre-commit := prek
5151
[group: "Static Analysis"]
5252
hooks-fix *args: (
5353
env-run "hooks"
54-
"prek run --show-diff-on-failure --all-files"
54+
"prek run --all-files"
5555
args
5656
)
5757
alias hooks := hooks-fix
@@ -60,7 +60,7 @@ alias hooks := hooks-fix
6060
[group: "Static Analysis"]
6161
hooks-check *args: (
6262
env-run "hooks"
63-
"prek run --show-diff-on-failure --all-files --hook-stage manual"
63+
"prek run --all-files --hook-stage manual"
6464
args
6565
)
6666
alias check := hooks-check
@@ -112,13 +112,38 @@ doc-serve: (
112112
"python -c \"import pathlib,webbrowser;webbrowser.open_new_tab(pathlib.Path('docs/build/html/index.html').absolute().as_uri())\""
113113
)
114114

115+
# Run performance tests.
# With no name: print the available profiling tests. With `all`: run the
# whole tests/prof/perf suite. Otherwise run tests/prof/perf/test_<name>.py.
[group: "Profiling"]
test-perf name="" *args: (
    env-run "prof"
    (
        if name == "" {
            "python -c \"import os;print('Available tests:', os.linesep.join(['', *sorted(e[5:].split('.')[0] for e in os.listdir('tests/prof/perf') if e.startswith('test_')), 'all (run everything)']))\""
        } else {
            (
                if name =~ "^-" {
                    error(
                        "Invalid test name: " + name
                    )
                } else if name == "all" {
                    "pytest -o testpaths=tests/prof/perf"
                } else {
                    "pytest tests/prof/perf/test_" + name + ".py"
                }
            )
            + " --benchmark-name short --benchmark-disable-gc"
        }
    )
    args
)
139+
115140
# Run benchmarks.
116141
[group: "Benchmarking"]
117142
bench-run name="" *args: (
118143
env-run "bench"
119144
(
120145
if name == "" {
121-
"python -c \"import os;print(os.linesep.join(sorted(e[6:].split('.')[0] for e in os.listdir('benchmarks') if e.startswith('bench_'))))\""
146+
"python -c \"import os;print('Available benchmarks:', os.linesep.join(['', *sorted(e[6:].split('.')[0] for e in os.listdir('benchmarks') if e.startswith('bench_')), 'all (run everything)']))\""
122147
} else {
123148
"python -m benchmarks.bench_" + name
124149
}
@@ -212,6 +237,8 @@ _env_sync_all: (
212237
_with_env "doc" "sync"
213238
) (
214239
_with_env "bench" "sync"
240+
) (
241+
_with_env "prof" "sync"
215242
)
216243

217244
[private]
@@ -254,7 +281,9 @@ _with_env env action *args:
254281
} else if action == "sync" { \
255282
"sync" \
256283
} else { \
257-
error("Unknown action: " + action) \
284+
error( \
285+
"Unknown action: " + action \
286+
) \
258287
} \
259288
}}{{ \
260289
if rebuild =~ "^(true|1)$" { \
@@ -267,13 +296,17 @@ _with_env env action *args:
267296
"--group test-unit --group test-typing" \
268297
} else if env == "doc" { \
269298
"--group doc" \
299+
} else if env == "prof" { \
300+
"--group prof" \
270301
} else if env == "bench" { \
271302
"--group bench" \
272303
} else if env == "hooks" { \
273304
"--only-group hooks" \
274305
} else if env == "none" { \
275306
"--isolated --no-sync --no-config" \
276307
} else { \
277-
error("Unknown environment: " + env) \
308+
error( \
309+
"Unknown environment: " + env \
310+
) \
278311
} \
279312
}} {{ args }}

pyproject.toml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,12 @@ hooks = [
104104
"prek>=0.2.13",
105105
"ruff==0.14.1",
106106
]
107+
# Dependencies for the performance-profiling test suite (`just test-perf`).
# memray and pytest-memray are excluded on Windows, where they are unsupported.
prof = [
    "memray; sys_platform != 'win32'",
    "pytest",
    "pytest-benchmark",
    "pytest-memray; sys_platform != 'win32'",
]
107113
test-typing = [
108114
"mypy",
109115
"pyright",

tests/conftest.py

Lines changed: 94 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
1+
import os
2+
import re
3+
import sys
4+
from pathlib import Path
5+
6+
7+
def pytest_addoption(parser):
    """Register suite-specific command-line options only when a suite is targeted.

    Declaring additional command-line options within test suite-specific
    ``conftest.py`` files is documented as unsupported by pytest. See:
    https://docs.pytest.org/en/stable/reference/reference.html#pytest.hookspec.pytest_addoption.

    However, it seems to work in practice with the following limitations:
    - The options only appear in the help text for suites that are configured in
      the default ``testpaths`` configuration. This is undesirable because we don't
      want a missing argument to trigger heavy tests like performance profiling.
    - The options are parsed differently such that an equals sign is required for
      options with a value rather than the more natural space-separated format.

    As a workaround, we parse the command line for options that match the patterns
    of the configurable test suites and add their options to the parser.
    """
    suite_handlers = {
        "tests/prof/perf": add_performance_profiling_options,
    }

    # If we are running from inside (or at) a configurable suite's directory,
    # register its options right away and drop it from further matching.
    cwd = Path.cwd()
    project_root = Path(__file__).parent.parent
    for relpath in list(suite_handlers):
        suite_dir = project_root.joinpath(relpath)
        if suite_dir == cwd or suite_dir in cwd.parents:
            suite_handlers.pop(relpath)(parser)
            break

    # Always accept forward slashes and the system separator, escaping the
    # latter in case of backslashes (Windows).
    separators = {"/", re.escape(os.sep)}
    sep_class = f"[{''.join(sorted(separators))}]"
    pending = {
        re.compile(rf"\b{relpath.replace('/', sep_class)}\b"): handler
        for relpath, handler in suite_handlers.items()
    }

    # Scan the command line; a help flag or any argument mentioning a suite's
    # path triggers that suite's option registration (at most once per suite).
    for arg in sys.argv:
        if not pending:
            break
        matched = next(
            (
                pattern
                for pattern in pending
                if arg in {"-h", "--help"} or pattern.search(arg)
            ),
            None,
        )
        if matched is not None:
            pending.pop(matched)(parser)
56+
57+
def add_performance_profiling_options(parser):
    """Add the performance-profiling option group to the pytest parser.

    Registers calibration controls used by the ``bench``/``bench_config``
    fixtures in ``tests/prof/perf/conftest.py``.
    """
    group = parser.getgroup("performance profiling")
    group.addoption(
        "--calibrate",
        action="store_true",
        # NOTE: these two literals rely on implicit string concatenation;
        # trailing commas here would silently turn the help text into a tuple.
        help=(
            "Override automatic calibration of benchmarks and enable the following options: "
            "--rounds, --warmup-rounds, --iterations"
        ),
    )
    group.addoption(
        "--rounds",
        type=int,
        default=1000,
        metavar="ROUNDS",
        help="Number of rounds for benchmarks (default: 1000)",
    )
    group.addoption(
        "--warmup-rounds",
        type=int,
        default=50,
        metavar="WARMUP_ROUNDS",
        help="Number of warmup rounds for benchmarks (default: 50)",
    )
    group.addoption(
        "--iterations",
        type=int,
        default=10,
        metavar="ITERATIONS",
        help="Number of iterations in each round of benchmarks (default: 10)",
    )
    group.addoption(
        "--size",
        type=int,
        default=1000,
        metavar="SIZE",
        help="Size of the benchmark data (default: 1000)",
    )

tests/prof/__init__.py

Whitespace-only changes.

tests/prof/perf/__init__.py

Whitespace-only changes.

tests/prof/perf/conftest.py

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
import pytest
2+
3+
from benchmarks.generate_data import make_filesystem_data
4+
5+
6+
@pytest.fixture
def bench(benchmark, bench_config):
    """Return a benchmark runner callable.

    In calibration mode (``--calibrate``) the runner uses
    ``benchmark.pedantic`` with the configured rounds/iterations/warmup;
    otherwise it defers to pytest-benchmark's automatic calibration.
    """
    if bench_config["calibrate"]:
        def run(target, *args, **kwargs):
            return benchmark.pedantic(
                target,
                args=args,
                kwargs=kwargs,
                rounds=bench_config["rounds"],
                iterations=bench_config["iterations"],
                warmup_rounds=bench_config["warmup_rounds"],
            )
    else:
        def run(target, *args, **kwargs):
            return benchmark(target, *args, **kwargs)

    return run
19+
20+
21+
@pytest.fixture
def filesystem_data(bench_config):
    """Return a factory for benchmark filesystem data.

    The factory's ``size`` defaults to the configured ``--size`` option.
    """
    configured_size = bench_config["size"]

    def make(size=configured_size):
        return make_filesystem_data(size)

    return make
24+
25+
26+
@pytest.fixture(scope="class")
27+
def shared_data():
28+
"""
29+
This is used to share data between serially-executed tests within a class.
30+
"""
31+
return {}
32+
33+
34+
@pytest.fixture
def bench_config(request):
    """Collect the performance-profiling command-line options into one dict."""
    option = request.config.getoption
    return {
        "calibrate": option("--calibrate"),
        "rounds": option("--rounds"),
        "warmup_rounds": option("--warmup-rounds"),
        "iterations": option("--iterations"),
        "size": option("--size"),
    }

tests/prof/perf/test_basic.py

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
import pytest
2+
3+
from benchmarks.bench_validation import bench_msgspec
4+
5+
6+
@pytest.mark.benchmark(group="roundtrip")
class TestRoundtrip:
    """Profile a msgspec encode/decode roundtrip over generated data.

    ``test_encode`` must run before ``test_decode``; the encoded payload is
    passed between them via the class-scoped ``shared_data`` fixture.
    """

    def test_encode(self, bench, filesystem_data, shared_data):
        data = filesystem_data()
        encoded = bench(bench_msgspec.encode, data)
        # Stash the payload for test_decode.
        shared_data["encoded"] = encoded

    def test_decode(self, bench, shared_data):
        # Use .get(): if test_encode was skipped or deselected the key is
        # absent, and indexing would raise an unexplained KeyError instead
        # of the intended ValueError below.
        if shared_data.get("encoded") is None:
            raise ValueError(
                "test_encode must run before test_decode to provide encoded data"
            )

        bench(bench_msgspec.decode, shared_data["encoded"])

0 commit comments

Comments
 (0)