Skip to content

Commit c92da49

Browse files
committed
works
1 parent 5b68810 commit c92da49

File tree

3 files changed

+46
-130
lines changed

3 files changed

+46
-130
lines changed

code_to_optimize/tests/pytest/benchmarks/test_benchmark_bubble_sort.py

Lines changed: 1 addition & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -1,73 +1,7 @@
1-
from typing import Callable, Any
2-
31
import pytest
4-
from code_to_optimize.bubble_sort import sorter
5-
6-
7-
class DummyBenchmark:
8-
"""A dummy benchmark object that mimics pytest-benchmark's interface."""
9-
10-
def __init__(self):
11-
self.stats = {}
12-
13-
def __call__(self, func: Callable, *args, **kwargs) -> Any:
14-
"""Call the function and return its result without benchmarking."""
15-
return func(*args, **kwargs)
16-
17-
def pedantic(self, target: Callable, args: tuple = (), kwargs: dict = None,
18-
iterations: int = 1, rounds: int = 1, warmup_rounds: int = 0,
19-
setup: Callable = None) -> Any:
20-
"""Mimics the pedantic method of pytest-benchmark."""
21-
if setup:
22-
setup()
23-
if kwargs is None:
24-
kwargs = {}
25-
return target(*args, **kwargs)
26-
27-
@property
28-
def group(self):
29-
"""Return a dummy group object."""
30-
return type('Group', (), {'name': 'dummy'})()
312

32-
@property
33-
def name(self):
34-
"""Return a dummy name."""
35-
return "dummy_benchmark"
36-
37-
@property
38-
def fullname(self):
39-
"""Return a dummy fullname."""
40-
return "dummy::benchmark"
41-
42-
@property
43-
def params(self):
44-
"""Return empty params."""
45-
return {}
46-
47-
@property
48-
def extra_info(self):
49-
"""Return empty extra info."""
50-
return {}
51-
52-
53-
@pytest.fixture
54-
def benchmark(request):
55-
"""
56-
Provide a benchmark fixture that works whether pytest-benchmark is installed or not.
57-
58-
When pytest-benchmark is disabled with '-p no:benchmark', this provides a dummy
59-
implementation that allows tests to run without modification.
60-
"""
61-
# Check if benchmark fixture is already available (pytest-benchmark is active)
62-
if 'benchmark' in request.fixturenames and hasattr(request, '_fixturemanager'):
63-
try:
64-
# Try to get the real benchmark fixture
65-
return request.getfixturevalue('benchmark')
66-
except (pytest.FixtureLookupError, AttributeError):
67-
pass
3+
from code_to_optimize.bubble_sort import sorter
684

69-
# Return dummy benchmark if real one is not available
70-
return DummyBenchmark()
715

726
def test_sort(benchmark):
737
result = benchmark(sorter, list(reversed(range(500))))
Lines changed: 33 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -1,96 +1,69 @@
1-
from __future__ import annotations
2-
3-
from typing import Callable, Protocol
1+
from typing import Any, Callable, Optional
42

53
import pytest
64

75

8-
class GroupProtocol(Protocol):
9-
"""A protocol for objects with a 'name' attribute."""
10-
11-
name: str
12-
13-
14-
class CodeFlashBenchmarkCustomPlugin:
15-
@staticmethod
16-
def pytest_plugin_registered(plugin, manager) -> None: # noqa: ANN001
17-
# Not necessary since run with -p no:benchmark, but just in case
18-
if hasattr(plugin, "name") and plugin.name == "pytest-benchmark":
19-
manager.unregister(plugin)
20-
21-
@staticmethod
22-
def pytest_configure(config: pytest.Config) -> None:
23-
"""Register the benchmark marker."""
24-
config.addinivalue_line(
25-
"markers",
26-
"benchmark: mark test as a benchmark that should be run without modification if the benchmark fixture is disabled",
27-
)
28-
29-
# Benchmark fixture
6+
@pytest.fixture
7+
def benchmark(request): # noqa: ANN201, ANN001
308
class CustomBenchmark:
31-
"""A custom benchmark object that mimics pytest-benchmark's interface."""
32-
339
def __init__(self) -> None:
34-
self.stats = {}
10+
self.stats = []
3511

36-
def __call__(self, func: Callable, *args, **kwargs): # type: ignore # noqa: ANN002, ANN003, ANN204, PGH003
37-
"""Call the function and return its result without benchmarking."""
12+
def __call__(self, func, *args, **kwargs): # noqa: ANN204, ANN001, ANN002, ANN003
13+
# Just call the function without measuring anything
3814
return func(*args, **kwargs)
3915

40-
def pedantic( # noqa: ANN201
16+
def __getattr__(self, name): # noqa: ANN204, ANN001
17+
# Return a no-op callable for any attribute
18+
return lambda *args, **kwargs: None # noqa: ARG005
19+
20+
def pedantic(
4121
self,
42-
target, # noqa: ANN001
43-
args, # noqa: ANN001
44-
kwargs, # noqa: ANN001
22+
target: Callable,
23+
args: tuple = (),
24+
kwargs: Optional[dict] = None, # noqa: FA100
4525
iterations: int = 1, # noqa: ARG002
4626
rounds: int = 1, # noqa: ARG002
4727
warmup_rounds: int = 0, # noqa: ARG002
48-
setup=None, # noqa: ANN001
49-
):
28+
setup: Optional[Callable] = None, # noqa: FA100
29+
) -> Any: # noqa: ANN401
5030
"""Mimics the pedantic method of pytest-benchmark."""
51-
if kwargs is None:
52-
kwargs = {}
5331
if setup:
5432
setup()
5533
if kwargs is None:
5634
kwargs = {}
5735
return target(*args, **kwargs)
5836

5937
@property
60-
def group(self) -> GroupProtocol:
61-
"""Return a custom group object."""
62-
return type("Group", (), {"name": "custom"})()
38+
def group(self): # noqa: ANN202
39+
"""Return a dummy group object."""
40+
return type("Group", (), {"name": "dummy"})()
6341

6442
@property
6543
def name(self) -> str:
66-
"""Return a custom name."""
67-
return "custom_benchmark"
44+
"""Return a dummy name."""
45+
return "dummy_benchmark"
6846

6947
@property
7048
def fullname(self) -> str:
71-
"""Return a custom fullname."""
72-
return "custom::benchmark"
49+
"""Return a dummy fullname."""
50+
return "dummy::benchmark"
7351

7452
@property
75-
def params(self) -> dict:
53+
def params(self): # noqa: ANN202
7654
"""Return empty params."""
7755
return {}
7856

7957
@property
80-
def extra_info(self) -> dict:
58+
def extra_info(self): # noqa: ANN202
8159
"""Return empty extra info."""
8260
return {}
8361

84-
@staticmethod
85-
@pytest.fixture
86-
def benchmark(request: pytest.FixtureRequest) -> object:
87-
# Check if benchmark fixture is already available (pytest-benchmark is active)
88-
if "benchmark" in request.fixturenames and hasattr(request, "_fixturemanager"):
89-
try:
90-
return request.getfixturevalue("benchmark")
91-
except (pytest.FixtureLookupError, AttributeError):
92-
pass
93-
return CodeFlashBenchmarkCustomPlugin.CustomBenchmark(request)
94-
95-
96-
codeflash_benchmark_plugin = CodeFlashBenchmarkCustomPlugin()
62+
# Check if benchmark fixture is already available (pytest-benchmark is active)
63+
if "benchmark" in request.fixturenames and hasattr(request, "_fixturemanager"):
64+
try:
65+
# Try to get the real benchmark fixture
66+
return request.getfixturevalue("benchmark")
67+
except (pytest.FixtureLookupError, AttributeError):
68+
pass
69+
return CustomBenchmark()

codeflash/verification/test_runner.py

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,10 @@ def run_behavioral_tests(
7373
result_args = [f"--junitxml={result_file_path.as_posix()}", "-o", "junit_logging=all"]
7474

7575
pytest_test_env = test_env.copy()
76-
pytest_test_env["PYTEST_PLUGINS"] = "codeflash.verification.pytest_plugin"
76+
pytest_test_env["PYTEST_PLUGINS"] = (
77+
"codeflash.verification.pytest_plugin,"
78+
"codeflash.benchmarking.plugin.custom_pytest_plugin"
79+
)
7780

7881
if enable_coverage:
7982
coverage_database_file, coverage_config_file = prepare_coverage_files()
@@ -191,7 +194,10 @@ def run_line_profile_tests(
191194
result_file_path = get_run_tmp_file(Path("pytest_results.xml"))
192195
result_args = [f"--junitxml={result_file_path.as_posix()}", "-o", "junit_logging=all"]
193196
pytest_test_env = test_env.copy()
194-
pytest_test_env["PYTEST_PLUGINS"] = "codeflash.verification.pytest_plugin"
197+
pytest_test_env["PYTEST_PLUGINS"] = (
198+
"codeflash.verification.pytest_plugin,"
199+
"codeflash.benchmarking.plugin.custom_pytest_plugin"
200+
)
195201
blocklist_args = [f"-p no:{plugin}" for plugin in BENCHMARKING_BLOCKLISTED_PLUGINS]
196202
pytest_test_env["LINE_PROFILE"] = "1"
197203
results = execute_test_subprocess(
@@ -252,7 +258,10 @@ def run_benchmarking_tests(
252258
result_file_path = get_run_tmp_file(Path("pytest_results.xml"))
253259
result_args = [f"--junitxml={result_file_path.as_posix()}", "-o", "junit_logging=all"]
254260
pytest_test_env = test_env.copy()
255-
pytest_test_env["PYTEST_PLUGINS"] = "codeflash.verification.pytest_plugin"
261+
pytest_test_env["PYTEST_PLUGINS"] = (
262+
"codeflash.verification.pytest_plugin,"
263+
"codeflash.benchmarking.plugin.custom_pytest_plugin"
264+
)
256265
blocklist_args = [f"-p no:{plugin}" for plugin in BENCHMARKING_BLOCKLISTED_PLUGINS]
257266
results = execute_test_subprocess(
258267
pytest_cmd_list + pytest_args + blocklist_args + result_args + test_files,

0 commit comments

Comments
 (0)