1 | | -from __future__ import annotations |
2 | | - |
3 | | -from typing import Callable, Protocol |
| 1 | +from typing import Any, Callable, Optional |
4 | 2 | |
5 | 3 | import pytest |
6 | 4 | |
7 | 5 | |
8 | | -class GroupProtocol(Protocol): |
9 | | - """A protocol for objects with a 'name' attribute.""" |
10 | | - |
11 | | - name: str |
12 | | - |
13 | | - |
14 | | -class CodeFlashBenchmarkCustomPlugin: |
15 | | - @staticmethod |
16 | | - def pytest_plugin_registered(plugin, manager) -> None: # noqa: ANN001 |
17 | | - # Not necessary since run with -p no:benchmark, but just in case |
18 | | - if hasattr(plugin, "name") and plugin.name == "pytest-benchmark": |
19 | | - manager.unregister(plugin) |
20 | | - |
21 | | - @staticmethod |
22 | | - def pytest_configure(config: pytest.Config) -> None: |
23 | | - """Register the benchmark marker.""" |
24 | | - config.addinivalue_line( |
25 | | - "markers", |
26 | | - "benchmark: mark test as a benchmark that should be run without modification if the benchmark fixture is disabled", |
27 | | - ) |
28 | | - |
29 | | - # Benchmark fixture |
| 6 | +@pytest.fixture |
| 7 | +def benchmark(request): # noqa: ANN201, ANN001 |
30 | 8 | class CustomBenchmark: |
31 | | - """A custom benchmark object that mimics pytest-benchmark's interface.""" |
32 | | - |
33 | 9 | def __init__(self) -> None: |
34 | | - self.stats = {} |
| 10 | + self.stats = [] |
35 | 11 | |
36 | | - def __call__(self, func: Callable, *args, **kwargs): # type: ignore # noqa: ANN002, ANN003, ANN204, PGH003 |
37 | | - """Call the function and return its result without benchmarking.""" |
| 12 | + def __call__(self, func, *args, **kwargs): # noqa: ANN204, ANN001, ANN002, ANN003 |
| 13 | + # Just call the function without measuring anything |
38 | 14 | return func(*args, **kwargs) |
39 | 15 | |
40 | | - def pedantic( # noqa: ANN201 |
| 16 | + def __getattr__(self, name): # noqa: ANN204, ANN001 |
| 17 | + # Return a no-op callable for any attribute |
| 18 | + return lambda *args, **kwargs: None # noqa: ARG005 |
| 19 | + |
| 20 | + def pedantic( |
41 | 21 | self, |
42 | | - target, # noqa: ANN001 |
43 | | - args, # noqa: ANN001 |
44 | | - kwargs, # noqa: ANN001 |
| 22 | + target: Callable, |
| 23 | + args: tuple = (), |
| 24 | + kwargs: Optional[dict] = None, # noqa: FA100 |
45 | 25 | iterations: int = 1, # noqa: ARG002 |
46 | 26 | rounds: int = 1, # noqa: ARG002 |
47 | 27 | warmup_rounds: int = 0, # noqa: ARG002 |
48 | | - setup=None, # noqa: ANN001 |
49 | | - ): |
| 28 | + setup: Optional[Callable] = None, # noqa: FA100 |
| 29 | + ) -> Any: # noqa: ANN401 |
50 | 30 | """Mimics the pedantic method of pytest-benchmark.""" |
51 | | - if kwargs is None: |
52 | | - kwargs = {} |
53 | 31 | if setup: |
54 | 32 | setup() |
55 | 33 | if kwargs is None: |
56 | 34 | kwargs = {} |
57 | 35 | return target(*args, **kwargs) |
58 | 36 | |
59 | 37 | @property |
60 | | - def group(self) -> GroupProtocol: |
61 | | - """Return a custom group object.""" |
62 | | - return type("Group", (), {"name": "custom"})() |
| 38 | + def group(self): # noqa: ANN202 |
| 39 | + """Return a dummy group object.""" |
| 40 | + return type("Group", (), {"name": "dummy"})() |
63 | 41 | |
64 | 42 | @property |
65 | 43 | def name(self) -> str: |
66 | | - """Return a custom name.""" |
67 | | - return "custom_benchmark" |
| 44 | + """Return a dummy name.""" |
| 45 | + return "dummy_benchmark" |
68 | 46 | |
69 | 47 | @property |
70 | 48 | def fullname(self) -> str: |
71 | | - """Return a custom fullname.""" |
72 | | - return "custom::benchmark" |
| 49 | + """Return a dummy fullname.""" |
| 50 | + return "dummy::benchmark" |
73 | 51 | |
74 | 52 | @property |
75 | | - def params(self) -> dict: |
| 53 | + def params(self): # noqa: ANN202 |
76 | 54 | """Return empty params.""" |
77 | 55 | return {} |
78 | 56 | |
79 | 57 | @property |
80 | | - def extra_info(self) -> dict: |
| 58 | + def extra_info(self): # noqa: ANN202 |
81 | 59 | """Return empty extra info.""" |
82 | 60 | return {} |
83 | 61 | |
84 | | - @staticmethod |
85 | | - @pytest.fixture |
86 | | - def benchmark(request: pytest.FixtureRequest) -> object: |
87 | | - # Check if benchmark fixture is already available (pytest-benchmark is active) |
88 | | - if "benchmark" in request.fixturenames and hasattr(request, "_fixturemanager"): |
89 | | - try: |
90 | | - return request.getfixturevalue("benchmark") |
91 | | - except (pytest.FixtureLookupError, AttributeError): |
92 | | - pass |
93 | | - return CodeFlashBenchmarkCustomPlugin.CustomBenchmark(request) |
94 | | - |
95 | | - |
96 | | -codeflash_benchmark_plugin = CodeFlashBenchmarkCustomPlugin() |
| 62 | + # Check if benchmark fixture is already available (pytest-benchmark is active) |
| 63 | + if "benchmark" in request.fixturenames and hasattr(request, "_fixturemanager"): |
| 64 | + try: |
| 65 | + # Try to get the real benchmark fixture |
| 66 | + return request.getfixturevalue("benchmark") |
| 67 | + except (pytest.FixtureLookupError, AttributeError): |
| 68 | + pass |
| 69 | + return CustomBenchmark() |
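For reference, a minimal usage sketch (not part of this diff) of how a test could consume the `benchmark` fixture, assuming it is exposed to tests through a conftest.py or explicit plugin registration, which the diff does not show; `slow_add` and the test names below are hypothetical. With pytest-benchmark installed and active, `request.getfixturevalue("benchmark")` hands back the real fixture; otherwise the tests run against the no-op `CustomBenchmark` fallback.

```python
# Hypothetical test module; assumes the fixture above is registered
# (e.g. via a conftest.py), which this diff does not show.
import time


def slow_add(a: int, b: int) -> int:
    time.sleep(0.001)  # stand-in for real work
    return a + b


def test_slow_add(benchmark) -> None:
    # With pytest-benchmark active this call is timed; with the fallback,
    # CustomBenchmark.__call__ simply invokes the function once.
    result = benchmark(slow_add, 2, 3)
    assert result == 5


def test_slow_add_pedantic(benchmark) -> None:
    # pedantic() mirrors pytest-benchmark's signature; the fallback ignores
    # rounds/iterations/warmup_rounds and calls the target a single time.
    result = benchmark.pedantic(slow_add, args=(1, 2), rounds=3, iterations=5)
    assert result == 3
```

Either way the assertions hold, which is the point of the fallback: benchmark-style tests keep passing when the plugin is disabled with `-p no:benchmark`.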