from __future__ import annotations

import importlib.util

import pytest

from codeflash.benchmarking.plugin.plugin import codeflash_benchmark_plugin

PYTEST_BENCHMARK_INSTALLED = importlib.util.find_spec("pytest_benchmark") is not None

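# Each tuple: (CLI flag, argparse action, default restored when --codeflash-trace is active, help text)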
benchmark_options = [
    ("--benchmark-columns", "store", None, "Benchmark columns"),
    ("--benchmark-group-by", "store", None, "Benchmark group by"),
    ("--benchmark-name", "store", None, "Benchmark name pattern"),
    ("--benchmark-sort", "store", None, "Benchmark sort column"),
    ("--benchmark-json", "store", None, "Benchmark JSON output file"),
    ("--benchmark-save", "store", None, "Benchmark save name"),
    ("--benchmark-warmup", "store", None, "Benchmark warmup"),
    ("--benchmark-warmup-iterations", "store", None, "Benchmark warmup iterations"),
    ("--benchmark-min-time", "store", None, "Benchmark minimum time"),
    ("--benchmark-max-time", "store", None, "Benchmark maximum time"),
    ("--benchmark-min-rounds", "store", None, "Benchmark minimum rounds"),
    ("--benchmark-timer", "store", None, "Benchmark timer"),
    ("--benchmark-calibration-precision", "store", None, "Benchmark calibration precision"),
    ("--benchmark-disable", "store_true", False, "Disable benchmarks"),
    ("--benchmark-skip", "store_true", False, "Skip benchmarks"),
    ("--benchmark-only", "store_true", False, "Only run benchmarks"),
    ("--benchmark-verbose", "store_true", False, "Verbose benchmark output"),
    ("--benchmark-histogram", "store", None, "Benchmark histogram"),
    ("--benchmark-compare", "store", None, "Benchmark compare"),
    ("--benchmark-compare-fail", "store", None, "Benchmark compare fail threshold"),
]


def pytest_configure(config: pytest.Config) -> None:
    """Register the benchmark marker and disable conflicting plugins."""
    config.addinivalue_line("markers", "benchmark: mark test as a benchmark that should be run with codeflash tracing")

    if config.getoption("--codeflash-trace"):
        # When --codeflash-trace is used, ignore all benchmark options by resetting them to defaults
        for option, _, default, _ in benchmark_options:
            option_name = option.replace("--", "").replace("-", "_")
            if hasattr(config.option, option_name):
                setattr(config.option, option_name, default)

        if PYTEST_BENCHMARK_INSTALLED:
            config.pluginmanager.set_blocked("pytest_benchmark")
            config.pluginmanager.set_blocked("pytest-benchmark")


def pytest_addoption(parser: pytest.Parser) -> None:
    parser.addoption(
        "--codeflash-trace", action="store_true", default=False, help="Enable CodeFlash tracing for benchmarks"
    )
    # These options are ignored when --codeflash-trace is used
    for option, action, default, help_text in benchmark_options:
        help_suffix = " (ignored when --codeflash-trace is used)"
        parser.addoption(option, action=action, default=default, help=help_text + help_suffix)


@pytest.fixture
def benchmark(request: pytest.FixtureRequest) -> object:
    """Benchmark fixture that works with or without pytest-benchmark installed."""
    config = request.config

    # If --codeflash-trace is enabled, use our implementation
    if config.getoption("--codeflash-trace"):
        return codeflash_benchmark_plugin.Benchmark(request)

    # If pytest-benchmark is installed and --codeflash-trace is not enabled,
    # return the normal pytest-benchmark fixture
    if PYTEST_BENCHMARK_INSTALLED:
        from pytest_benchmark.fixture import BenchmarkFixture as BSF  # pyright: ignore[reportMissingImports] # noqa: I001, N814

        bs = getattr(config, "_benchmarksession", None)
        if bs and bs.skip:
            pytest.skip("Benchmarks are skipped (--benchmark-skip was used).")

        node = request.node
        marker = node.get_closest_marker("benchmark")
        options = dict(marker.kwargs) if marker else {}

        if bs:
            return BSF(
                node,
                add_stats=bs.benchmarks.append,
                logger=bs.logger,
                warner=request.node.warn,
                disabled=bs.disabled,
                **dict(bs.options, **options),
            )
        return lambda func, *args, **kwargs: func(*args, **kwargs)

    return lambda func, *args, **kwargs: func(*args, **kwargs)
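For reference, a minimal test exercising this fixture might look like the sketch below. The `compute` helper and the file path are hypothetical; the `benchmark(func, *args, **kwargs)` call shape matches the plain-callable fallback above and pytest-benchmark's fixture, and is assumed to be accepted by `codeflash_benchmark_plugin.Benchmark` as well.

# tests/test_compute.py -- hypothetical usage sketch
import pytest


def compute(n: int) -> int:
    # Stand-in workload used only for illustration
    return sum(i * i for i in range(n))


@pytest.mark.benchmark  # marker registered by pytest_configure above
def test_compute(benchmark) -> None:
    result = benchmark(compute, 1_000)
    assert result == compute(1_000)

Running `pytest --codeflash-trace` routes the timed call through codeflash's tracing implementation with the pytest-benchmark options neutralized; a plain `pytest` run falls back to pytest-benchmark when it is installed, or to the no-op pass-through lambda otherwise.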