diff --git a/src/pytest_codspeed/instruments/__init__.py b/src/pytest_codspeed/instruments/__init__.py
index fb264f1..aaeaf81 100644
--- a/src/pytest_codspeed/instruments/__init__.py
+++ b/src/pytest_codspeed/instruments/__init__.py
@@ -31,8 +31,8 @@ def measure(
         name: str,
         uri: str,
         fn: Callable[..., T],
-        *args: tuple,
-        **kwargs: dict[str, Any],
+        *args: Any,
+        **kwargs: Any,
     ) -> T: ...
 
     @abstractmethod
diff --git a/src/pytest_codspeed/instruments/valgrind.py b/src/pytest_codspeed/instruments/valgrind.py
index 71b1542..c5be4c1 100644
--- a/src/pytest_codspeed/instruments/valgrind.py
+++ b/src/pytest_codspeed/instruments/valgrind.py
@@ -53,8 +53,8 @@ def measure(
         name: str,
         uri: str,
         fn: Callable[..., T],
-        *args: tuple,
-        **kwargs: dict[str, Any],
+        *args: Any,
+        **kwargs: Any,
     ) -> T:
         self.benchmark_count += 1
 
diff --git a/src/pytest_codspeed/instruments/walltime.py b/src/pytest_codspeed/instruments/walltime.py
index 2423240..b6076e6 100644
--- a/src/pytest_codspeed/instruments/walltime.py
+++ b/src/pytest_codspeed/instruments/walltime.py
@@ -188,8 +188,8 @@ def measure(
         name: str,
         uri: str,
         fn: Callable[..., T],
-        *args: tuple,
-        **kwargs: dict[str, Any],
+        *args: Any,
+        **kwargs: Any,
     ) -> T:
         benchmark_config = BenchmarkConfig.from_codspeed_config_and_marker_data(
             self.config, marker_options
diff --git a/src/pytest_codspeed/plugin.py b/src/pytest_codspeed/plugin.py
index 24e0401..afd9a41 100644
--- a/src/pytest_codspeed/plugin.py
+++ b/src/pytest_codspeed/plugin.py
@@ -267,7 +267,7 @@ def wrap_runtest(
         fn: Callable[..., T],
     ) -> Callable[..., T]:
         @functools.wraps(fn)
-        def wrapped(*args: tuple, **kwargs: dict[str, Any]) -> T:
+        def wrapped(*args: Any, **kwargs: Any) -> T:
             return _measure(plugin, node, config, None, fn, args, kwargs)
 
         return wrapped
@@ -329,7 +329,7 @@ def __init__(self, request: pytest.FixtureRequest):
         self._called = False
 
     def __call__(
-        self, target: Callable[..., T], *args: tuple, **kwargs: dict[str, Any]
+        self, target: Callable[..., T], *args: Any, **kwargs: Any
     ) -> T:
         if self._called:
             raise RuntimeError("The benchmark fixture can only be used once per test")
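
For context (not part of the patch): under PEP 484, the annotation on `*args` and `**kwargs` types each *individual* argument, not the collected `tuple`/`dict`. The old `*args: tuple` therefore declared that every positional argument must itself be a tuple, rejecting the arbitrary arguments these functions forward to `fn`. A minimal sketch of the difference as a type checker like mypy sees it; the function names `before`/`after` are hypothetical, for illustration only:

```python
from typing import Any, reveal_type  # reveal_type: stdlib in 3.11+, else typing_extensions


def before(*args: tuple, **kwargs: dict[str, Any]) -> None:
    # Old annotations: each positional argument must itself be a tuple,
    # each keyword argument must itself be a dict[str, Any].
    reveal_type(args)    # tuple[tuple[Any, ...], ...]
    reveal_type(kwargs)  # dict[str, dict[str, Any]]


def after(*args: Any, **kwargs: Any) -> None:
    # New annotations: each forwarded argument may be anything, so
    # `args` is tuple[Any, ...] and `kwargs` is dict[str, Any].
    reveal_type(args)    # tuple[Any, ...]
    reveal_type(kwargs)  # dict[str, Any]


before(1, x=2)  # mypy error: "int" is not a tuple / dict[str, Any]
after(1, x=2)   # OK
```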