diff --git a/src/perf_unit/__init__.py b/src/perf_unit/__init__.py
index dc332eb..9791917 100644
--- a/src/perf_unit/__init__.py
+++ b/src/perf_unit/__init__.py
@@ -19,12 +19,12 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 """
-import time
+
 import statistics
-from functools import wraps
+import time
 from concurrent.futures import ThreadPoolExecutor
-
 from contextlib import contextmanager
+from functools import wraps
 from unittest import TestCase
@@ -60,16 +60,58 @@ def run_single_iteration(
     return duration()
 
 
+def perf_method_decorator(
+    method,
+    how_many_threads: int = 30,
+    total_number_of_method_executions: int = 100,
+    upper_median_threashold_in_milliseconds: int = 500,
+    percentiles: tuple = (10, 50, 75, 90, 95, 99),
+):
+    @wraps(method)
+    def wrapper(*method_args, **method_kwargs):
+        futures = []
+        with ThreadPoolExecutor(how_many_threads) as executor:
+            for _ in range(total_number_of_method_executions):
+                futures.append(
+                    executor.submit(
+                        run_single_iteration,
+                        method,
+                        *method_args,
+                        **method_kwargs,
+                    )
+                )
+
+        execution_times = tuple([future.result() for future in futures])
+
+        median_time = statistics.median(execution_times)
+
+        print(
+            f"Percentile report for {method.__name__} with {len(execution_times)} calls:"
+        )
+        for p in percentiles:
+            print(
+                f"    {p}th percentile: {statistics.quantiles(execution_times, n=100)[p - 1]} milliseconds"
+            )
+
+        assert (
+            median_time < upper_median_threashold_in_milliseconds
+        ), f"Median execution time is too high: {median_time} milliseconds"
+
+    return wrapper
+
+
 def perf_unit_test_class(
     *args,
     how_many_threads: int = 30,
     total_number_of_method_executions: int = 100,
     upper_median_threashold_in_milliseconds: int = 500,
     percentiles: tuple = (10, 50, 75, 90, 95, 99),
+    **kwargs,
 ):
     """This class decorator converts all test methods in a unit test class into performance tests.
-    It will run the method repeatedly, with a given number of concurrent threads and then analyzed the methods response time.
+    It will run each test method repeatedly,
+    with a given number of concurrent threads, and then analyze the method's response times.
 
     Args:
@@ -80,36 +122,6 @@ def perf_unit_test_class(
     """
 
     def modify_test_class(cls):
-        def wrapper(method):
-            @wraps(method)
-            def wrapped_method(*args, **kwargs):
-                futures = []
-                with ThreadPoolExecutor(how_many_threads) as executor:
-                    for _ in range(total_number_of_method_executions):
-                        futures.append(
-                            executor.submit(
-                                run_single_iteration, method, *args, **kwargs
-                            )
-                        )
-
-                execution_times = tuple([future.result() for future in futures])
-
-                median_time = statistics.median(execution_times)
-
-                print(
-                    f"Percentile report for {method.__name__} with {len(execution_times)} calls:"
-                )
-                for p in percentiles:
-                    print(
-                        f"    {p}th percentile: {statistics.quantiles(execution_times, n=100)[p-1]} milliseconds"
-                    )
-
-                assert (
-                    median_time < upper_median_threashold_in_milliseconds
-                ), f"Median execution time is too high: {median_time} milliseconds"
-
-            return wrapped_method
-
         if not issubclass(cls, TestCase):
             raise NotATestCaseClass(
                 f"the given class `{cls.__name__}` is not a subclass of `unittest.TestCase`"
@@ -118,11 +130,23 @@ def wrapped_method(*args, **kwargs):
         for attr in dir(cls):
             if attr.startswith("test_"):
                 original_method = getattr(cls, attr)
-                setattr(cls, attr, wrapper(original_method))
+                setattr(
+                    cls,
+                    attr,
+                    perf_method_decorator(
+                        original_method,
+                        how_many_threads,
+                        total_number_of_method_executions,
+                        upper_median_threashold_in_milliseconds,
+                        percentiles,
+                    ),
+                )
 
         return cls
 
-    if len(args) == 1:
+    # The decorator has been used without arguments
+    if len(args) == 1 and callable(args[0]) and not kwargs:
        return modify_test_class(args[0])
 
+    # The decorator has been used with arguments
     return modify_test_class
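
For reference, a minimal usage sketch of the decorator after this refactor. It is not part of the diff; the test class, method name, and threshold values are illustrative assumptions, and it assumes the package is importable as perf_unit and that run_single_iteration reports durations in milliseconds.

# Usage sketch (illustrative only, not part of the diff above).
import time
import unittest

from perf_unit import perf_unit_test_class


@perf_unit_test_class(
    how_many_threads=10,
    total_number_of_method_executions=50,
    upper_median_threashold_in_milliseconds=200,
)
class ExampleServiceTest(unittest.TestCase):  # hypothetical test class
    def test_fast_operation(self):
        # Stand-in for the code path under test; roughly 10 ms per call keeps the
        # median below the 200 ms threshold asserted by the wrapped test method.
        time.sleep(0.01)


if __name__ == "__main__":
    unittest.main()

When called with arguments, perf_unit_test_class returns modify_test_class, which Python then applies to the class; the bare @perf_unit_test_class form is handled by the new `len(args) == 1 and callable(args[0]) and not kwargs` branch.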