96 changes: 60 additions & 36 deletions src/perf_unit/__init__.py
@@ -19,12 +19,12 @@
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
-import time
-
import statistics
-from functools import wraps
import time
from concurrent.futures import ThreadPoolExecutor

from contextlib import contextmanager
from functools import wraps
from unittest import TestCase


@@ -60,16 +60,58 @@ def run_single_iteration(
return duration()


def perf_method_decorator(
method,
how_many_threads: int = 30,
total_number_of_method_executions: int = 100,
    upper_median_threshold_in_milliseconds: int = 500,
percentiles: tuple = (10, 50, 75, 90, 95, 99),
):
@wraps(method)
def wrapper(*method_args, **method_kwargs):
futures = []
with ThreadPoolExecutor(how_many_threads) as executor:
for _ in range(total_number_of_method_executions):
futures.append(
executor.submit(
run_single_iteration,
method,
*method_args,
**method_kwargs,
)
)

        execution_times = tuple(future.result() for future in futures)

median_time = statistics.median(execution_times)

print(
f"Percentile report for {method.__name__} with {len(execution_times)} calls:"
)
        # quantiles(n=100) returns the 1st..99th percentile cut points, so
        # index p - 1 is the p-th percentile; compute them once, not per loop.
        cut_points = statistics.quantiles(execution_times, n=100)
        for p in percentiles:
            print(f"  {p}th percentile: {cut_points[p - 1]} milliseconds")

assert (
            median_time < upper_median_threshold_in_milliseconds
), f"Median execution time is too high: {median_time} milliseconds"

return wrapper


def perf_unit_test_class(
*args,
how_many_threads: int = 30,
total_number_of_method_executions: int = 100,
    upper_median_threshold_in_milliseconds: int = 500,
percentiles: tuple = (10, 50, 75, 90, 95, 99),
**kwargs,
):
"""This class decorator converts all test methods in a unit test class into performance tests.

-    It will run the method repeatedly, with a given number of concurrent threads and then analyzed the methods response time.
    It will run each test method repeatedly with a given number of concurrent
    threads and then analyze the method's response times.


Args:
Expand All @@ -80,36 +122,6 @@ def perf_unit_test_class(
"""

def modify_test_class(cls):
-        def wrapper(method):
-            @wraps(method)
-            def wrapped_method(*args, **kwargs):
-                futures = []
-                with ThreadPoolExecutor(how_many_threads) as executor:
-                    for _ in range(total_number_of_method_executions):
-                        futures.append(
-                            executor.submit(
-                                run_single_iteration, method, *args, **kwargs
-                            )
-                        )
-
-                execution_times = tuple([future.result() for future in futures])
-
-                median_time = statistics.median(execution_times)
-
-                print(
-                    f"Percentile report for {method.__name__} with {len(execution_times)} calls:"
-                )
-                for p in percentiles:
-                    print(
-                        f"  {p}th percentile: {statistics.quantiles(execution_times, n=100)[p-1]} milliseconds"
-                    )
-
-                assert (
-                    median_time < upper_median_threashold_in_milliseconds
-                ), f"Median execution time is too high: {median_time} milliseconds"
-
-            return wrapped_method

if not issubclass(cls, TestCase):
raise NotATestCaseClass(
f"the given class `{cls.__name__}` is not a subclass of `unittest.TestCase`"
@@ -118,11 +130,23 @@ def wrapped_method(*args, **kwargs):
for attr in dir(cls):
if attr.startswith("test_"):
original_method = getattr(cls, attr)
-            setattr(cls, attr, wrapper(original_method))
setattr(
cls,
attr,
perf_method_decorator(
original_method,
how_many_threads,
total_number_of_method_executions,
                    upper_median_threshold_in_milliseconds,
percentiles,
),
)

return cls

-    if len(args) == 1:
# The decorator has been used without arguments
if len(args) == 1 and callable(args[0]) and not kwargs:
return modify_test_class(args[0])

# The decorator has been used with arguments
return modify_test_class
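
---

For reference, a minimal usage sketch of the module's decorators after this change. The test class, timings, and parameter values are illustrative only, assuming the package is importable as `perf_unit` (it lives at `src/perf_unit/__init__.py`):

```python
import time
import unittest

from perf_unit import perf_method_decorator, perf_unit_test_class


@perf_unit_test_class(
    how_many_threads=10,
    total_number_of_method_executions=50,
    upper_median_threshold_in_milliseconds=200,
)
class ExamplePerfTest(unittest.TestCase):
    # Hypothetical test: it is run 50 times across 10 threads, a percentile
    # report is printed, and the median duration is asserted against 200 ms.
    def test_fast_operation(self):
        time.sleep(0.01)  # stand-in for the code under test


# The new method-level decorator also works directly on a plain callable,
# using the defaults: 30 threads, 100 executions, 500 ms median threshold.
@perf_method_decorator
def probe_cache_lookup():
    time.sleep(0.005)  # stand-in for the operation being measured


if __name__ == "__main__":
    probe_cache_lookup()  # runs the measurement and the median assertion
    unittest.main()
```

The bare class form `@perf_unit_test_class` (no parentheses) is also supported; the new `callable(args[0]) and not kwargs` check at the end of the diff is what distinguishes it from the parametrized form.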