37 | 37 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38 | 38 | # SOFTWARE.
39 | 39 |
   | 40 | +import _io
40 | 41 | import argparse
   | 42 | +import os
41 | 43 | import sys
42 | 44 | from time import time
43 | 45 |
   | 46 | +_HRULE = '-'.join(['' for i in range(120)])
   | 47 | +ATTR_WARMUP = '__warmup__'
   | 48 | +ATTR_BENCHMARK = '__benchmark__'
44 | 49 |
45 |    | -def _get_duration_message(i, name, duration):
46 |    | -    return "iteration=%s, name=%s, duration=%s".format(i, name, duration)
47 | 50 |
   | 51 | +class BenchRunner(object):
   | 52 | +    def __init__(self, bench_file, bench_args=None, iterations=1, verbose=False):
   | 53 | +        if bench_args is None:
   | 54 | +            bench_args = []
   | 55 | +        self.bench_module = BenchRunner.get_bench_module(bench_file)
   | 56 | +        self.bench_args = bench_args
   | 57 | +        self.verbose = verbose
   | 58 | +        if isinstance(iterations, (list, tuple)):
   | 59 | +            iterations = iterations[0]
   | 60 | +        if isinstance(iterations, str):
   | 61 | +            iterations = int(iterations)
   | 62 | +        self.iterations = iterations
48 | 63 |
49 |    | -def benchmark(name, iterations=1):
50 |    | -    def fnbenchmark(func):
51 |    | -        def wrapper(*args, **kwargs):
52 |    | -            for i in range(iterations):
53 |    | -                start = time()
54 |    | -                func(*args, **kwargs)
55 |    | -                duration = "%.3f\n" % (time() - start)
56 |    | -                print(_get_duration_message(i, name, duration))
57 |    | -        return wrapper
58 |    | -    return fnbenchmark
   | 64 | +    @staticmethod
   | 65 | +    def get_bench_module(bench_file):
   | 66 | +        name = bench_file.rpartition("/")[2].partition(".")[0].replace('.py', '')
   | 67 | +        directory = bench_file.rpartition("/")[0]
   | 68 | +        pkg = []
   | 69 | +        while any(f.endswith("__init__.py") for f in os.listdir(directory)):
   | 70 | +            directory, slash, postfix = directory.rpartition("/")
   | 71 | +            pkg.insert(0, postfix)
   | 72 | +
   | 73 | +        if pkg:
   | 74 | +            sys.path.insert(0, directory)
   | 75 | +            bench_module = __import__(".".join(pkg + [name]))
   | 76 | +            for p in pkg[1:]:
   | 77 | +                bench_module = getattr(bench_module, p)
   | 78 | +            bench_module = getattr(bench_module, name)
   | 79 | +            return bench_module
   | 80 | +
   | 81 | +        else:
   | 82 | +            bench_module = type(sys)(name, bench_file)
   | 83 | +            with _io.FileIO(bench_file, "r") as f:
   | 84 | +                bench_module.__file__ = bench_file
   | 85 | +                exec(compile(f.readall(), bench_file, "exec"), bench_module.__dict__)
   | 86 | +            return bench_module
59 | 87 |
   | 88 | +    def _get_attr(self, attr_name):
   | 89 | +        if hasattr(self.bench_module, attr_name):
   | 90 | +            return getattr(self.bench_module, attr_name)
60 | 91 |
61 |    | -def _call_if_defined(obj, attr_name):
62 |    | -    if hasattr(obj, attr_name):
63 |    | -        attr = getattr(obj, attr_name)
64 |    | -        if hasattr(attr, '__call__'):
   | 92 | +    def _call_attr(self, attr_name):
   | 93 | +        attr = self._get_attr(attr_name)
   | 94 | +        if attr and hasattr(attr, '__call__'):
65 | 95 |             attr()
66 | 96 |
   |  97 | +    def run(self):
   |  98 | +        if self.verbose:
   |  99 | +            print(_HRULE)
   | 100 | +            print(self.bench_module.__name__)
   | 101 | +            print(_HRULE)
   | 102 | +
   | 103 | +        print("### warming up ... ")
   | 104 | +        self._call_attr(ATTR_WARMUP)
   | 105 | +        print("### running benchmark ... ")
   | 106 | +
   | 107 | +        bench_func = self._get_attr(ATTR_BENCHMARK)
   | 108 | +        if bench_func and hasattr(bench_func, '__call__'):
   | 109 | +            for i in range(self.iterations):
   | 110 | +                start = time()
   | 111 | +                bench_func(*self.bench_args)
   | 112 | +                duration = "%.3f\n" % (time() - start)
   | 113 | +                print("### iteration={}, name={}, duration={}".format(i, self.bench_module.__name__, duration))
67 | 114 |
68 |    | -ATTR_SETUP = '__setup__'
69 |    | -ATTR_WARMUP = '__warmup__'
70 |    | -ATTR_BENCHMARK = '__benchmark__'
71 | 115 |
   | 116 | +def run_benchmark(prog, args):
   | 117 | +    parser = argparse.ArgumentParser(prog=prog, description="Run the specified benchmark.")
   | 118 | +    parser.add_argument("-v", "--verbose", help="Verbose output.", action="store_true")
   | 119 | +    parser.add_argument("-i", "--iterations", help="The number of iterations to run each benchmark.", default=1)
   | 120 | +    parser.add_argument("bench_file", metavar='BENCH', help="Path to the benchmark to execute.", nargs=1)
   | 121 | +    parser.add_argument("bench_args", metavar='ARGS', help="Arguments passed to the benchmark.", nargs='*', default=None)
72 | 122 |
73 |     | -def run_benchmark(name):
74 |     | -    parser = argparse.ArgumentParser(description="Run specified benchmark.")
75 |     | -    parser.add_argument("-i", "--iterations", help="Number of iterations.", type=int)
   | 123 | +    args = parser.parse_args(args)
   | 124 | +    BenchRunner(args.bench_file[0], bench_args=args.bench_args, iterations=args.iterations, verbose=args.verbose).run()
76 | 125 |
77 |     | -    args = parser.parse_args()
78 |     | -    current_module = sys.modules[__name__]
79 |     | -    _call_if_defined(current_module, ATTR_SETUP)
80 |     | -    _call_if_defined(current_module, ATTR_WARMUP)
81 |     | -    bench = getattr(current_module, ATTR_BENCHMARK)
82 |     | -    if not bench:
83 |     | -        raise ValueError('%s not defined for %s'.format(ATTR_BENCHMARK, name))
84 | 126 |
85 |     | -    benchmark(name, iterations=args.iterations)(bench)()
    | 127 | +if __name__ == '__main__':
    | 128 | +    run_benchmark(sys.argv[0], sys.argv[1:])
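For reference, a benchmark module consumed by this runner only needs to expose module-level `__warmup__` and `__benchmark__` callables, matching the `ATTR_WARMUP` and `ATTR_BENCHMARK` names above. A minimal sketch (the file name `fib_bench.py` and the Fibonacci workload are hypothetical, not part of this commit):

```python
# fib_bench.py -- hypothetical benchmark module for this runner.
# BenchRunner looks up the module-level names '__warmup__' and
# '__benchmark__' and calls them if they are callable.

def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

def __warmup__():
    # Called once before timing starts; run a small workload to prime caches.
    fib(15)

def __benchmark__(n="25"):
    # Extra CLI arguments arrive as strings (straight from argv),
    # so convert explicitly before use.
    fib(int(n))
```

Note that `run()` calls `bench_func(*self.bench_args)` with the raw argv strings, so any numeric parameters must be converted inside the benchmark itself.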
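Assuming the diffed file is saved as `bench_runner.py` (name assumed), it can be invoked from the shell or driven programmatically; a sketch under those assumptions:

```python
# Hypothetical driver; 'bench_runner.py' and './fib_bench.py' are assumed names.
# Shell equivalent: python bench_runner.py -v -i 3 ./fib_bench.py 30
# (A leading './' matters: get_bench_module splits on '/' and lists the directory.)
from bench_runner import run_benchmark

# Parses the argv-style list, loads ./fib_bench.py, calls __warmup__,
# then times three iterations of __benchmark__("30").
run_benchmark("bench_runner", ["-v", "-i", "3", "./fib_bench.py", "30"])
```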