Skip to content

Commit 43f8b39

Browse files
authored
[tests] Add benchmarks (#952)
* Move to pytest-benchmarks and add to CI
* Add tests for more tracer functionality
* Save benchmark results to CircleCI artifact
* Remove outdated documentation
1 parent d2377d6 commit 43f8b39

File tree

4 files changed

+93
-71
lines changed

4 files changed

+93
-71
lines changed

.circleci/config.yml

Lines changed: 22 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -675,6 +675,24 @@ jobs:
675675
- *persist_to_workspace_step
676676
- *save_cache_step
677677

678+
benchmarks:
679+
docker:
680+
- *test_runner
681+
resource_class: *resource_class
682+
steps:
683+
- checkout
684+
- *restore_cache_step
685+
- run:
686+
command: |
687+
mkdir -p /tmp/test-reports
688+
tox -e 'benchmarks-{py27,py34,py35,py36,py37}' --result-json /tmp/benchmarks.results -- --benchmark-storage=file:///tmp/test-reports/ --benchmark-autosave
689+
- store_test_results:
690+
path: /tmp/test-reports
691+
- store_artifacts:
692+
path: /tmp/test-reports
693+
- *persist_to_workspace_step
694+
- *save_cache_step
695+
678696
deploy_dev:
679697
# build the master branch releasing development docs and wheels
680698
docker:
@@ -806,6 +824,9 @@ workflows:
806824
- algoliasearch:
807825
requires:
808826
- flake8
827+
- benchmarks:
828+
requires:
829+
- flake8
809830
- boto:
810831
requires:
811832
- flake8
@@ -951,6 +972,7 @@ workflows:
951972
- aiopg
952973
- asyncio
953974
- algoliasearch
975+
- benchmarks
954976
- boto
955977
- bottle
956978
- cassandra

README.md

Lines changed: 0 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -88,12 +88,3 @@ the CLI can be found at https://circleci.com/docs/2.0/local-cli/.
8888
After installing the `circleci` CLI, you can run jobs by name. For example:
8989

9090
$ circleci build --job django
91-
92-
93-
### Benchmarking
94-
95-
When two or more approaches must be compared, please write a benchmark in the
96-
[benchmark.py](tests/benchmark.py) module so that we can measure the efficiency
97-
of the algorithm. To run your benchmark, just:
98-
99-
$ python -m tests.benchmark

tests/benchmark.py

Lines changed: 68 additions & 62 deletions
Original file line number | Diff line number | Diff line change
@@ -1,86 +1,92 @@
1-
import timeit
2-
31
from ddtrace import Tracer
2+
import pytest
43

54
from .test_tracer import DummyWriter
6-
from os import getpid
7-
85

9-
REPEAT = 10
10-
NUMBER = 10000
116

7+
@pytest.fixture
8+
def tracer():
9+
tracer = Tracer()
10+
tracer.writer = DummyWriter()
11+
return tracer
1212

13-
def trace_error(tracer):
14-
# explicit vars
15-
with tracer.trace('a', service='s', resource='r', span_type='t'):
16-
1 / 0
1713

14+
def test_tracer_context(benchmark, tracer):
15+
def func(tracer):
16+
with tracer.trace('a', service='s', resource='r', span_type='t'):
17+
pass
1818

19-
def benchmark_tracer_trace():
20-
tracer = Tracer()
21-
tracer.writer = DummyWriter()
19+
benchmark(func, tracer)
2220

23-
# testcase
24-
def trace(tracer):
25-
# explicit vars
26-
with tracer.trace('a', service='s', resource='r', span_type='t') as s:
27-
s.set_tag('a', 'b')
28-
s.set_tag('b', 1)
29-
with tracer.trace('another.thing'):
30-
pass
31-
with tracer.trace('another.thing'):
32-
pass
33-
34-
# benchmark
35-
print('## tracer.trace() benchmark: {} loops ##'.format(NUMBER))
36-
timer = timeit.Timer(lambda: trace(tracer))
37-
result = timer.repeat(repeat=REPEAT, number=NUMBER)
38-
print('- trace execution time: {:8.6f}'.format(min(result)))
39-
40-
41-
def benchmark_tracer_wrap():
42-
tracer = Tracer()
43-
tracer.writer = DummyWriter()
4421

45-
# testcase
22+
def test_tracer_wrap_staticmethod(benchmark, tracer):
4623
class Foo(object):
4724
@staticmethod
4825
@tracer.wrap()
49-
def s():
26+
def func():
5027
return 0
5128

29+
f = Foo()
30+
benchmark(f.func)
31+
32+
33+
def test_tracer_wrap_classmethod(benchmark, tracer):
34+
class Foo(object):
5235
@classmethod
5336
@tracer.wrap()
54-
def c(cls):
37+
def func(cls):
5538
return 0
5639

40+
f = Foo()
41+
benchmark(f.func)
42+
43+
44+
def test_tracer_wrap_instancemethod(benchmark, tracer):
45+
class Foo(object):
5746
@tracer.wrap()
58-
def m(self):
47+
def func(self):
5948
return 0
6049

6150
f = Foo()
51+
benchmark(f.func)
52+
53+
54+
def test_tracer_start_span(benchmark, tracer):
55+
benchmark(tracer.start_span, 'benchmark')
56+
57+
58+
def test_tracer_start_finish_span(benchmark, tracer):
59+
def func(tracer):
60+
s = tracer.start_span('benchmark')
61+
s.finish()
62+
63+
benchmark(func, tracer)
64+
65+
66+
def test_trace_simple_trace(benchmark, tracer):
67+
def func(tracer):
68+
with tracer.trace('parent'):
69+
for i in range(5):
70+
with tracer.trace('child') as c:
71+
c.set_tag('i', i)
72+
73+
benchmark(func, tracer)
74+
75+
76+
def test_tracer_large_trace(benchmark, tracer):
77+
import random
78+
79+
# generate trace with 1024 spans
80+
@tracer.wrap()
81+
def func(tracer, level=0):
82+
span = tracer.current_span()
83+
84+
# do some work
85+
num = random.randint(1, 10)
86+
span.set_tag('num', num)
87+
88+
if level < 10:
89+
func(tracer, level+1)
90+
func(tracer, level+1)
6291

63-
# benchmark
64-
print('## tracer.trace() wrapper benchmark: {} loops ##'.format(NUMBER))
65-
timer = timeit.Timer(f.s)
66-
result = timer.repeat(repeat=REPEAT, number=NUMBER)
67-
print('- staticmethod execution time: {:8.6f}'.format(min(result)))
68-
timer = timeit.Timer(f.c)
69-
result = timer.repeat(repeat=REPEAT, number=NUMBER)
70-
print('- classmethod execution time: {:8.6f}'.format(min(result)))
71-
timer = timeit.Timer(f.m)
72-
result = timer.repeat(repeat=REPEAT, number=NUMBER)
73-
print('- method execution time: {:8.6f}'.format(min(result)))
74-
75-
76-
def benchmark_getpid():
77-
timer = timeit.Timer(getpid)
78-
result = timer.repeat(repeat=REPEAT, number=NUMBER)
79-
print('## getpid wrapper benchmark: {} loops ##'.format(NUMBER))
80-
print('- getpid execution time: {:8.6f}'.format(min(result)))
81-
82-
83-
if __name__ == '__main__':
84-
benchmark_tracer_wrap()
85-
benchmark_tracer_trace()
86-
benchmark_getpid()
92+
benchmark(func, tracer)

tox.ini

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -118,6 +118,7 @@ envlist =
118118
py37-opentracer_gevent-gevent{13,14}
119119
# Unit tests: pytest based test suite that do not require any additional dependency
120120
unit_tests-{py27,py34,py35,py36,py37}
121+
benchmarks-{py27,py34,py35,py36,py37}
121122

122123
[testenv]
123124
usedevelop = True
@@ -134,6 +135,7 @@ deps =
134135
!ddtracerun: wrapt
135136
!msgpack03-!msgpack04-!msgpack05-!ddtracerun: msgpack-python
136137
pytest>=3
138+
pytest-benchmark
137139
opentracing
138140
psutil
139141
# test dependencies installed in all envs
@@ -389,6 +391,7 @@ commands =
389391
test_logging: pytest {posargs} tests/contrib/logging/
390392
# Unit tests: pytest based test suite that do not require any additional dependency.
391393
unit_tests: pytest {posargs} tests/unit
394+
benchmarks: pytest --benchmark-only {posargs} tests/benchmark.py
392395

393396
setenv =
394397
DJANGO_SETTINGS_MODULE = app.settings

0 commit comments

Comments
 (0)