@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import itertools
 import logging
 import os
@@ -219,6 +220,47 @@ def test_metrics(encoding, monkeypatch): |
 
     with override_global_config(dict(health_metrics_enabled=True)):
         t = Tracer()
+        assert t._partial_flush_min_spans == 500
+        statsd_mock = mock.Mock()
+        t._writer.dogstatsd = statsd_mock
+        assert t._writer._report_metrics
+        with mock.patch("ddtrace.internal.writer.log") as log:
+            for _ in range(5):
+                spans = []
+                for i in range(3000):
+                    spans.append(t.trace("op"))
+                # Since _partial_flush_min_spans is set to 500, these spans are
+                # flushed in 6 batches of 500 spans each.
+                for s in spans:
+                    s.finish()
+
+            t.shutdown()
+            log.warning.assert_not_called()
+            log.error.assert_not_called()
+
+        statsd_mock.distribution.assert_has_calls(
+            [
+                mock.call("datadog.tracer.http.sent.bytes", AnyInt()),
+                mock.call("datadog.tracer.http.sent.traces", 30),
+                mock.call("datadog.tracer.writer.accepted.traces", 30, tags=[]),
+                mock.call("datadog.tracer.buffer.accepted.traces", 30, tags=[]),
+                mock.call("datadog.tracer.buffer.accepted.spans", 15000, tags=[]),
+                mock.call("datadog.tracer.http.requests", 1, tags=[]),
+                mock.call("datadog.tracer.http.sent.bytes", AnyInt(), tags=[]),
+            ],
+            any_order=True,
+        )
+
+
+@allencodings
+def test_metrics_partial_flush_disabled(encoding, monkeypatch):
+    monkeypatch.setenv("DD_TRACE_API_VERSION", encoding)
+
+    with override_global_config(dict(health_metrics_enabled=True)):
+        t = Tracer()
+        t.configure(
+            partial_flush_enabled=False,
+        )
         statsd_mock = mock.Mock()
         t._writer.dogstatsd = statsd_mock
         assert t._writer._report_metrics
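
For context, the metric values asserted above follow directly from the partial-flush arithmetic: with the default _partial_flush_min_spans of 500, each trace of 3000 finished spans is flushed in 6 batches of 500, so 5 traces produce 30 flushed trace payloads and 15000 spans total. A minimal sketch of that arithmetic (plain Python, not the ddtrace writer internals):

    # Sketch only: models the batch math behind the assertions above,
    # not the actual ddtrace flushing code.
    def expected_flush_counts(traces, spans_per_trace, min_spans=500):
        batches_per_trace = spans_per_trace // min_spans  # 3000 // 500 == 6
        return traces * batches_per_trace, traces * spans_per_trace

    payloads, spans = expected_flush_counts(traces=5, spans_per_trace=3000)
    assert (payloads, spans) == (30, 15000)
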
@@ -248,8 +290,37 @@ def test_metrics(encoding, monkeypatch): |
 @allencodings
 def test_single_trace_too_large(encoding, monkeypatch):
     monkeypatch.setenv("DD_TRACE_API_VERSION", encoding)
+    # Set the writer interval to 5 seconds so the buffer can hold larger traces.
+    monkeypatch.setenv("DD_TRACE_WRITER_INTERVAL_SECONDS", "5.0")
 
     t = Tracer()
+    assert t._partial_flush_enabled is True
+    with mock.patch("ddtrace.internal.writer.log") as log:
+        key = "a" * 250
+        with t.trace("huge"):
+            for i in range(200000):
+                with t.trace("operation") as s:
+                    # Make the strings unique so the v0.5 encoding cannot compress the data.
+                    s.set_tag(key + str(i), key + str(i))
+        t.shutdown()
+        log.warning.assert_any_call(
+            "trace buffer (%s traces %db/%db) cannot fit trace of size %db, dropping",
+            AnyInt(),
+            AnyInt(),
+            AnyInt(),
+            AnyInt(),
+        )
+        log.error.assert_not_called()
+
+
+@allencodings
+def test_single_trace_too_large_partial_flush_disabled(encoding, monkeypatch):
+    monkeypatch.setenv("DD_TRACE_API_VERSION", encoding)
+
+    t = Tracer()
+    t.configure(
+        partial_flush_enabled=False,
+    )
     with mock.patch("ddtrace.internal.writer.log") as log:
         with t.trace("huge"):
             for i in range(200000):
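
The warning asserted in this hunk is the writer's buffer-overflow path: a single encoded trace larger than the buffer is dropped with a warning rather than an error. A minimal sketch of that pattern, assuming a hypothetical max_size limit and pre-encoded payloads (illustrative only, not the ddtrace implementation):

    import logging

    log = logging.getLogger(__name__)

    def put(buffer, encoded, max_size=8 << 20):
        # Sketch: a trace that alone exceeds the buffer is dropped with a warning.
        if len(encoded) > max_size:
            log.warning(
                "trace buffer (%s traces %db/%db) cannot fit trace of size %db, dropping",
                len(buffer),
                sum(len(t) for t in buffer),
                max_size,
                len(encoded),
            )
            return
        buffer.append(encoded)
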
@@ -717,7 +788,6 @@ def test_partial_flush_log(run_python_code_in_subprocess, encoding, monkeypatch) |
     t = Tracer()
 
     t.configure(
-        partial_flush_enabled=True,
         partial_flush_min_spans=partial_flush_min_spans,
     )
 
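
The removed partial_flush_enabled=True reflects that partial flushing is now on by default (the assert t._partial_flush_enabled is True in the second hunk checks exactly this), so Tracer.configure is only needed to change the threshold or to opt out. Mirroring the calls shown in the diff above:

    from ddtrace import Tracer

    t = Tracer()
    # Partial flushing is on by default; only the threshold needs configuring.
    t.configure(partial_flush_min_spans=500)
    # Disabling it is now an explicit opt-out.
    t.configure(partial_flush_enabled=False)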