|
20 | 20 | from concurrent.futures import ThreadPoolExecutor |
21 | 21 | from unittest.mock import Mock |
22 | 22 |
|
| 23 | +import pytest |
| 24 | + |
23 | 25 | from opentelemetry.sdk._logs import ( |
24 | 26 | LogData, |
25 | 27 | LogRecord, |
26 | 28 | ) |
27 | 29 | from opentelemetry.sdk._logs.export import ( |
28 | | - InMemoryLogExporter, |
| 30 | + BatchLogRecordProcessor, |
29 | 31 | ) |
30 | | -from opentelemetry.sdk._shared_internal import BatchProcessor |
31 | 32 | from opentelemetry.sdk.util.instrumentation import InstrumentationScope |
32 | 33 |
|
33 | 34 | EMPTY_LOG = LogData( |
|
36 | 37 | ) |
37 | 38 |
|
38 | 39 |
|
39 | | -class TestBatchProcessor(unittest.TestCase): |
40 | | - def test_logs_exported_once_batch_size_reached(self): |
| 40 | +# BatchLogRecordProcessor initializes / uses BatchProcessor.
| 41 | +@pytest.mark.parametrize( |
| 42 | + "batch_processor_class,telemetry", [(BatchLogRecordProcessor, EMPTY_LOG)] |
| 43 | +) |
| 44 | +class TestBatchProcessor: |
| 45 | + def test_telemetry_exported_once_batch_size_reached( |
| 46 | + self, batch_processor_class, telemetry |
| 47 | + ): |
41 | 48 | exporter = Mock() |
42 | | - log_record_processor = BatchProcessor( |
| 49 | + batch_processor = batch_processor_class( |
43 | 50 | exporter=exporter, |
44 | 51 | max_queue_size=15, |
45 | 52 | max_export_batch_size=15, |
46 | 53 | # Will not reach this during the test, this sleep should be interrupted when batch size is reached. |
47 | 54 | schedule_delay_millis=30000, |
48 | | - exporting="Log", |
49 | 55 | export_timeout_millis=500, |
50 | 56 | ) |
51 | 57 | before_export = time.time_ns() |
52 | 58 | for _ in range(15): |
53 | | - log_record_processor.emit(EMPTY_LOG) |
| 59 | + batch_processor.emit(telemetry) |
54 | 60 | # Wait a bit for the worker thread to wake up and call export. |
55 | 61 | time.sleep(0.1) |
56 | 62 | exporter.export.assert_called_once() |
57 | 63 | after_export = time.time_ns() |
58 | 64 | # Shows the worker's 30 second sleep was interrupted within a second. |
59 | | - self.assertLess(after_export - before_export, 1e9) |
| 65 | + assert after_export - before_export < 1e9 |
60 | 66 |
|
61 | 67 | # pylint: disable=no-self-use |
62 | | - def test_logs_exported_once_schedule_delay_reached(self): |
| 68 | + def test_telemetry_exported_once_schedule_delay_reached( |
| 69 | + self, batch_processor_class, telemetry |
| 70 | + ): |
63 | 71 | exporter = Mock() |
64 | | - log_record_processor = BatchProcessor( |
| 72 | + batch_processor = batch_processor_class( |
65 | 73 | exporter=exporter, |
66 | 74 | max_queue_size=15, |
67 | 75 | max_export_batch_size=15, |
68 | 76 | schedule_delay_millis=100, |
69 | | - exporting="Log", |
70 | 77 | export_timeout_millis=500, |
71 | 78 | ) |
72 | | - log_record_processor.emit(EMPTY_LOG) |
| 79 | + batch_processor.emit(telemetry) |
73 | 80 | time.sleep(0.2) |
74 | | - exporter.export.assert_called_once_with([EMPTY_LOG]) |
| 81 | + exporter.export.assert_called_once_with([telemetry]) |
75 | 82 |
|
76 | | - def test_logs_flushed_before_shutdown_and_dropped_after_shutdown(self): |
| 83 | + def test_telemetry_flushed_before_shutdown_and_dropped_after_shutdown( |
| 84 | + self, batch_processor_class, telemetry, caplog |
| 85 | + ): |
77 | 86 | exporter = Mock() |
78 | | - log_record_processor = BatchProcessor( |
| 87 | + batch_processor = batch_processor_class( |
79 | 88 | exporter=exporter, |
80 | 89 | # Neither of these thresholds should be hit before test ends. |
81 | 90 | max_queue_size=15, |
82 | 91 | max_export_batch_size=15, |
83 | 92 | schedule_delay_millis=30000, |
84 | | - exporting="Log", |
85 | 93 | export_timeout_millis=500, |
86 | 94 | ) |
87 | 95 | # This log should be flushed because it was written before shutdown. |
88 | | - log_record_processor.emit(EMPTY_LOG) |
89 | | - log_record_processor.shutdown() |
90 | | - exporter.export.assert_called_once_with([EMPTY_LOG]) |
91 | | - self.assertTrue(exporter._stopped) |
92 | | - |
93 | | - with self.assertLogs(level="INFO") as log: |
94 | | - # This log should not be flushed. |
95 | | - log_record_processor.emit(EMPTY_LOG) |
96 | | - self.assertEqual(len(log.output), 1) |
97 | | - self.assertEqual(len(log.records), 1) |
98 | | - self.assertIn("Shutdown called, ignoring Log.", log.output[0]) |
| 96 | + batch_processor.emit(telemetry) |
| 97 | + batch_processor.shutdown() |
| 98 | + exporter.export.assert_called_once_with([telemetry]) |
| 99 | + assert batch_processor._shutdown is True |
| 100 | + |
| 101 | + # This should not be flushed. |
| 102 | + batch_processor.emit(telemetry) |
| 103 | + assert len(caplog.records) == 1 |
| 104 | + assert "Shutdown called, ignoring" in caplog.text |
99 | 105 | exporter.export.assert_called_once() |
100 | 106 |
|
101 | 107 | # pylint: disable=no-self-use |
102 | | - def test_force_flush_flushes_logs(self): |
| 108 | + def test_force_flush_flushes_telemetry( |
| 109 | + self, batch_processor_class, telemetry |
| 110 | + ): |
103 | 111 | exporter = Mock() |
104 | | - log_record_processor = BatchProcessor( |
| 112 | + batch_processor = batch_processor_class( |
105 | 113 | exporter=exporter, |
106 | 114 | # Neither of these thresholds should be hit before test ends. |
107 | 115 | max_queue_size=15, |
108 | 116 | max_export_batch_size=15, |
109 | 117 | schedule_delay_millis=30000, |
110 | | - exporting="Log", |
111 | 118 | export_timeout_millis=500, |
112 | 119 | ) |
113 | 120 | for _ in range(10): |
114 | | - log_record_processor.emit(EMPTY_LOG) |
115 | | - log_record_processor.force_flush() |
116 | | - exporter.export.assert_called_once_with([EMPTY_LOG for _ in range(10)]) |
| 121 | + batch_processor.emit(telemetry) |
| 122 | + batch_processor.force_flush() |
| 123 | + exporter.export.assert_called_once_with([telemetry for _ in range(10)]) |
117 | 124 |
|
118 | | - def test_with_multiple_threads(self): |
119 | | - exporter = InMemoryLogExporter() |
120 | | - log_record_processor = BatchProcessor( |
| 125 | + def test_with_multiple_threads(self, batch_processor_class, telemetry): |
| 126 | + exporter = Mock() |
| 127 | + batch_processor = batch_processor_class( |
121 | 128 | exporter=exporter, |
122 | 129 | max_queue_size=3000, |
123 | 130 | max_export_batch_size=1000, |
124 | 131 | schedule_delay_millis=30000, |
125 | | - exporting="Log", |
126 | 132 | export_timeout_millis=500, |
127 | 133 | ) |
128 | 134 |
|
129 | | - def bulk_log_and_flush(num_logs): |
130 | | - for _ in range(num_logs): |
131 | | - log_record_processor.emit(EMPTY_LOG) |
132 | | - log_record_processor.force_flush() |
| 135 | + def bulk_emit_and_flush(num_emit): |
| 136 | + for _ in range(num_emit): |
| 137 | + batch_processor.emit(telemetry) |
| 138 | + batch_processor.force_flush() |
133 | 139 |
|
134 | 140 | with ThreadPoolExecutor(max_workers=69) as executor: |
135 | 141 | for idx in range(69): |
136 | | - executor.submit(bulk_log_and_flush, idx + 1) |
| 142 | + executor.submit(bulk_emit_and_flush, idx + 1) |
137 | 143 |
|
138 | 144 | executor.shutdown() |
139 | 145 |
|
140 | | - finished_logs = exporter.get_finished_logs() |
141 | | - self.assertEqual(len(finished_logs), 2415) |
| 146 | + # 69 force_flush calls should result in 69 export calls.
| 147 | + assert exporter.export.call_count == 69 |
142 | 148 |
|
143 | 149 | @unittest.skipUnless( |
144 | 150 | hasattr(os, "fork"), |
145 | 151 | "needs *nix", |
146 | 152 | ) |
147 | | - def test_batch_log_record_processor_fork(self): |
148 | | - exporter = InMemoryLogExporter() |
149 | | - log_record_processor = BatchProcessor( |
| 153 | + def test_batch_telemetry_record_processor_fork( |
| 154 | + self, batch_processor_class, telemetry |
| 155 | + ): |
| 156 | + exporter = Mock() |
| 157 | + batch_processor = batch_processor_class( |
150 | 158 | exporter, |
151 | | - max_queue_size=100, |
152 | | - max_export_batch_size=64, |
| 159 | + max_queue_size=200, |
| 160 | + max_export_batch_size=10, |
153 | 161 | schedule_delay_millis=30000, |
154 | | - exporting="Log", |
155 | 162 | export_timeout_millis=500, |
156 | 163 | ) |
157 | | - # These logs should be flushed only from the parent process. |
| 164 | + # This telemetry should be flushed only from the parent process. |
158 | 165 | # _at_fork_reinit should be called in the child process, to |
159 | | - # clear these logs in the child process. |
160 | | - for _ in range(10): |
161 | | - log_record_processor.emit(EMPTY_LOG) |
| 166 | + # clear the logs/spans in the child process. |
| 167 | + for _ in range(9): |
| 168 | + batch_processor.emit(telemetry) |
162 | 169 |
|
163 | 170 | multiprocessing.set_start_method("fork") |
164 | 171 |
|
165 | 172 | def child(conn): |
166 | 173 | for _ in range(100): |
167 | | - log_record_processor.emit(EMPTY_LOG) |
168 | | - log_record_processor.force_flush() |
| 174 | + batch_processor.emit(telemetry) |
| 175 | + batch_processor.force_flush() |
169 | 176 |
|
170 | | - logs = exporter.get_finished_logs() |
171 | | - conn.send(len(logs) == 100) |
| 177 | + # Expect force_flush to export 10 batches of max_export_batch_size (10).
| 178 | + conn.send(exporter.export.call_count == 10) |
172 | 179 | conn.close() |
173 | 180 |
|
174 | 181 | parent_conn, child_conn = multiprocessing.Pipe() |
175 | 182 | process = multiprocessing.Process(target=child, args=(child_conn,)) |
176 | 183 | process.start() |
177 | | - self.assertTrue(parent_conn.recv()) |
| 184 | + assert parent_conn.recv() is True |
178 | 185 | process.join() |
179 | | - log_record_processor.force_flush() |
180 | | - self.assertTrue(len(exporter.get_finished_logs()) == 10) |
| 186 | + batch_processor.force_flush() |
| 187 | + # Single export for the telemetry we emitted at the start of the test. |
| 188 | + assert exporter.export.call_count == 1 |