Skip to content

Commit 690c090

Browse files
committed
add drop specifications
1 parent d677af4 commit 690c090

File tree

1 file changed

+23
-11
lines changed

1 file changed

+23
-11
lines changed

develop-docs/sdk/telemetry/telemetry-buffer/backend-telemetry-buffer.mdx

Lines changed: 23 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,8 @@ Introduce a `Buffer` layer between the `Client` and the `Transport`. This `Buffe
6464
#### How the Buffer works
6565

6666
- **Smart batching**: Logs are batched into single requests; errors, transactions, and monitors are sent immediately.
67-
- **Pre-send rate limiting**: The scheduler checks rate limits before dispatching, avoiding unnecessary requests while keeping items buffered.
67+
- **Pre-send rate limiting**: The scheduler checks rate limits before serialization to avoid unnecessary processing. When a telemetry category is rate-limited, the selected batch should
68+
be dropped to avoid filling up the buffers.
6869
- **Category isolation**: Separate ring buffers for each telemetry type prevent head-of-line blocking.
6970
- **Weighted scheduling**: High-priority telemetry gets sent more frequently via round-robin selection.
7071
- **Transport compatibility**: Works with existing HTTP transport implementations without modification.
@@ -136,11 +137,20 @@ The scheduler runs as a background worker, coordinating the flow of telemetry fr
136137

137138
#### Transport
138139

139-
The transport layer handles HTTP communication with Sentry's ingestion endpoints:
140+
The transport layer handles HTTP communication with Sentry's ingestion endpoints.
141+
142+
<Alert level="info">
143+
144+
The only layer responsible for dropping events is the Buffer. If the transport is full, the Buffer should drop the batch.
145+
146+
</Alert>
140147

141148
### Configuration
142149

143-
#### Buffer Options
150+
#### Transport Options
151+
- **Capacity**: 1000 items.
152+
153+
#### Telemetry Buffer Options
144154
- **Capacity**: 100 items for errors and check-ins, 10*BATCH_SIZE for logs, 1000 for transactions.
145155
- **Overflow policy**: `drop_oldest`.
146156
- **Batch size**: 1 for errors and monitors (immediate send), 100 for logs.
@@ -258,13 +268,15 @@ func (s *Scheduler) processNextBatch() {
258268

259269
// Find ready buffer for this priority
260270
for category, buffer := range s.buffers {
261-
if buffer.Priority() == priority &&
262-
!s.transport.IsRateLimited(category) &&
263-
buffer.IsReadyToFlush() {
264-
items := buffer.PollIfReady()
265-
s.sendItems(category, items)
266-
// only process one batch per tick
267-
break
271+
if buffer.Priority() == priority && buffer.IsReadyToFlush() {
272+
items := buffer.PollIfReady()
273+
if s.transport.IsRateLimited(category) {
274+
// drop the batch and return
275+
return
276+
}
277+
s.sendItems(category, items)
278+
// only process one batch per tick
279+
break
268280
}
269281
}
270282
}
@@ -276,7 +288,7 @@ func (s *Scheduler) processNextBatch() {
276288
```go
277289
func (s *Scheduler) flush() {
278290
// should process all store buffers and send to transport
279-
for category, buffer := range s.buffers {
291+
for category, buffer := range s.buffers {
280292
if !buffer.IsEmpty() {
281293
s.processItems(buffer, category, true)
282294
}

0 commit comments

Comments
 (0)