Fix OOM in span_processor_with_async_runtime::BatchSpanProcessor
#2793
Changes from 2 commits
```diff
@@ -15,6 +15,7 @@
 };
 use opentelemetry::Context;
 use opentelemetry::{otel_debug, otel_error, otel_warn};
+use std::collections::VecDeque;
 use std::fmt;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Arc;
```
```diff
@@ -185,7 +186,7 @@
 }

 struct BatchSpanProcessorInternal<E, R> {
-    spans: Vec<SpanData>,
+    spans: VecDeque<SpanData>,
     export_tasks: FuturesUnordered<BoxFuture<'static, OTelSdkResult>>,
     runtime: R,
     exporter: E,
```
```diff
@@ -194,7 +195,7 @@
 impl<E: SpanExporter, R: RuntimeChannel> BatchSpanProcessorInternal<E, R> {
     async fn flush(&mut self, res_channel: Option<oneshot::Sender<OTelSdkResult>>) {
-        let export_result = self.export().await;
+        let export_result = self.export().await; // TODO: Move execution to `export_tasks`.
         let task = Box::pin(async move {
             if let Some(channel) = res_channel {
                 // If a response channel is provided, attempt to send the export result through it.
```
```diff
@@ -233,17 +234,24 @@
             match message {
                 // Span has finished, add to buffer of pending spans.
                 BatchMessage::ExportSpan(span) => {
-                    self.spans.push(span);
+                    if self.spans.len() == self.config.max_export_batch_size {
+                        // Replace the oldest span with the new span to avoid suspending messages
+                        // processing.
+                        self.spans.pop_front();
```
Member: This seems to be silently dropping the old span - should we log a warning here?

Author: @lalitb We should. Also, we probably need to bound on …
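A rough sketch of what the reviewer's suggestion could look like inside this match arm. This is not part of the PR; the event name and attribute are hypothetical, though `otel_warn!` is already imported at the top of the file in this diff.

```rust
// Hypothetical sketch only - not part of this PR. The event name and
// attribute are made up for illustration.
if self.spans.len() == self.config.max_export_batch_size {
    otel_warn!(
        name: "BatchSpanProcessor.SpanDroppedDueToFullBuffer",
        reason = "Buffer reached max_export_batch_size; evicting the oldest span."
    );
    self.spans.pop_front();
}
self.spans.push_back(span);
```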
```diff
+                    }
+                    self.spans.push_back(span);

                     if self.spans.len() == self.config.max_export_batch_size {
                         // If concurrent exports are saturated, wait for one to complete.
                         if !self.export_tasks.is_empty()
                             && self.export_tasks.len() == self.config.max_concurrent_exports
                         {
+                            // TODO: Refactor to avoid stopping message processing to not delay
+                            // shutdown/resource set because of export saturation.
                             self.export_tasks.next().await;
                         }

-                        let export_result = self.export().await;
+                        let export_result = self.export().await; // TODO: Move execution to `export_tasks`.
                         let task = async move {
                             if let Err(err) = export_result {
                                 otel_error!(
```
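For context, a minimal, self-contained sketch of the bounded-concurrency pattern the processor applies with `FuturesUnordered`: wait for one in-flight future to finish before adding another. This assumes the `futures` and `tokio` crates; the concurrency limit and the dummy tasks are illustrative only.

```rust
use futures::stream::{FuturesUnordered, StreamExt};
use std::time::Duration;

#[tokio::main]
async fn main() {
    let max_concurrent = 2; // illustrative; the processor uses config.max_concurrent_exports
    let mut tasks = FuturesUnordered::new();

    for i in 0..5u32 {
        // At the concurrency limit: wait for one in-flight task to finish
        // before starting another, mirroring the wait before a new export.
        if tasks.len() == max_concurrent {
            tasks.next().await;
        }
        tasks.push(async move {
            tokio::time::sleep(Duration::from_millis(10)).await;
            i
        });
    }

    // Drain the remaining in-flight tasks.
    while tasks.next().await.is_some() {}
}
```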
```diff
@@ -306,7 +314,7 @@
             return Ok(());
         }

-        let export = self.exporter.export(self.spans.split_off(0));
+        let export = self.exporter.export(self.spans.drain(..).collect());
         let timeout = self.runtime.delay(self.config.max_export_timeout);
         let time_out = self.config.max_export_timeout;
```
```diff
@@ -364,7 +372,7 @@
         let messages = Box::pin(stream::select(message_receiver, ticker));
         let processor = BatchSpanProcessorInternal {
-            spans: Vec::new(),
+            spans: VecDeque::new(),
             export_tasks: FuturesUnordered::new(),
             runtime: timeout_runtime,
             config,
```
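For reference, a self-contained sketch of the standard-library `VecDeque` operations the patch relies on: `pop_front`/`push_back` keep the buffer bounded by evicting the oldest entry, and `drain(..)` hands the whole batch to the exporter in place of the old `split_off(0)` on a `Vec`. The capacity value and `u32` payload here are illustrative, not the SDK's types.

```rust
use std::collections::VecDeque;

fn main() {
    // Illustrative capacity; the processor uses config.max_export_batch_size.
    let max_batch = 3;
    let mut spans: VecDeque<u32> = VecDeque::new();

    for span in 0..5u32 {
        if spans.len() == max_batch {
            // Buffer is full: evict the oldest entry so memory stays bounded.
            spans.pop_front();
        }
        spans.push_back(span);
    }

    // `drain(..)` empties the buffer and yields the batch for export.
    let batch: Vec<u32> = spans.drain(..).collect();
    assert_eq!(batch, vec![2, 3, 4]); // the oldest spans (0 and 1) were dropped
    assert!(spans.is_empty());
}
```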