Commit ea20afc

feat: Leverage Suppression Context in Sdk (open-telemetry#2868)

Authored by cijothomas and utpilla
Co-authored-by: Utkarsh Umesan Pillai <[email protected]>
1 parent e52efee · commit ea20afc
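
For background on what "suppression context" means here: while the SDK is doing its own work (for example, exporting a batch), it marks the current Context as suppressed so that telemetry generated on that code path is dropped instead of being fed back into the pipeline. The sketch below is only an illustration of that idea; the `Context::enter_telemetry_suppressed_scope()` name is an assumption about the `opentelemetry` crate's API and does not appear in this diff.

```rust
use opentelemetry::Context;

fn do_sdk_internal_work() {
    // Assumed API: the guard flags the current Context as "telemetry
    // suppressed" for as long as it stays alive.
    let _guard = Context::enter_telemetry_suppressed_scope();

    // Telemetry emitted here by components that propagate this Context is
    // dropped by the SDK instead of being exported again.
} // guard dropped; suppression ends
```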

2 files changed (+14, -76 lines)

examples/basic.rs (12 additions, 9 deletions)

@@ -16,16 +16,19 @@ fn main() {
         .with_simple_exporter(exporter)
         .build();
 
-    // For the OpenTelemetry layer, add a tracing filter to filter events from
-    // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates
-    // like reqwest/tonic etc.) from being sent back to OTel itself, thus
-    // preventing infinite telemetry generation. The filter levels are set as
-    // follows:
+    // To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal
+    // logging is properly suppressed. However, logs emitted by external components
+    // (such as reqwest, tonic, etc.) are not suppressed as they do not propagate
+    // OpenTelemetry context. Until this issue is addressed
+    // (https://github.com/open-telemetry/opentelemetry-rust/issues/2877),
+    // filtering like this is the best way to suppress such logs.
+    //
+    // The filter levels are set as follows:
     // - Allow `info` level and above by default.
-    // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely.
-    //   Note: This will also drop events from crates like `tonic` etc. even when
-    //   they are used outside the OTLP Exporter. For more details, see:
-    //   https://github.com/open-telemetry/opentelemetry-rust/issues/761
+    // - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`.
+    //
+    // Note: This filtering will also drop logs from these components even when
+    //   they are used outside of the OTLP Exporter.
     let filter_otel = EnvFilter::new("info")
         .add_directive("hyper=off".parse().unwrap())
         .add_directive("opentelemetry=off".parse().unwrap())

src/layer.rs (2 additions, 67 deletions)

@@ -289,13 +289,11 @@ mod tests {
     use opentelemetry::{logs::AnyValue, Key};
     use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult};
     use opentelemetry_sdk::logs::{InMemoryLogExporter, LogProcessor};
-    use opentelemetry_sdk::logs::{LogBatch, LogExporter};
     use opentelemetry_sdk::logs::{SdkLogRecord, SdkLoggerProvider};
     use opentelemetry_sdk::trace::{Sampler, SdkTracerProvider};
-    use tracing::{error, warn};
+    use tracing::error;
     use tracing_subscriber::prelude::__tracing_subscriber_SubscriberExt;
-    use tracing_subscriber::util::SubscriberInitExt;
-    use tracing_subscriber::{EnvFilter, Layer};
+    use tracing_subscriber::Layer;
 
     pub fn attributes_contains(log_record: &SdkLogRecord, key: &Key, value: &AnyValue) -> bool {
         log_record
@@ -313,69 +311,6 @@
     }
 
     // cargo test --features=testing
-
-    #[derive(Clone, Debug, Default)]
-    struct ReentrantLogExporter;
-
-    impl LogExporter for ReentrantLogExporter {
-        async fn export(&self, _batch: LogBatch<'_>) -> OTelSdkResult {
-            // This will cause a deadlock as the export itself creates a log
-            // while still within the lock of the SimpleLogProcessor.
-            warn!(name: "my-event-name", target: "reentrant", event_id = 20, user_name = "otel", user_email = "[email protected]");
-            Ok(())
-        }
-    }
-
-    #[test]
-    #[ignore = "See issue: https://github.com/open-telemetry/opentelemetry-rust/issues/1745"]
-    fn simple_processor_deadlock() {
-        let exporter: ReentrantLogExporter = ReentrantLogExporter;
-        let logger_provider = SdkLoggerProvider::builder()
-            .with_simple_exporter(exporter.clone())
-            .build();
-
-        let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
-
-        // Setting subscriber as global as that is the only way to test this scenario.
-        tracing_subscriber::registry().with(layer).init();
-        warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "[email protected]");
-    }
-
-    #[test]
-    #[ignore = "While this test runs fine, this uses global subscriber and does not play well with other tests."]
-    fn simple_processor_no_deadlock() {
-        let exporter: ReentrantLogExporter = ReentrantLogExporter;
-        let logger_provider = SdkLoggerProvider::builder()
-            .with_simple_exporter(exporter.clone())
-            .build();
-
-        let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
-
-        // This filter will prevent the deadlock as the reentrant log will be
-        // ignored.
-        let filter = EnvFilter::new("debug").add_directive("reentrant=error".parse().unwrap());
-        // Setting subscriber as global as that is the only way to test this scenario.
-        tracing_subscriber::registry()
-            .with(filter)
-            .with(layer)
-            .init();
-        warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "[email protected]");
-    }
-
-    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
-    #[ignore = "While this test runs fine, this uses global subscriber and does not play well with other tests."]
-    async fn batch_processor_no_deadlock() {
-        let exporter: ReentrantLogExporter = ReentrantLogExporter;
-        let logger_provider = SdkLoggerProvider::builder()
-            .with_batch_exporter(exporter.clone())
-            .build();
-
-        let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
-
-        tracing_subscriber::registry().with(layer).init();
-        warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "[email protected]");
-    }
-
     #[test]
     fn tracing_appender_standalone() {
         // Arrange
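
The deleted tests covered re-entrancy: an exporter that itself emits a `tracing` log while the `SimpleLogProcessor` is exporting, which previously either deadlocked (issue #1745) or required an EnvFilter workaround. With the SDK now entering a suppression context around its own work, that re-entrant log is dropped rather than fed back into the pipeline, so the workaround tests are no longer needed. Below is a rough standalone sketch of the scenario, assembled only from types that appear in the removed code; the comments describe the intended post-change behavior and are not assertions taken from these tests.

```rust
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use opentelemetry_sdk::error::OTelSdkResult;
use opentelemetry_sdk::logs::{LogBatch, LogExporter, SdkLoggerProvider};
use tracing::warn;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;

#[derive(Clone, Debug, Default)]
struct ReentrantLogExporter;

impl LogExporter for ReentrantLogExporter {
    async fn export(&self, _batch: LogBatch<'_>) -> OTelSdkResult {
        // Emitted while the SimpleLogProcessor is still exporting. Before this
        // change it re-entered the processor (deadlock risk); with suppression
        // context active during export, the SDK drops it instead.
        warn!(name: "my-event-name", target: "reentrant", event_id = 20);
        Ok(())
    }
}

fn main() {
    let logger_provider = SdkLoggerProvider::builder()
        .with_simple_exporter(ReentrantLogExporter)
        .build();

    let layer = OpenTelemetryTracingBridge::new(&logger_provider);
    tracing_subscriber::registry().with(layer).init();

    // This application log is exported normally; the log emitted inside
    // `export` above is suppressed rather than exported again.
    warn!(name: "my-event-name", target: "my-system", event_id = 20);
}
```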
