@@ -8,6 +8,7 @@ use cubestore::util::{metrics, spawn_malloc_trim_loop};
 use cubestore::{app_metrics, CubeError};
 use datafusion::cube_ext;
 use log::debug;
+use opentelemetry_sdk::trace::TracerProvider;
 use serde_json::Value;
 use std::collections::HashMap;
 use std::time::Duration;
@@ -80,8 +81,13 @@ fn main() {
     }
     let runtime = tokio_builder.build().unwrap();
     runtime.block_on(async move {
+        // Holding trace_provider to automatically flush spans during drop.
+        // opentelemetry::global::shutdown_tracer_provider() doesn't work correctly in v0.26
+        // @see https://github.com/open-telemetry/opentelemetry-rust/issues/1961
+        let mut tracer_provider: Option<TracerProvider> = None;
+
         if enable_telemetry {
-            init_tracing_telemetry(version);
+            tracer_provider = Some(init_tracing_telemetry(version));
         }
         // TODO: Should this be avoided if otel is configured?
         init_agent_sender().await;
@@ -101,10 +107,8 @@ fn main() {
         stop_on_ctrl_c(&services).await;
         services.wait_processing_loops().await.unwrap();

-        if enable_telemetry {
-            // This still doesn't prevent errors:
-            // OpenTelemetry trace error occurred. cannot send message to batch processor as the channel is closed
-            opentelemetry::global::shutdown_tracer_provider();
+        if let Some(provider) = tracer_provider {
+            let _ = provider.shutdown();
         }
     });
 }
0 commit comments