feat: get tags from top level span from tracer #546
Changes from 5 commits
```diff
@@ -50,7 +50,7 @@ pub const DATADOG_INVOCATION_ERROR_KEY: &str = "x-datadog-invocation-error";
 
 pub struct Processor {
     // Buffer containing context of the previous 5 invocations
-    pub context_buffer: ContextBuffer,
+    pub context_buffer: Arc<Mutex<ContextBuffer>>,
     // Helper to infer span information
     inferrer: SpanInferrer,
     // Current invocation span
```
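The reason for the `Arc<Mutex<…>>` wrapper is that the buffer is no longer owned by the `Processor` alone; it now has to be shared with the trace-processing path as well. A minimal sketch of that sharing pattern, with an illustrative stand-in type rather than the crate's real `ContextBuffer`:

```rust
use std::sync::{Arc, Mutex};

// Stand-in for the crate's ContextBuffer, just to keep this sketch self-contained.
#[derive(Default)]
struct ContextBuffer {
    request_ids: Vec<String>,
}

fn main() {
    // Create the buffer once...
    let context_buffer = Arc::new(Mutex::new(ContextBuffer::default()));

    // ...then hand cheap Arc clones to each component that needs it.
    // Every clone points at the same underlying buffer, and the Mutex
    // serializes access between them.
    let for_processor = Arc::clone(&context_buffer);
    let for_trace_processor = Arc::clone(&context_buffer);

    for_processor
        .lock()
        .expect("lock poisoned")
        .request_ids
        .push("req-1".to_string());

    // The other handle observes the same data.
    assert_eq!(
        for_trace_processor.lock().expect("lock poisoned").request_ids.len(),
        1
    );
}
```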
```diff
@@ -78,6 +78,7 @@ impl Processor {
         config: Arc<config::Config>,
         aws_config: &AwsConfig,
         metrics_aggregator: Arc<Mutex<MetricsAggregator>>,
+        context_buffer: Arc<Mutex<ContextBuffer>>,
     ) -> Self {
         let service = config.service.clone().unwrap_or(String::from("aws.lambda"));
         let resource = tags_provider
```
```diff
@@ -87,7 +88,7 @@ impl Processor {
         let propagator = DatadogCompositePropagator::new(Arc::clone(&config));
 
         Processor {
-            context_buffer: ContextBuffer::default(),
+            context_buffer,
             inferrer: SpanInferrer::new(config.service_mapping.clone()),
             span: create_empty_span(String::from("aws.lambda"), resource, service),
             cold_start_span: None,
```
```diff
@@ -107,7 +108,8 @@ impl Processor {
         self.reset_state();
         self.set_init_tags();
 
-        self.context_buffer.create_context(request_id.clone());
+        let mut context_buffer = self.context_buffer.lock().expect("lock poisoned");
+        context_buffer.create_context(&request_id);
         if self.config.enhanced_metrics {
             // Collect offsets for network and cpu metrics
             let network_offset: Option<NetworkData> = proc::get_network_data().ok();
```
```diff
@@ -130,9 +132,9 @@ impl Processor {
                 tmp_chan_tx,
                 process_chan_tx,
             });
-            self.context_buffer
-                .add_enhanced_metric_data(&request_id, enhanced_metric_offsets);
+            context_buffer.add_enhanced_metric_data(&request_id, enhanced_metric_offsets);
         }
+        drop(context_buffer);
 
         // Increment the invocation metric
         self.enhanced_metrics.increment_invocation_metric();
```
```diff
@@ -164,7 +166,8 @@ impl Processor {
         let mut cold_start = false;
 
         // If it's empty, then we are in a cold start
-        if self.context_buffer.is_empty() {
+        let context_buffer = self.context_buffer.lock().expect("lock poisoned");
+        if context_buffer.is_empty() {
             let now = Instant::now();
             let time_since_sandbox_init = now.duration_since(self.aws_config.sandbox_init_time);
             if time_since_sandbox_init.as_millis() > PROACTIVE_INITIALIZATION_THRESHOLD_MS.into() {
```
```diff
@@ -178,6 +181,7 @@ impl Processor {
             self.enhanced_metrics.set_runtime_tag(&runtime);
             self.runtime = Some(runtime);
         }
+        drop(context_buffer);
 
         if proactive_initialization {
             self.span.meta.insert(
```
```diff
@@ -244,12 +248,14 @@ impl Processor {
             .as_nanos()
             .try_into()
             .unwrap_or_default();
-        self.context_buffer.add_start_time(&request_id, start_time);
+        let mut context_buffer = self.context_buffer.lock().expect("lock poisoned");
+        context_buffer.add_start_time(&request_id, start_time);
         self.span.start = start_time;
     }
 
     #[allow(clippy::too_many_arguments)]
     #[allow(clippy::cast_possible_truncation)]
+    #[allow(clippy::await_holding_lock)] // we are dropping the lock before awaiting
     pub async fn on_platform_runtime_done(
         &mut self,
         request_id: &String,
```
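The new `#[allow(clippy::await_holding_lock)]` goes hand in hand with the explicit `drop(context_buffer)` calls: a `std::sync::MutexGuard` should not be held across an `.await`, so the pattern is lock, mutate, release, and only then await. A small sketch of that pattern under the assumption of a tokio runtime (which the surrounding async code implies); the function and channel here are illustrative:

```rust
use std::sync::{Arc, Mutex};

// Lock the std::sync::Mutex, do the synchronous bookkeeping, and release the
// guard explicitly before any .await so no lock is held across a suspension point.
async fn record_then_send(
    buffer: Arc<Mutex<Vec<String>>>,
    tx: tokio::sync::mpsc::Sender<String>,
) {
    let mut guard = buffer.lock().expect("lock poisoned");
    guard.push("runtime-done".to_string());
    drop(guard); // released here, before the await below

    // Safe to await now: no MutexGuard is alive across this point.
    let _ = tx.send("runtime-done".to_string()).await;
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = tokio::sync::mpsc::channel(1);
    record_then_send(Arc::new(Mutex::new(Vec::new())), tx).await;
    assert_eq!(rx.recv().await.as_deref(), Some("runtime-done"));
}
```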
```diff
@@ -260,8 +266,8 @@ impl Processor {
         trace_processor: Arc<dyn trace_processor::TraceProcessor + Send + Sync>,
         trace_agent_tx: Sender<SendData>,
     ) {
-        self.context_buffer
-            .add_runtime_duration(request_id, duration_ms);
+        let mut context_buffer = self.context_buffer.lock().expect("lock poisoned");
+        context_buffer.add_runtime_duration(request_id, duration_ms);
 
         // Set the runtime duration metric
         self.enhanced_metrics
```
```diff
@@ -289,7 +295,7 @@ impl Processor {
             }
         }
 
-        if let Some(context) = self.context_buffer.get(request_id) {
+        if let Some(context) = context_buffer.get(request_id) {
             // `round` is intentionally meant to be a whole integer
             self.span.duration = (context.runtime_duration_ms * MS_TO_NS).round() as i64;
             self.span
```
```diff
@@ -299,6 +305,10 @@ impl Processor {
             // - language
             // - metrics tags (for asm)
 
+            if let Some(tracer_span) = &context.tracer_span {
+                self.span.meta.extend(tracer_span.meta.clone());
+            }
+
             if let Some(offsets) = &context.enhanced_metric_data {
                 self.enhanced_metrics.set_cpu_utilization_enhanced_metrics(
                     offsets.cpu_offset.clone(),
```
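This hunk is the heart of the PR title: whatever tags the tracer set on its top-level span for the invocation get copied onto the extension's `aws.lambda` span. A simplified, self-contained sketch of that merge, using illustrative stand-ins rather than the crate's real `Span`/`Context` types:

```rust
use std::collections::HashMap;

// Illustrative stand-ins for the crate's Span and Context types.
#[derive(Default)]
struct Span {
    meta: HashMap<String, String>,
}

struct Context {
    tracer_span: Option<Span>,
}

// Mirrors the added lines above: extend the invocation span's meta with
// whatever tags the tracer set on its top-level span.
fn merge_tracer_tags(invocation_span: &mut Span, context: &Context) {
    if let Some(tracer_span) = &context.tracer_span {
        invocation_span.meta.extend(tracer_span.meta.clone());
    }
}

fn main() {
    let mut tracer_span = Span::default();
    tracer_span
        .meta
        .insert("custom.tag".to_string(), "from-tracer".to_string());

    let context = Context {
        tracer_span: Some(tracer_span),
    };
    let mut invocation_span = Span::default();

    merge_tracer_tags(&mut invocation_span, &context);
    assert_eq!(
        invocation_span.meta.get("custom.tag").map(String::as_str),
        Some("from-tracer")
    );
}
```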
```diff
@@ -310,6 +320,7 @@ impl Processor {
                 _ = offsets.process_chan_tx.send(());
             }
         }
+        drop(context_buffer);
 
         if let Some(trigger_tags) = self.inferrer.get_trigger_tags() {
             self.span.meta.extend(trigger_tags);
```
```diff
@@ -355,13 +366,14 @@ impl Processor {
             dropped_p0_spans: 0,
         };
 
-        let send_data = trace_processor.process_traces(
+        let send_data: SendData = trace_processor.process_traces(
            config.clone(),
            tags_provider.clone(),
            header_tags,
            vec![traces],
            body_size,
            self.inferrer.span_pointers.clone(),
+            self.context_buffer.clone(),
        );
 
         if let Err(e) = trace_agent_tx.send(send_data).await {
```
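Passing `self.context_buffer.clone()` into `process_traces` gives the trace processor its own handle to the shared buffer. A rough sketch of what a consumer might do with that handle; the function below is hypothetical, and only `ContextBuffer::get` and `runtime_duration_ms` are taken from this diff:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

// Illustrative stand-ins; the real ContextBuffer/Context live in this crate.
struct Context {
    runtime_duration_ms: f64,
}

#[derive(Default)]
struct ContextBuffer {
    contexts: HashMap<String, Context>,
}

impl ContextBuffer {
    fn get(&self, request_id: &str) -> Option<&Context> {
        self.contexts.get(request_id)
    }
}

// Hypothetical consumer on the trace-processing side: lock the shared
// buffer, read what it needs for the request, and release the lock.
fn lookup_runtime_duration(
    context_buffer: &Arc<Mutex<ContextBuffer>>,
    request_id: &str,
) -> Option<f64> {
    let buffer = context_buffer.lock().expect("lock poisoned");
    buffer.get(request_id).map(|ctx| ctx.runtime_duration_ms)
    // guard is dropped here, before the caller does any async work
}

fn main() {
    let mut inner = ContextBuffer::default();
    inner.contexts.insert(
        "req-1".to_string(),
        Context { runtime_duration_ms: 12.5 },
    );
    let shared = Arc::new(Mutex::new(inner));
    assert_eq!(lookup_runtime_duration(&shared, "req-1"), Some(12.5));
}
```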
```diff
@@ -380,7 +392,8 @@ impl Processor {
         // Set the report log metrics
         self.enhanced_metrics.set_report_log_metrics(&metrics);
 
-        if let Some(context) = self.context_buffer.get(request_id) {
+        let context_buffer = self.context_buffer.lock().expect("lock poisoned");
+        if let Some(context) = context_buffer.get(request_id) {
             if context.runtime_duration_ms != 0.0 {
                 let post_runtime_duration_ms = metrics.duration_ms - context.runtime_duration_ms;
```
```diff
@@ -544,7 +557,7 @@ impl Processor {
             tags = DatadogHeaderPropagator::extract_tags(headers);
         }
 
-        // We should always use the generated trace id from the tracer
+        // We should always use the generated span id from the tracer
         if let Some(header) = headers.get(DATADOG_SPAN_ID_KEY) {
             self.span.span_id = header.parse::<u64>().unwrap_or(0);
         }
```
```diff
@@ -654,7 +667,13 @@ mod tests {
             Aggregator::new(EMPTY_TAGS, 1024).expect("failed to create aggregator"),
         ));
 
-        Processor::new(tags_provider, config, &aws_config, metrics_aggregator)
+        Processor::new(
+            tags_provider,
+            config,
+            &aws_config,
+            metrics_aggregator,
+            Arc::new(Mutex::new(ContextBuffer::default())),
+        )
     }
 
     #[test]
```
Review comment (on the `ContextBuffer::default()` passed in the test helper):

> do we only need this for tests? if so can we wrap it in a test macro? Defaults can cause subtle bugs

Reply:

> We do use it for the logs processor, where we have a default empty buffer at the beginning all the time.
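If the default-constructed buffer really were test-only, one way to address the comment would be a `#[cfg(test)]`-gated helper, roughly as sketched below. This is hypothetical and not what the PR does, since the reply notes the logs processor also starts from an empty default:

```rust
use std::sync::{Arc, Mutex};

// Stand-in for the crate's ContextBuffer, just for the sketch.
#[derive(Default)]
struct ContextBuffer;

// Hypothetical test-only constructor, as the reviewer suggests; the PR keeps
// the plain Default because the logs processor also needs an empty buffer.
#[cfg(test)]
fn test_context_buffer() -> Arc<Mutex<ContextBuffer>> {
    Arc::new(Mutex::new(ContextBuffer::default()))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn builds_an_empty_buffer() {
        let buffer = test_context_buffer();
        assert!(buffer.lock().is_ok());
    }
}
```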