From 88eeaee00af657862dcbcd9b70c778f71aeabbb2 Mon Sep 17 00:00:00 2001
From: Jordan Gonzalez <30836115+duncanista@users.noreply.github.com>
Date: Thu, 8 Jan 2026 13:48:11 -0500
Subject: [PATCH 1/3] skip logs where keys don't exist

---
 .../enhanced_lambda_metrics.py            | 31 ++++++++++++-------
 .../tests/test_enhanced_lambda_metrics.py | 30 ++++++++++++++++++
 2 files changed, 49 insertions(+), 12 deletions(-)

diff --git a/aws/logs_monitoring/enhanced_lambda_metrics.py b/aws/logs_monitoring/enhanced_lambda_metrics.py
index 2c4ef975..c4352abb 100644
--- a/aws/logs_monitoring/enhanced_lambda_metrics.py
+++ b/aws/logs_monitoring/enhanced_lambda_metrics.py
@@ -289,7 +289,9 @@ def parse_metrics_from_json_report_log(log_message):
     metrics = []
     for record_key, metric_name in RUNTIME_METRICS_BY_RECORD_KEY.items():
-        metric_point_value = record_metrics[record_key]
+        metric_point_value = record_metrics.get(record_key)
+        if metric_point_value is None:
+            continue
         if metric_name in METRIC_ADJUSTMENT_FACTORS:
             metric_point_value *= METRIC_ADJUSTMENT_FACTORS[metric_name]
@@ -301,9 +303,10 @@ def parse_metrics_from_json_report_log(log_message):
             )
         )
-    tags = [
-        f"{MEMORY_ALLOCATED_FIELD_NAME}:{record_metrics[MEMORY_ALLOCATED_RECORD_KEY]}"
-    ]
+    tags = []
+    memory_allocated = record_metrics.get(MEMORY_ALLOCATED_RECORD_KEY)
+    if memory_allocated is not None:
+        tags.append(f"{MEMORY_ALLOCATED_FIELD_NAME}:{memory_allocated}")
     init_duration = record_metrics.get(INIT_DURATION_RECORD_KEY)
     if init_duration:
@@ -317,15 +320,19 @@ def parse_metrics_from_json_report_log(log_message):
     else:
         tags.append("cold_start:false")
-    metrics.append(
-        DatadogMetricPoint(
-            f"{ENHANCED_METRICS_NAMESPACE_PREFIX}.{ESTIMATED_COST_METRIC_NAME}",
-            calculate_estimated_cost(
-                record_metrics[BILLED_DURATION_RECORD_KEY],
-                record_metrics[MEMORY_ALLOCATED_RECORD_KEY],
-            ),
+    # Billed duration only available for On-Demand Lambda functions, for Managed Instances,
+    # billed duration is no longer available.
+    billed_duration = record_metrics.get(BILLED_DURATION_RECORD_KEY)
+    if billed_duration is not None and memory_allocated is not None:
+        metrics.append(
+            DatadogMetricPoint(
+                f"{ENHANCED_METRICS_NAMESPACE_PREFIX}.{ESTIMATED_COST_METRIC_NAME}",
+                calculate_estimated_cost(
+                    billed_duration,
+                    memory_allocated,
+                ),
+            )
         )
-    )
     if record.get("status") == "timeout":
         metrics.append(
diff --git a/aws/logs_monitoring/tests/test_enhanced_lambda_metrics.py b/aws/logs_monitoring/tests/test_enhanced_lambda_metrics.py
index b85d179c..794ee52f 100644
--- a/aws/logs_monitoring/tests/test_enhanced_lambda_metrics.py
+++ b/aws/logs_monitoring/tests/test_enhanced_lambda_metrics.py
@@ -86,6 +86,23 @@ class TestEnhancedLambdaMetrics(unittest.TestCase):
             },
         }
     )
+    managed_instances_metrics_json_report = json.dumps(
+        {
+            "time": "2026-01-08T18:22:35.343Z",
+            "type": "platform.report",
+            "record": {
+                "requestId": "4f423807-598d-47ae-9652-4f7ee31d4d10",
+                "metrics": {
+                    "durationMs": 2.524,
+                },
+                "spans": [
+                    {"name": "responseLatency", "start": "2026-01-08T18:22:35.342Z", "durationMs": 0.642},
+                    {"name": "responseDuration", "start": "2026-01-08T18:22:35.343Z", "durationMs": 0.075},
+                ],
+                "status": "success",
+            },
+        }
+    )
 
     def test_parse_lambda_tags_from_arn(self):
         verify_as_json(
@@ -129,6 +146,19 @@ def test_parse_metrics_from_timeout_json_report_log(self):
         parsed_metrics = parse_metrics_from_json_report_log(self.timeout_json_report)
         verify_as_json(parsed_metrics)
+    def test_parse_metrics_from_partial_metrics_json_report_log(self):
+        """Test that JSON report logs with partial/incomplete metrics don't raise KeyError"""
+        parsed_metrics = parse_metrics_from_json_report_log(self.managed_instances_metrics_json_report)
+        # Should only return metrics that are present (duration in this case)
+        # Should not raise KeyError for missing billedDurationMs, maxMemoryUsedMB, memorySizeMB
+        assert len(parsed_metrics) == 1  # Only duration metric
+        assert parsed_metrics[0].name == "aws.lambda.enhanced.duration"
+        # Duration should be converted from ms to seconds (2.524 * 0.001 = 0.002524)
+        assert parsed_metrics[0].value == 0.002524
+        # Tags should include cold_start:false but NOT memorysize since it's missing
+        assert "cold_start:false" in parsed_metrics[0].tags
+        assert not any(tag.startswith("memorysize:") for tag in parsed_metrics[0].tags)
+
     def test_create_out_of_memory_enhanced_metric(self):
         go_out_of_memory_error = "fatal error: runtime: out of memory"
         self.assertEqual(

From ca28b7d15df7f76e1ce4e7ced476d0acd0162cfc Mon Sep 17 00:00:00 2001
From: Jordan Gonzalez <30836115+duncanista@users.noreply.github.com>
Date: Thu, 8 Jan 2026 13:51:38 -0500
Subject: [PATCH 2/3] update comment

---
 aws/logs_monitoring/enhanced_lambda_metrics.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/aws/logs_monitoring/enhanced_lambda_metrics.py b/aws/logs_monitoring/enhanced_lambda_metrics.py
index c4352abb..0fe2a63e 100644
--- a/aws/logs_monitoring/enhanced_lambda_metrics.py
+++ b/aws/logs_monitoring/enhanced_lambda_metrics.py
@@ -320,8 +320,8 @@ def parse_metrics_from_json_report_log(log_message):
     else:
         tags.append("cold_start:false")
-    # Billed duration only available for On-Demand Lambda functions, for Managed Instances,
-    # billed duration is no longer available.
+    # Billed duration only available for On-Demand Lambda functions, 
+    # for Managed Instances, this is no longer available.
     billed_duration = record_metrics.get(BILLED_DURATION_RECORD_KEY)
     if billed_duration is not None and memory_allocated is not None:
         metrics.append(

From c54278ee45c6dce999b53b5ad3fdcee10dc4097c Mon Sep 17 00:00:00 2001
From: Jordan Gonzalez <30836115+duncanista@users.noreply.github.com>
Date: Thu, 8 Jan 2026 14:08:48 -0500
Subject: [PATCH 3/3] lint

---
 aws/logs_monitoring/enhanced_lambda_metrics.py |  2 +-
 .../tests/test_enhanced_lambda_metrics.py      | 16 +++++++++++++---
 2 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/aws/logs_monitoring/enhanced_lambda_metrics.py b/aws/logs_monitoring/enhanced_lambda_metrics.py
index 0fe2a63e..ab88b0cd 100644
--- a/aws/logs_monitoring/enhanced_lambda_metrics.py
+++ b/aws/logs_monitoring/enhanced_lambda_metrics.py
@@ -320,7 +320,7 @@ def parse_metrics_from_json_report_log(log_message):
     else:
         tags.append("cold_start:false")
-    # Billed duration only available for On-Demand Lambda functions, 
+    # Billed duration only available for On-Demand Lambda functions,
     # for Managed Instances, this is no longer available.
     billed_duration = record_metrics.get(BILLED_DURATION_RECORD_KEY)
     if billed_duration is not None and memory_allocated is not None:
diff --git a/aws/logs_monitoring/tests/test_enhanced_lambda_metrics.py b/aws/logs_monitoring/tests/test_enhanced_lambda_metrics.py
index 794ee52f..b4a71bca 100644
--- a/aws/logs_monitoring/tests/test_enhanced_lambda_metrics.py
+++ b/aws/logs_monitoring/tests/test_enhanced_lambda_metrics.py
@@ -96,8 +96,16 @@ class TestEnhancedLambdaMetrics(unittest.TestCase):
                     "durationMs": 2.524,
                 },
                 "spans": [
-                    {"name": "responseLatency", "start": "2026-01-08T18:22:35.342Z", "durationMs": 0.642},
-                    {"name": "responseDuration", "start": "2026-01-08T18:22:35.343Z", "durationMs": 0.075},
+                    {
+                        "name": "responseLatency",
+                        "start": "2026-01-08T18:22:35.342Z",
+                        "durationMs": 0.642,
+                    },
+                    {
+                        "name": "responseDuration",
+                        "start": "2026-01-08T18:22:35.343Z",
+                        "durationMs": 0.075,
+                    },
                 ],
                 "status": "success",
             },
         }
     )
@@ -148,7 +156,9 @@ def test_parse_metrics_from_timeout_json_report_log(self):
     def test_parse_metrics_from_partial_metrics_json_report_log(self):
         """Test that JSON report logs with partial/incomplete metrics don't raise KeyError"""
-        parsed_metrics = parse_metrics_from_json_report_log(self.managed_instances_metrics_json_report)
+        parsed_metrics = parse_metrics_from_json_report_log(
+            self.managed_instances_metrics_json_report
+        )
         # Should only return metrics that are present (duration in this case)
         # Should not raise KeyError for missing billedDurationMs, maxMemoryUsedMB, memorySizeMB
         assert len(parsed_metrics) == 1  # Only duration metric