Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 19 additions & 12 deletions aws/logs_monitoring/enhanced_lambda_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -289,7 +289,9 @@ def parse_metrics_from_json_report_log(log_message):
metrics = []

for record_key, metric_name in RUNTIME_METRICS_BY_RECORD_KEY.items():
metric_point_value = record_metrics[record_key]
metric_point_value = record_metrics.get(record_key)
if metric_point_value is None:
continue

if metric_name in METRIC_ADJUSTMENT_FACTORS:
metric_point_value *= METRIC_ADJUSTMENT_FACTORS[metric_name]
Expand All @@ -301,9 +303,10 @@ def parse_metrics_from_json_report_log(log_message):
)
)

tags = [
f"{MEMORY_ALLOCATED_FIELD_NAME}:{record_metrics[MEMORY_ALLOCATED_RECORD_KEY]}"
]
tags = []
memory_allocated = record_metrics.get(MEMORY_ALLOCATED_RECORD_KEY)
if memory_allocated is not None:
tags.append(f"{MEMORY_ALLOCATED_FIELD_NAME}:{memory_allocated}")

init_duration = record_metrics.get(INIT_DURATION_RECORD_KEY)
if init_duration:
Expand All @@ -317,15 +320,19 @@ def parse_metrics_from_json_report_log(log_message):
else:
tags.append("cold_start:false")

metrics.append(
DatadogMetricPoint(
f"{ENHANCED_METRICS_NAMESPACE_PREFIX}.{ESTIMATED_COST_METRIC_NAME}",
calculate_estimated_cost(
record_metrics[BILLED_DURATION_RECORD_KEY],
record_metrics[MEMORY_ALLOCATED_RECORD_KEY],
),
# Billed duration is only reported for On-Demand Lambda functions;
# it is not available for Managed Instances.
billed_duration = record_metrics.get(BILLED_DURATION_RECORD_KEY)
if billed_duration is not None and memory_allocated is not None:
metrics.append(
DatadogMetricPoint(
f"{ENHANCED_METRICS_NAMESPACE_PREFIX}.{ESTIMATED_COST_METRIC_NAME}",
calculate_estimated_cost(
billed_duration,
memory_allocated,
),
)
)
)

if record.get("status") == "timeout":
metrics.append(
Expand Down
40 changes: 40 additions & 0 deletions aws/logs_monitoring/tests/test_enhanced_lambda_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,31 @@ class TestEnhancedLambdaMetrics(unittest.TestCase):
},
}
)
# Fixture simulating a platform.report log from a Lambda Managed Instance:
# the "metrics" record carries only durationMs — no billedDurationMs,
# memorySizeMB, or maxMemoryUsedMB keys — so parsers must tolerate the
# missing fields.
managed_instances_metrics_json_report = json.dumps(
    {
        "time": "2026-01-08T18:22:35.343Z",
        "type": "platform.report",
        "record": {
            "requestId": "4f423807-598d-47ae-9652-4f7ee31d4d10",
            "metrics": {
                "durationMs": 2.524,
            },
            "spans": [
                {
                    "name": "responseLatency",
                    "start": "2026-01-08T18:22:35.342Z",
                    "durationMs": 0.642,
                },
                {
                    "name": "responseDuration",
                    "start": "2026-01-08T18:22:35.343Z",
                    "durationMs": 0.075,
                },
            ],
            "status": "success",
        },
    }
)

def test_parse_lambda_tags_from_arn(self):
verify_as_json(
Expand Down Expand Up @@ -129,6 +154,21 @@ def test_parse_metrics_from_timeout_json_report_log(self):
parsed_metrics = parse_metrics_from_json_report_log(self.timeout_json_report)
verify_as_json(parsed_metrics)

def test_parse_metrics_from_partial_metrics_json_report_log(self):
    """Test that JSON report logs with partial/incomplete metrics don't raise KeyError.

    The fixture's "metrics" record contains only durationMs — no
    billedDurationMs, memorySizeMB, or maxMemoryUsedMB — so parsing must
    tolerate the missing keys instead of raising.
    """
    parsed_metrics = parse_metrics_from_json_report_log(
        self.managed_instances_metrics_json_report
    )
    # Only the duration metric is present in the fixture, so exactly one
    # metric point should be produced; missing keys must not raise KeyError.
    self.assertEqual(len(parsed_metrics), 1)
    self.assertEqual(parsed_metrics[0].name, "aws.lambda.enhanced.duration")
    # Duration is converted from ms to seconds (2.524 * 0.001 = 0.002524).
    self.assertEqual(parsed_metrics[0].value, 0.002524)
    # cold_start is still tagged, but there must be no memorysize tag since
    # the fixture has no memorySizeMB key.
    self.assertIn("cold_start:false", parsed_metrics[0].tags)
    self.assertFalse(
        any(tag.startswith("memorysize:") for tag in parsed_metrics[0].tags)
    )

def test_create_out_of_memory_enhanced_metric(self):
go_out_of_memory_error = "fatal error: runtime: out of memory"
self.assertEqual(
Expand Down
Loading