16 | 16 | ROLE_SYSTEM = "system"
17 | 17 | ROLE_USER = "user"
18 | 18 | ROLE_ASSISTANT = "assistant"
| 19 | +ROLE_TOOL = "tool"
19 | 20 |
20 | 21 | _logger = logging.getLogger(__name__)
21 | 22 |
@@ -137,6 +138,35 @@ class PatternConfig(TypedDict, total=False):
137 | 138 |         "role": ROLE_USER,
138 | 139 |         "source": "prompt",
139 | 140 |     },
| 141 | +    # OTel GenAI Semantic Convention events used by the latest Strands SDK
| 142 | +    # References:
| 143 | +    # - OTel GenAI SemConv: https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-events/
| 144 | +    # - Strands SDK PR (introduced in v0.1.9): https://github.com/strands-agents/sdk-python/pull/319
| 145 | +    "gen_ai.user.message": {
| 146 | +        "type": PatternType.DIRECT,
| 147 | +        "role": ROLE_USER,
| 148 | +        "source": "prompt",
| 149 | +    },
| 150 | +    "gen_ai.assistant.message": {
| 151 | +        "type": PatternType.DIRECT,
| 152 | +        "role": ROLE_ASSISTANT,
| 153 | +        "source": "output",
| 154 | +    },
| 155 | +    "gen_ai.system.message": {
| 156 | +        "type": PatternType.DIRECT,
| 157 | +        "role": ROLE_SYSTEM,
| 158 | +        "source": "prompt",
| 159 | +    },
| 160 | +    "gen_ai.tool.message": {
| 161 | +        "type": PatternType.DIRECT,
| 162 | +        "role": ROLE_TOOL,
| 163 | +        "source": "prompt",
| 164 | +    },
| 165 | +    "gen_ai.choice": {
| 166 | +        "type": PatternType.DIRECT,
| 167 | +        "role": ROLE_ASSISTANT,
| 168 | +        "source": "output",
| 169 | +    },
140 | 170 | }
141 | 171 |
142 | 172 |
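For illustration, a minimal sketch of how these exact-match patterns route an OTel GenAI event payload into an LLO message. The trimmed pattern table and the `route()` helper below are stand-ins for the example, not the module's actual internals.

```python
# Illustrative sketch only: a trimmed pattern table and a hypothetical route()
# helper showing how an event name maps to role/source via its config entry.
from enum import Enum


class PatternType(Enum):
    DIRECT = "direct"


ROLE_USER, ROLE_ASSISTANT = "user", "assistant"

LLO_PATTERNS = {
    "gen_ai.user.message": {"type": PatternType.DIRECT, "role": ROLE_USER, "source": "prompt"},
    "gen_ai.choice": {"type": PatternType.DIRECT, "role": ROLE_ASSISTANT, "source": "output"},
}


def route(event_name, payload):
    """Map an OTel GenAI event name plus its payload to an LLO message dict."""
    config = LLO_PATTERNS.get(event_name, {})
    return {
        "content": payload,
        "role": config.get("role", "unknown"),
        "source": config.get("source", "unknown"),
    }


print(route("gen_ai.choice", {"message": {"content": "Hi there!"}, "finish_reason": "stop"}))
# {'content': {...}, 'role': 'assistant', 'source': 'output'}
```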
@@ -214,6 +244,7 @@ def _collect_all_llo_messages(self, span: ReadableSpan, attributes: types.Attrib
214 | 244 |         for attr_key, value in attributes.items():
215 | 245 |             if attr_key in self._exact_match_patterns:
216 | 246 |                 config = self._pattern_configs[attr_key]
| 247 | +
217 | 248 |                 messages.append(
218 | 249 |                     {"content": value, "role": config.get("role", "unknown"), "source": config.get("source", "unknown")}
219 | 250 |                 )
@@ -279,6 +310,12 @@ def _collect_llo_attributes_from_span(self, span: ReadableSpan) -> Dict[str, Any
279 | 310 |         # Collect from span events
280 | 311 |         if span.events:
281 | 312 |             for event in span.events:
| 313 | +                # Check whether the event name itself is an LLO pattern (e.g., "gen_ai.user.message")
| 314 | +                if self._is_llo_attribute(event.name):
| 315 | +                    # Store the full event attribute dict as the LLO content for this event name
| 316 | +                    all_llo_attributes[event.name] = dict(event.attributes) if event.attributes else {}
| 317 | +
| 318 | +                # Also check the traditional pattern: LLO attributes nested within the event attributes
282 | 319 |                 if event.attributes:
283 | 320 |                     for key, value in event.attributes.items():
284 | 321 |                         if self._is_llo_attribute(key):
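For illustration, a self-contained sketch of the new event-name path alongside the traditional attribute-key path. The `Event` dataclass, pattern set, and helper below are assumptions made for the example, not the real `ReadableSpan` event types or the module's implementation.

```python
# Stand-in types and pattern set for illustration; the real code walks
# ReadableSpan.events and uses the module's own _is_llo_attribute().
from dataclasses import dataclass, field
from typing import Any, Dict, Iterable

_EXACT_MATCH_PATTERNS = {"gen_ai.user.message", "gen_ai.choice", "gen_ai.prompt"}


def _is_llo_attribute(name: str) -> bool:
    return name in _EXACT_MATCH_PATTERNS


@dataclass
class Event:
    name: str
    attributes: Dict[str, Any] = field(default_factory=dict)


def collect_llo_attributes(events: Iterable[Event]) -> Dict[str, Any]:
    all_llo_attributes: Dict[str, Any] = {}
    for event in events:
        # New path: the event name itself is the LLO key; its attributes are the content.
        if _is_llo_attribute(event.name):
            all_llo_attributes[event.name] = dict(event.attributes) if event.attributes else {}
        # Traditional path: LLO keys nested inside the event's attributes.
        if event.attributes:
            for key, value in event.attributes.items():
                if _is_llo_attribute(key):
                    all_llo_attributes[key] = value
    return all_llo_attributes


events = [Event("gen_ai.user.message", {"content": "What's the weather in Seattle?", "role": "user"})]
print(collect_llo_attributes(events))
# {'gen_ai.user.message': {'content': "What's the weather in Seattle?", 'role': 'user'}}
```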
@@ -372,6 +409,10 @@ def _filter_span_events(self, span: ReadableSpan) -> None:
372 | 409 |         updated_events = []
373 | 410 |
374 | 411 |         for event in span.events:
| 412 | +            # Skip the entire event if its name is an LLO pattern
| 413 | +            if self._is_llo_attribute(event.name):
| 414 | +                continue
| 415 | +
375 | 416 |             if not event.attributes:
376 | 417 |                 updated_events.append(event)
377 | 418 |                 continue
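A companion sketch of the filtering side, under the same stand-in assumptions as the previous example (the `Event` dataclass and `_is_llo_attribute` helper are re-declared here so the block runs on its own): once an event's name matches an LLO pattern, the whole event is dropped from the span so the message content does not remain on the exported trace.

```python
# Companion sketch for the filtering step; names are illustrative only.
from dataclasses import dataclass, field
from typing import Any, Dict, List

_EXACT_MATCH_PATTERNS = {"gen_ai.user.message", "gen_ai.choice"}


@dataclass
class Event:
    name: str
    attributes: Dict[str, Any] = field(default_factory=dict)


def _is_llo_attribute(name: str) -> bool:
    return name in _EXACT_MATCH_PATTERNS


def filter_span_events(events: List[Event]) -> List[Event]:
    updated_events = []
    for event in events:
        # Drop the whole event when its name is an LLO pattern; its content was
        # already collected and emitted separately (e.g., as a log record).
        if _is_llo_attribute(event.name):
            continue
        updated_events.append(event)
    return updated_events


events = [
    Event("gen_ai.user.message", {"content": "hello"}),
    Event("exception", {"exception.type": "ValueError"}),
]
print([e.name for e in filter_span_events(events)])
# ['exception']
```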
@@ -417,7 +458,7 @@ def _group_messages_by_type(self, messages: List[Dict[str, Any]]) -> Dict[str, L
417 | 458 |             elif role == ROLE_ASSISTANT:
418 | 459 |                 output_messages.append(formatted_message)
419 | 460 |             else:
420 | | -                # Route based on source for non-standard roles
| 461 | +                # Route based on source for non-standard roles, including tool
421 | 462 |                 if any(key in message.get("source", "") for key in ["completion", "output", "result"]):
422 | 463 |                     output_messages.append(formatted_message)
423 | 464 |                 else:
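For illustration, a minimal standalone sketch of the routing rule this comment describes: messages with a non-standard role such as "tool" fall back to their source, so a tool result recorded with source "prompt" lands among the input messages. Function and variable names below mirror the diff but are not the real module.

```python
# Standalone sketch of the role/source routing; helper names are illustrative only.
def group_messages_by_type(messages):
    input_messages, output_messages = [], []
    for message in messages:
        role = message.get("role", "unknown")
        formatted_message = {"role": role, "content": message.get("content", "")}
        if role in ("system", "user"):
            input_messages.append(formatted_message)
        elif role == "assistant":
            output_messages.append(formatted_message)
        else:
            # Route based on source for non-standard roles, including tool
            if any(key in message.get("source", "") for key in ["completion", "output", "result"]):
                output_messages.append(formatted_message)
            else:
                input_messages.append(formatted_message)
    return {"input": input_messages, "output": output_messages}


print(group_messages_by_type([{"role": "tool", "source": "prompt", "content": '{"temperature": 72}'}]))
# {'input': [{'role': 'tool', 'content': '{"temperature": 72}'}], 'output': []}
```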