|
1 | 1 | import sentry_sdk |
| 2 | +from sentry_sdk.ai.utils import set_data_normalized |
2 | 3 | from sentry_sdk.consts import OP, SPANDATA |
| 4 | +from sentry_sdk.utils import safe_serialize |
3 | 5 |
|
4 | 6 | from ..consts import SPAN_ORIGIN |
5 | 7 | from ..utils import ( |
6 | | - _get_model_name, |
7 | 8 | _set_agent_data, |
8 | 9 | _set_model_data, |
9 | | - _set_usage_data, |
10 | | - _set_input_messages, |
11 | | - _set_output_data, |
| 10 | + _should_send_prompts, |
| 11 | + _get_model_name, |
12 | 12 | ) |
13 | 13 |
|
14 | 14 | from typing import TYPE_CHECKING |
|
17 | | - from typing import Any
| 17 | + from typing import Any, Dict, List
18 | 18 |
|
19 | 19 |
|
| 20 | +def _set_usage_data(span, usage): |
| 21 | + # type: (sentry_sdk.tracing.Span, Any) -> None
| 22 | + """Set token usage data on a span.""" |
| 23 | + if usage is None: |
| 24 | + return |
| 25 | + |
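| | + # Read the counts defensively; the shape of the usage object can
| | + # differ between pydantic-ai versions.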
| 26 | + if hasattr(usage, "input_tokens") and usage.input_tokens is not None: |
| 27 | + span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens) |
| 28 | + |
| 29 | + if hasattr(usage, "output_tokens") and usage.output_tokens is not None: |
| 30 | + span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens) |
| 31 | + |
| 32 | + if hasattr(usage, "total_tokens") and usage.total_tokens is not None: |
| 33 | + span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens) |
| 34 | + |
| 35 | + |
| 36 | +def _set_input_messages(span, messages): |
| 37 | + # type: (sentry_sdk.tracing.Span, Any) -> None |
| 38 | + """Set input messages data on a span.""" |
| 39 | + if not _should_send_prompts(): |
| 40 | + return |
| 41 | + |
| 42 | + if not messages: |
| 43 | + return |
| 44 | + |
| 45 | + try: |
| 46 | + formatted_messages = [] |
| 47 | + system_prompt = None |
| 48 | + |
| 49 | + # Extract system prompt from any ModelRequest with instructions |
| 50 | + for msg in messages: |
| 51 | + if hasattr(msg, "instructions") and msg.instructions: |
| 52 | + system_prompt = msg.instructions |
| 53 | + break |
| 54 | + |
| 55 | + # Add system prompt as first message if present |
| 56 | + if system_prompt: |
| 57 | + formatted_messages.append( |
| 58 | + {"role": "system", "content": [{"type": "text", "text": system_prompt}]} |
| 59 | + ) |
| 60 | + |
| 61 | + for msg in messages: |
| 62 | + if hasattr(msg, "parts"): |
| 63 | + for part in msg.parts: |
| 64 | + role = "user" |
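| | + # Map the part's class name (SystemPromptPart, TextPart,
| | + # ToolCallPart, ToolReturnPart, ...) onto a chat role; string
| | + # matching avoids a hard import of pydantic_ai message types.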
| 65 | + if hasattr(part, "__class__"): |
| 66 | + if "System" in part.__class__.__name__: |
| 67 | + role = "system" |
| 68 | + elif ( |
| 69 | + "Assistant" in part.__class__.__name__ |
| 70 | + or "Text" in part.__class__.__name__ |
| 71 | + or "ToolCall" in part.__class__.__name__ |
| 72 | + ): |
| 73 | + role = "assistant" |
| 74 | + elif "ToolReturn" in part.__class__.__name__: |
| 75 | + role = "tool" |
| 76 | + |
| 77 | + content = [] # type: List[Dict[str, Any] | str] |
| 78 | + tool_calls = None |
| 79 | + tool_call_id = None |
| 80 | + |
| 81 | + # Handle ToolCallPart (assistant requesting tool use) |
| 82 | + if "ToolCall" in part.__class__.__name__: |
| 83 | + tool_call_data = {} |
| 84 | + if hasattr(part, "tool_name"): |
| 85 | + tool_call_data["name"] = part.tool_name |
| 86 | + if hasattr(part, "args"): |
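| | + # part.args may be a dict or a JSON string; safe_serialize handles both.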
| 87 | + tool_call_data["arguments"] = safe_serialize(part.args) |
| 88 | + if tool_call_data: |
| 89 | + tool_calls = [tool_call_data] |
| 90 | + # Handle ToolReturnPart (tool result) |
| 91 | + elif "ToolReturn" in part.__class__.__name__: |
| 92 | + if hasattr(part, "tool_name"):
| 93 | + # Prefer the part's real tool_call_id when it carries one.
| | + tool_call_id = getattr(part, "tool_call_id", None) or part.tool_name
| 94 | + if hasattr(part, "content"): |
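| | + # Tool results can be arbitrary Python objects; stringify them
| | + # for the span payload.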
| 95 | + content.append({"type": "text", "text": str(part.content)}) |
| 96 | + # Handle regular content |
| 97 | + elif hasattr(part, "content"): |
| 98 | + if isinstance(part.content, str): |
| 99 | + content.append({"type": "text", "text": part.content}) |
| 100 | + elif isinstance(part.content, list): |
| 101 | + for item in part.content: |
| 102 | + if isinstance(item, str): |
| 103 | + content.append({"type": "text", "text": item}) |
| 104 | + else: |
| 105 | + content.append(safe_serialize(item)) |
| 106 | + else: |
| 107 | + content.append({"type": "text", "text": str(part.content)}) |
| 108 | + |
| 109 | + # Add message if we have content or tool calls |
| 110 | + if content or tool_calls: |
| 111 | + message = {"role": role} # type: Dict[str, Any] |
| 112 | + if content: |
| 113 | + message["content"] = content |
| 114 | + if tool_calls: |
| 115 | + message["tool_calls"] = tool_calls |
| 116 | + if tool_call_id: |
| 117 | + message["tool_call_id"] = tool_call_id |
| 118 | + formatted_messages.append(message) |
| 119 | + |
| 120 | + if formatted_messages: |
| 121 | + set_data_normalized( |
| 122 | + span, SPANDATA.GEN_AI_REQUEST_MESSAGES, formatted_messages, unpack=False |
| 123 | + ) |
| 124 | + except Exception: |
| 125 | + # If we fail to format messages, just skip it |
| 126 | + pass |
| 127 | + |
| 128 | + |
| 129 | +def _set_output_data(span, response): |
| 130 | + # type: (sentry_sdk.tracing.Span, Any) -> None |
| 131 | + """Set output data on a span.""" |
| 132 | + if not _should_send_prompts(): |
| 133 | + return |
| 134 | + |
| 135 | + if not response: |
| 136 | + return |
| 137 | + |
| 138 | + # Guard the attribute access; the rest of this helper is duck-typed.
| | + if getattr(response, "model_name", None):
| | + set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model_name)
| 139 | + try: |
| 140 | + # Extract text from ModelResponse |
| 141 | + if hasattr(response, "parts"): |
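| | + # A ModelResponse carries its output as a list of parts
| | + # (text parts and/or tool calls).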
| 142 | + texts = [] |
| 143 | + tool_calls = [] |
| 144 | + |
| 145 | + for part in response.parts: |
| 146 | + if hasattr(part, "__class__"): |
| 147 | + if "Text" in part.__class__.__name__ and hasattr(part, "content"): |
| 148 | + texts.append(part.content) |
| 149 | + elif "ToolCall" in part.__class__.__name__: |
| 150 | + tool_call_data = { |
| 151 | + "type": "function", |
| 152 | + } |
| 153 | + if hasattr(part, "tool_name"): |
| 154 | + tool_call_data["name"] = part.tool_name |
| 155 | + if hasattr(part, "args"): |
| 156 | + tool_call_data["arguments"] = safe_serialize(part.args) |
| 157 | + tool_calls.append(tool_call_data) |
| 158 | + |
| 159 | + if texts: |
| 160 | + set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, texts) |
| 161 | + |
| 162 | + if tool_calls: |
| 163 | + span.set_data( |
| 164 | + SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls) |
| 165 | + ) |
| 166 | + |
| 167 | + except Exception: |
| 168 | + # If we fail to format output, just skip it |
| 169 | + pass |
| 170 | + |
| 171 | + |
20 | 172 | def ai_client_span(messages, agent, model, model_settings): |
21 | 173 | # type: (Any, Any, Any, Any) -> sentry_sdk.tracing.Span |
22 | 174 | """Create a span for an AI client call (model request). |
@@ -72,11 +224,11 @@ def ai_client_span(messages, agent, model, model_settings): |
72 | 224 | # Add description from function_schema if available |
73 | 225 | if hasattr(tool, "function_schema"): |
74 | 226 | schema = tool.function_schema |
75 | | - if hasattr(schema, "description") and schema.description: |
| 227 | + if getattr(schema, "description", None): |
76 | 228 | tool_info["description"] = schema.description |
77 | 229 |
|
78 | 230 | # Add parameters from json_schema |
79 | | - if hasattr(schema, "json_schema") and schema.json_schema: |
| 231 | + if getattr(schema, "json_schema", None): |
80 | 232 | tool_info["parameters"] = schema.json_schema |
81 | 233 |
|
82 | 234 | tools.append(tool_info) |
|
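For context, a minimal sketch of how these helpers are meant to compose around a single model request. The `do_request` callable and the `response.usage` attribute are illustrative assumptions, not part of this diff:

    import sentry_sdk

    # Hypothetical wiring (the real integration drives this from its patched
    # model-request path): open the client span, run the request, then attach
    # prompts, outputs, and token usage to the span.
    def traced_model_request(messages, agent, model, model_settings, do_request):
        span = ai_client_span(messages, agent, model, model_settings)
        with span:
            response = do_request(messages)  # assumed to return a ModelResponse
            _set_input_messages(span, messages)  # no-op unless prompts are allowed
            _set_output_data(span, response)  # response text and tool calls
            _set_usage_data(span, getattr(response, "usage", None))  # token counts
        return response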