|
| 1 | +import json |
| 2 | +import sentry_sdk |
| 3 | +from sentry_sdk.consts import SPANDATA |
| 4 | +from sentry_sdk.integrations import DidNotEnable |
| 5 | +from sentry_sdk.scope import should_send_default_pii |
| 6 | +from sentry_sdk.utils import event_from_exception |
| 7 | + |
| 8 | +from typing import TYPE_CHECKING |
| 9 | + |
| 10 | +if TYPE_CHECKING: |
| 11 | + from typing import Any |
| 12 | + from typing import Callable |
| 13 | + from typing import Union |
| 14 | + from agents import Usage |
| 15 | + |
| 16 | +try: |
| 17 | + import agents |
| 18 | + |
| 19 | +except ImportError: |
| 20 | + raise DidNotEnable("OpenAI Agents not installed") |
| 21 | + |
| 22 | + |
def _capture_exception(exc):
    # type: (Any) -> None
    """Report *exc* to Sentry as an unhandled ``openai_agents`` error."""
    client = sentry_sdk.get_client()
    event, hint = event_from_exception(
        exc,
        client_options=client.options,
        mechanism={"type": "openai_agents", "handled": False},
    )
    sentry_sdk.capture_event(event, hint=hint)
| 31 | + |
| 32 | + |
def _get_start_span_function():
    # type: () -> Callable[..., Any]
    """Return the appropriate span starter.

    If the current span is itself the containing transaction (i.e. a
    transaction is already running), child work should be a span;
    otherwise a new transaction must be started.
    """
    span = sentry_sdk.get_current_span()
    if span is not None and span.containing_transaction == span:
        return sentry_sdk.start_span
    return sentry_sdk.start_transaction
| 40 | + |
| 41 | + |
def _set_agent_data(span, agent):
    # type: (sentry_sdk.tracing.Span, agents.Agent) -> None
    """Record agent configuration (name, model, sampling settings, tools) on *span*.

    Numeric model settings are compared against ``None`` instead of relying on
    truthiness, so that explicit zero values (e.g. ``temperature=0.0`` or
    ``presence_penalty=0``) are still reported rather than silently dropped.
    """
    # "openai" is reported as the system even for non-OpenAI models; see the
    # footnote at
    # https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-system
    # for the rationale.
    span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai")

    span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent.name)

    if agent.model_settings.max_tokens is not None:
        span.set_data(
            SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, agent.model_settings.max_tokens
        )

    if agent.model:
        # agent.model may be a model object exposing a ``.model`` name, or a
        # plain string naming the model directly.
        model_name = agent.model.model if hasattr(agent.model, "model") else agent.model
        span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)

    if agent.model_settings.presence_penalty is not None:
        span.set_data(
            SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
            agent.model_settings.presence_penalty,
        )

    if agent.model_settings.temperature is not None:
        span.set_data(
            SPANDATA.GEN_AI_REQUEST_TEMPERATURE, agent.model_settings.temperature
        )

    if agent.model_settings.top_p is not None:
        span.set_data(SPANDATA.GEN_AI_REQUEST_TOP_P, agent.model_settings.top_p)

    if agent.model_settings.frequency_penalty is not None:
        span.set_data(
            SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
            agent.model_settings.frequency_penalty,
        )

    if agent.tools:
        span.set_data(
            SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
            safe_serialize([vars(tool) for tool in agent.tools]),
        )
| 84 | + |
| 85 | + |
def _set_usage_data(span, usage):
    # type: (sentry_sdk.tracing.Span, Usage) -> None
    """Copy token-usage counters from *usage* onto *span*."""
    token_counts = {
        SPANDATA.GEN_AI_USAGE_INPUT_TOKENS: usage.input_tokens,
        SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED: (
            usage.input_tokens_details.cached_tokens
        ),
        SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS: usage.output_tokens,
        SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING: (
            usage.output_tokens_details.reasoning_tokens
        ),
        SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS: usage.total_tokens,
    }
    for attribute, count in token_counts.items():
        span.set_data(attribute, count)
| 99 | + |
| 100 | + |
def _set_input_data(span, get_response_kwargs):
    # type: (sentry_sdk.tracing.Span, dict[str, Any]) -> None
    """Attach the request messages to *span*, grouped by role.

    Does nothing unless sending default PII is enabled, since message
    content may contain user data.
    """
    if not should_send_default_pii():
        return

    messages_by_role = {
        "system": [],
        "user": [],
        "assistant": [],
        "tool": [],
    }  # type: dict[str, list[Any]]

    system_instructions = get_response_kwargs.get("system_instructions")
    if system_instructions:
        messages_by_role["system"].append({"type": "text", "text": system_instructions})

    for message in get_response_kwargs.get("input", []):
        if "role" in message:
            # setdefault guards against roles outside the four we pre-seed
            # (e.g. "developer"); the original indexing raised KeyError here.
            messages_by_role.setdefault(message.get("role"), []).append(
                {"type": "text", "text": message.get("content")}
            )
        elif message.get("type") == "function_call":
            messages_by_role["assistant"].append(message)
        elif message.get("type") == "function_call_output":
            messages_by_role["tool"].append(message)

    # Only emit roles that actually accumulated messages.
    request_messages = [
        {"role": role, "content": messages}
        for role, messages in messages_by_role.items()
        if messages
    ]

    span.set_data(SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(request_messages))
| 133 | + |
| 134 | + |
def _set_output_data(span, result):
    # type: (sentry_sdk.tracing.Span, Any) -> None
    """Attach model response text and tool calls to *span*.

    Skipped entirely unless sending default PII is enabled, since model
    output may contain user data.
    """
    if not should_send_default_pii():
        return

    tool_calls = []  # type: list[Any]
    response_parts = []  # type: list[Any]

    for item in result.output:
        if item.type == "function_call":
            tool_calls.append(item.dict())
        elif item.type == "message":
            for part in item.content:
                try:
                    response_parts.append(part.text)
                except AttributeError:
                    # Unknown output message type, just return the json
                    response_parts.append(part.dict())

    if tool_calls:
        span.set_data(SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls))

    if response_parts:
        span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(response_parts))
| 165 | + |
| 166 | + |
def safe_serialize(data):
    # type: (Any) -> str
    """Safely serialize arbitrary data to a readable JSON string.

    Callables become ``<function module.qualname>`` markers, objects with a
    ``__dict__`` become ``<TypeName {public attrs}>`` strings, and anything
    that still cannot be JSON-encoded falls back to ``str``.
    """

    def _describe_callable(obj):
        # type: (Any) -> str
        try:
            module = getattr(obj, "__module__", None)
            qualname = getattr(obj, "__qualname__", None)
            name = getattr(obj, "__name__", "anonymous")

            if module and qualname:
                label = f"{module}.{qualname}"
            elif module and name:
                label = f"{module}.{name}"
            else:
                label = name

            return f"<function {label}>"
        except Exception:
            return f"<callable {type(obj).__name__}>"

    def _convert(value):
        # type: (Any) -> Union[str, dict[Any, Any], list[Any], tuple[Any, ...]]
        if callable(value):
            return _describe_callable(value)
        if isinstance(value, dict):
            return {key: _convert(item) for key, item in value.items()}
        if isinstance(value, (list, tuple)):
            return [_convert(item) for item in value]
        if hasattr(value, "__dict__"):
            try:
                public_attrs = {
                    key: _convert(item)
                    for key, item in vars(value).items()
                    if not key.startswith("_")
                }
                return f"<{type(value).__name__} {public_attrs}>"
            except Exception:
                return repr(value)
        return value

    try:
        return json.dumps(_convert(data), default=str)
    except Exception:
        return str(data)
0 commit comments