 from functools import wraps
-import json
 from typing import TYPE_CHECKING
 
 import sentry_sdk
@@ -117,8 +116,29 @@ def _set_input_data(span, kwargs, integration):
         and should_send_default_pii()
         and integration.include_prompts
     ):
+        normalized_messages = []
+        for message in messages:
+            if (
+                message.get("role") == "user"
+                and "content" in message
+                and isinstance(message["content"], (list, tuple))
+            ):
+                for item in message["content"]:
+                    if item["type"] == "tool_result":
+                        normalized_messages.append(
+                            {
+                                "role": "tool",
+                                "content": {
+                                    "tool_use_id": item["tool_use_id"],
+                                    "output": item["content"],
+                                },
+                            }
+                        )
+            else:
+                normalized_messages.append(message)
+
         set_data_normalized(
-            span, SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(messages)
+            span, SPANDATA.GEN_AI_REQUEST_MESSAGES, normalized_messages, unpack=False
         )
 
     set_data_normalized(
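For reference, here is a minimal standalone sketch (not part of the diff) of what the normalization above produces for an Anthropic-style `tool_result` message. The sample messages and values are made up for illustration:

```python
# Illustrative only: mirrors the normalization loop added above, using
# made-up sample data to show the resulting message shapes.
messages = [
    {"role": "user", "content": "What's the weather in Paris?"},
    {
        "role": "user",
        "content": [
            {
                "type": "tool_result",
                "tool_use_id": "toolu_123",
                "content": "18°C and cloudy",
            }
        ],
    },
]

normalized_messages = []
for message in messages:
    if (
        message.get("role") == "user"
        and "content" in message
        and isinstance(message["content"], (list, tuple))
    ):
        for item in message["content"]:
            if item["type"] == "tool_result":
                # tool_result blocks are re-emitted as "tool" role messages
                normalized_messages.append(
                    {
                        "role": "tool",
                        "content": {
                            "tool_use_id": item["tool_use_id"],
                            "output": item["content"],
                        },
                    }
                )
    else:
        # messages with plain string content pass through unchanged
        normalized_messages.append(message)

print(normalized_messages)
# [{'role': 'user', 'content': "What's the weather in Paris?"},
#  {'role': 'tool', 'content': {'tool_use_id': 'toolu_123',
#                               'output': '18°C and cloudy'}}]
```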
@@ -159,21 +179,36 @@ def _set_output_data(
     Set output data for the span based on the AI response."""
     span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, model)
     if should_send_default_pii() and integration.include_prompts:
-        set_data_normalized(
-            span,
-            SPANDATA.GEN_AI_RESPONSE_TEXT,
-            json.dumps(content_blocks),
-            unpack=False,
-        )
+        output_messages = {
+            "response": [],
+            "tool": [],
+        }  # type: dict[str, list[Any]]
+
+        for output in content_blocks:
+            if output["type"] == "text":
+                output_messages["response"].append(output["text"])
+            elif output["type"] == "tool_use":
+                output_messages["tool"].append(output)
+
+        if len(output_messages["tool"]) > 0:
+            set_data_normalized(
+                span,
+                SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+                output_messages["tool"],
+                unpack=False,
+            )
+
+        if len(output_messages["response"]) > 0:
+            set_data_normalized(
+                span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"]
+            )
 
     record_token_usage(
         span,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
     )
 
-    # TODO: GEN_AI_RESPONSE_TOOL_CALLS ?
-
     if finish_span:
         span.__exit__(None, None, None)
 
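Likewise, a small illustrative sketch of how the reworked `_set_output_data` splits Anthropic response content blocks into response text and tool calls before attaching them to the span. The sample blocks are made up, and the `set_data_normalized` calls are represented by comments:

```python
from typing import Any

# Illustrative only: partitions response content blocks the same way the
# diff above does. Sample blocks are made up.
content_blocks = [
    {"type": "text", "text": "Let me check the weather."},
    {
        "type": "tool_use",
        "id": "toolu_123",
        "name": "get_weather",
        "input": {"city": "Paris"},
    },
]

output_messages = {
    "response": [],
    "tool": [],
}  # type: dict[str, list[Any]]

for output in content_blocks:
    if output["type"] == "text":
        output_messages["response"].append(output["text"])
    elif output["type"] == "tool_use":
        output_messages["tool"].append(output)

# output_messages["tool"]     -> GEN_AI_RESPONSE_TOOL_CALLS (only when non-empty)
# output_messages["response"] -> GEN_AI_RESPONSE_TEXT       (only when non-empty)
print(output_messages["response"])  # ['Let me check the weather.']
print(len(output_messages["tool"]))  # 1
```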