Skip to content

Commit 299bd9e

Browse files
committed
feat(llmo): change $ai_output_choices to have an array of content
1 parent e71f386 commit 299bd9e

File tree

4 files changed

+112
-114
lines changed

4 files changed

+112
-114
lines changed

posthog/ai/utils.py

Lines changed: 50 additions & 78 deletions
Original file line numberDiff line numberDiff line change
@@ -117,9 +117,7 @@ def format_response(response, provider: str):
117117

118118
def format_response_anthropic(response):
119119
output = []
120-
121-
content_text = ""
122-
tool_calls = []
120+
content = []
123121

124122
for choice in response.content:
125123
if (
@@ -128,7 +126,7 @@ def format_response_anthropic(response):
128126
and hasattr(choice, "text")
129127
and choice.text
130128
):
131-
content_text += choice.text
129+
content.append(choice.text)
132130
elif (
133131
hasattr(choice, "type")
134132
and choice.type == "tool_use"
@@ -143,18 +141,13 @@ def format_response_anthropic(response):
143141
"arguments": getattr(choice, "input", {}),
144142
},
145143
}
144+
content.append(tool_call)
146145

147-
tool_calls.append(tool_call)
148-
149-
if content_text or tool_calls:
150-
message: Dict[str, Any] = {
146+
if content:
147+
message = {
151148
"role": "assistant",
152-
"content": content_text if content_text else None,
149+
"content": content,
153150
}
154-
155-
if tool_calls:
156-
message["tool_calls"] = tool_calls
157-
158151
output.append(message)
159152

160153
return output
@@ -164,36 +157,39 @@ def format_response_openai(response):
164157
output = []
165158

166159
if hasattr(response, "choices"):
160+
content = []
161+
role = "assistant"
162+
167163
for choice in response.choices:
168164
# Handle Chat Completions response format
169165
if hasattr(choice, "message") and choice.message:
170-
message = {
171-
"role": choice.message.role,
172-
"content": choice.message.content,
173-
}
166+
if choice.message.role:
167+
role = choice.message.role
168+
169+
if choice.message.content:
170+
content.append(choice.message.content)
174171

175172
if hasattr(choice.message, "tool_calls") and choice.message.tool_calls:
176-
tool_calls = []
177173
for tool_call in choice.message.tool_calls:
178-
tool_calls.append(
179-
{
180-
"type": "function",
181-
"id": tool_call.id,
182-
"function": {
183-
"name": tool_call.function.name,
184-
"arguments": tool_call.function.arguments,
185-
},
186-
}
187-
)
188-
message["tool_calls"] = tool_calls
189-
190-
output.append(message)
174+
content.append({
175+
"type": "function",
176+
"id": tool_call.id,
177+
"function": {
178+
"name": tool_call.function.name,
179+
"arguments": tool_call.function.arguments,
180+
},
181+
})
182+
183+
if content:
184+
message = {
185+
"role": role,
186+
"content": content,
187+
}
188+
output.append(message)
191189

192190
# Handle Responses API format
193191
if hasattr(response, "output"):
194-
content_text = ""
195-
tool_calls = []
196-
images = []
192+
content = []
197193
role = "assistant"
198194

199195
for item in response.output:
@@ -207,54 +203,38 @@ def format_response_openai(response):
207203
and content_item.type == "output_text"
208204
and hasattr(content_item, "text")
209205
):
210-
content_text += content_item.text
206+
content.append(content_item.text)
211207
elif hasattr(content_item, "text"):
212-
content_text += content_item.text
208+
content.append(content_item.text)
213209
elif (
214210
hasattr(content_item, "type")
215211
and content_item.type == "input_image"
216212
and hasattr(content_item, "image_url")
217213
):
218-
images.append(
219-
{
220-
"type": "image",
221-
"image": content_item.image_url,
222-
}
223-
)
214+
content.append({
215+
"type": "image",
216+
"image": content_item.image_url,
217+
})
224218
elif hasattr(item, "content"):
225-
content_text += str(item.content)
219+
content.append(str(item.content))
226220

227221
elif hasattr(item, "type") and item.type == "function_call":
228-
tool_call = {
222+
content.append({
229223
"type": "function",
230224
"id": getattr(item, "call_id", getattr(item, "id", "")),
231225
"function": {
232226
"name": item.name,
233227
"arguments": getattr(item, "arguments", {}),
234228
},
235-
}
236-
237-
tool_calls.append(tool_call)
229+
})
238230

239-
if content_text or tool_calls:
231+
if content:
240232
message = {
241233
"role": role,
242-
"content": content_text if content_text else None,
234+
"content": content,
243235
}
244-
245-
if tool_calls:
246-
message["tool_calls"] = tool_calls
247-
248236
output.append(message)
249237

250-
for image in images:
251-
output.append(
252-
{
253-
"content": image,
254-
"role": role,
255-
}
256-
)
257-
258238
return output
259239

260240

@@ -264,49 +244,41 @@ def format_response_gemini(response):
264244
if hasattr(response, "candidates") and response.candidates:
265245
for candidate in response.candidates:
266246
if hasattr(candidate, "content") and candidate.content:
267-
content_text = ""
268-
tool_calls = []
247+
content = []
269248

270249
if hasattr(candidate.content, "parts") and candidate.content.parts:
271250
for part in candidate.content.parts:
272251
if hasattr(part, "text") and part.text:
273-
content_text += part.text
252+
content.append(part.text)
274253
elif hasattr(part, "function_call") and part.function_call:
275254
function_call = part.function_call
276-
277-
tool_call = {
255+
content.append({
278256
"type": "function",
279257
"function": {
280258
"name": function_call.name,
281259
"arguments": function_call.args,
282260
},
283-
}
284-
285-
tool_calls.append(tool_call)
261+
})
286262

287-
if content_text or tool_calls:
288-
message: Dict[str, Any] = {
263+
if content:
264+
message = {
289265
"role": "assistant",
290-
"content": content_text if content_text else None,
266+
"content": content,
291267
}
292-
293-
if tool_calls:
294-
message["tool_calls"] = tool_calls
295-
296268
output.append(message)
297269

298270
elif hasattr(candidate, "text") and candidate.text:
299271
output.append(
300272
{
301273
"role": "assistant",
302-
"content": candidate.text,
274+
"content": [candidate.text],
303275
}
304276
)
305277
elif hasattr(response, "text") and response.text:
306278
output.append(
307279
{
308280
"role": "assistant",
309-
"content": response.text,
281+
"content": [response.text],
310282
}
311283
)
312284

posthog/test/ai/anthropic/test_anthropic.py

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,7 @@ def mock_anthropic_response_with_tool_calls():
9696
role="assistant",
9797
content=[
9898
{"type": "text", "text": "I'll help you check the weather."},
99+
{"type": "text", "text": " Let me look that up."},
99100
{
100101
"type": "tool_use",
101102
"id": "toolu_abc123",
@@ -161,7 +162,7 @@ def test_basic_completion(mock_client, mock_anthropic_response):
161162
assert props["$ai_model"] == "claude-3-opus-20240229"
162163
assert props["$ai_input"] == [{"role": "user", "content": "Hello"}]
163164
assert props["$ai_output_choices"] == [
164-
{"role": "assistant", "content": "Test response"}
165+
{"role": "assistant", "content": ["Test response"]}
165166
]
166167
assert props["$ai_input_tokens"] == 20
167168
assert props["$ai_output_tokens"] == 10
@@ -474,7 +475,7 @@ def test_cached_tokens(mock_client, mock_anthropic_response_with_cached_tokens):
474475
assert props["$ai_model"] == "claude-3-opus-20240229"
475476
assert props["$ai_input"] == [{"role": "user", "content": "Hello"}]
476477
assert props["$ai_output_choices"] == [
477-
{"role": "assistant", "content": "Test response"}
478+
{"role": "assistant", "content": ["Test response"]}
478479
]
479480
assert props["$ai_input_tokens"] == 20
480481
assert props["$ai_output_tokens"] == 10
@@ -531,7 +532,7 @@ def test_tool_definition(mock_client, mock_anthropic_response):
531532
assert props["$ai_model"] == "claude-3-5-sonnet-20241022"
532533
assert props["$ai_input"] == [{"role": "user", "content": "hey"}]
533534
assert props["$ai_output_choices"] == [
534-
{"role": "assistant", "content": "Test response"}
535+
{"role": "assistant", "content": ["Test response"]}
535536
]
536537
assert props["$ai_input_tokens"] == 20
537538
assert props["$ai_output_tokens"] == 10
@@ -583,8 +584,9 @@ def test_tool_calls_in_output_choices(
583584
assert props["$ai_output_choices"] == [
584585
{
585586
"role": "assistant",
586-
"content": "I'll help you check the weather.",
587-
"tool_calls": [
587+
"content": [
588+
"I'll help you check the weather.",
589+
" Let me look that up.",
588590
{
589591
"type": "function",
590592
"id": "toolu_abc123",
@@ -645,8 +647,7 @@ def test_tool_calls_only_no_content(
645647
assert props["$ai_output_choices"] == [
646648
{
647649
"role": "assistant",
648-
"content": None,
649-
"tool_calls": [
650+
"content": [
650651
{
651652
"type": "function",
652653
"id": "toolu_def456",
@@ -715,8 +716,9 @@ async def run_test():
715716
assert props["$ai_output_choices"] == [
716717
{
717718
"role": "assistant",
718-
"content": "I'll help you check the weather.",
719-
"tool_calls": [
719+
"content": [
720+
"I'll help you check the weather.",
721+
" Let me look that up.",
720722
{
721723
"type": "function",
722724
"id": "toolu_abc123",

posthog/test/ai/gemini/test_gemini.py

Lines changed: 16 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -71,11 +71,16 @@ def mock_gemini_response_with_function_calls():
7171
mock_function_call.name = "get_current_weather"
7272
mock_function_call.args = {"location": "San Francisco"}
7373

74-
# Mock text part - need to ensure hasattr() works correctly
75-
mock_text_part = MagicMock()
76-
mock_text_part.text = "I'll check the weather for you."
74+
# Mock text part 1
75+
mock_text_part1 = MagicMock()
76+
mock_text_part1.text = "I'll check the weather for you."
7777
# Make hasattr(part, "text") return True
78-
type(mock_text_part).text = mock_text_part.text
78+
type(mock_text_part1).text = mock_text_part1.text
79+
80+
# Mock text part 2
81+
mock_text_part2 = MagicMock()
82+
mock_text_part2.text = " Let me look that up."
83+
type(mock_text_part2).text = mock_text_part2.text
7984

8085
# Mock function call part - need to ensure hasattr() works correctly
8186
mock_function_part = MagicMock()
@@ -85,9 +90,9 @@ def mock_gemini_response_with_function_calls():
8590
# Ensure hasattr(part, "text") returns False for the function part
8691
del mock_function_part.text
8792

88-
# Mock content with both text and function call parts
93+
# Mock content with 2 text parts and 1 function call part
8994
mock_content = MagicMock()
90-
mock_content.parts = [mock_text_part, mock_function_part]
95+
mock_content.parts = [mock_text_part1, mock_text_part2, mock_function_part]
9196

9297
# Mock candidate
9398
mock_candidate = MagicMock()
@@ -444,7 +449,7 @@ def test_tool_use_response(mock_client, mock_google_genai_client, mock_gemini_re
444449
assert props["$ai_model"] == "gemini-2.5-flash"
445450
assert props["$ai_input"] == [{"role": "user", "content": "hey"}]
446451
assert props["$ai_output_choices"] == [
447-
{"role": "assistant", "content": "Test response from Gemini"}
452+
{"role": "assistant", "content": ["Test response from Gemini"]}
448453
]
449454
assert props["$ai_input_tokens"] == 20
450455
assert props["$ai_output_tokens"] == 10
@@ -484,8 +489,9 @@ def test_function_calls_in_output_choices(
484489
assert props["$ai_output_choices"] == [
485490
{
486491
"role": "assistant",
487-
"content": "I'll check the weather for you.",
488-
"tool_calls": [
492+
"content": [
493+
"I'll check the weather for you.",
494+
" Let me look that up.",
489495
{
490496
"type": "function",
491497
"function": {
@@ -532,8 +538,7 @@ def test_function_calls_only_no_content(
532538
assert props["$ai_output_choices"] == [
533539
{
534540
"role": "assistant",
535-
"content": None,
536-
"tool_calls": [
541+
"content": [
537542
{
538543
"type": "function",
539544
"function": {

0 commit comments

Comments (0)