Skip to content

Commit 3eedd07

Browse files
committed
chore(llmo): run formatter
1 parent b567aae commit 3eedd07

File tree

5 files changed

+114
-103
lines changed

posthog/ai/langchain/callbacks.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -584,7 +584,8 @@ def _capture_generation(
584584
]
585585
else:
586586
completions = [
587-
_extract_raw_response(generation) for generation in generation_result
587+
_extract_raw_response(generation)
588+
for generation in generation_result
588589
]
589590
event_properties["$ai_output_choices"] = with_privacy_mode(
590591
self._ph_client, self._privacy_mode, completions

posthog/ai/utils.py

Lines changed: 35 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -117,10 +117,10 @@ def format_response(response, provider: str):
117117

118118
def format_response_anthropic(response):
119119
output = []
120-
120+
121121
content_text = ""
122122
tool_calls = []
123-
123+
124124
for choice in response.content:
125125
if (
126126
hasattr(choice, "type")
@@ -145,7 +145,7 @@ def format_response_anthropic(response):
145145
}
146146

147147
tool_calls.append(tool_call)
148-
148+
149149
if content_text or tool_calls:
150150
message = {
151151
"role": "assistant",
@@ -156,7 +156,7 @@ def format_response_anthropic(response):
156156
message["tool_calls"] = tool_calls
157157

158158
output.append(message)
159-
159+
160160
return output
161161

162162

@@ -171,20 +171,22 @@ def format_response_openai(response):
171171
"role": choice.message.role,
172172
"content": choice.message.content,
173173
}
174-
174+
175175
if hasattr(choice.message, "tool_calls") and choice.message.tool_calls:
176176
tool_calls = []
177177
for tool_call in choice.message.tool_calls:
178-
tool_calls.append({
179-
"type": "function",
180-
"id": tool_call.id,
181-
"function": {
182-
"name": tool_call.function.name,
183-
"arguments": tool_call.function.arguments,
184-
},
185-
})
178+
tool_calls.append(
179+
{
180+
"type": "function",
181+
"id": tool_call.id,
182+
"function": {
183+
"name": tool_call.function.name,
184+
"arguments": tool_call.function.arguments,
185+
},
186+
}
187+
)
186188
message["tool_calls"] = tool_calls
187-
189+
188190
output.append(message)
189191

190192
# Handle Responses API format
@@ -193,7 +195,7 @@ def format_response_openai(response):
193195
tool_calls = []
194196
images = []
195197
role = "assistant"
196-
198+
197199
for item in response.output:
198200
if item.type == "message":
199201
role = item.role
@@ -213,13 +215,15 @@ def format_response_openai(response):
213215
and content_item.type == "input_image"
214216
and hasattr(content_item, "image_url")
215217
):
216-
images.append({
217-
"type": "image",
218-
"image": content_item.image_url,
219-
})
218+
images.append(
219+
{
220+
"type": "image",
221+
"image": content_item.image_url,
222+
}
223+
)
220224
elif hasattr(item, "content"):
221225
content_text += str(item.content)
222-
226+
223227
elif hasattr(item, "type") and item.type == "function_call":
224228
tool_call = {
225229
"type": "function",
@@ -231,7 +235,7 @@ def format_response_openai(response):
231235
}
232236

233237
tool_calls.append(tool_call)
234-
238+
235239
if content_text or tool_calls:
236240
message = {
237241
"role": role,
@@ -242,12 +246,14 @@ def format_response_openai(response):
242246
message["tool_calls"] = tool_calls
243247

244248
output.append(message)
245-
249+
246250
for image in images:
247-
output.append({
248-
"content": image,
249-
"role": role,
250-
})
251+
output.append(
252+
{
253+
"content": image,
254+
"role": role,
255+
}
256+
)
251257

252258
return output
253259

@@ -260,7 +266,7 @@ def format_response_gemini(response):
260266
if hasattr(candidate, "content") and candidate.content:
261267
content_text = ""
262268
tool_calls = []
263-
269+
264270
if hasattr(candidate.content, "parts") and candidate.content.parts:
265271
for part in candidate.content.parts:
266272
if hasattr(part, "text") and part.text:
@@ -277,7 +283,7 @@ def format_response_gemini(response):
277283
}
278284

279285
tool_calls.append(tool_call)
280-
286+
281287
if content_text or tool_calls:
282288
message = {
283289
"role": "assistant",
@@ -288,7 +294,7 @@ def format_response_gemini(response):
288294
message["tool_calls"] = tool_calls
289295

290296
output.append(message)
291-
297+
292298
elif hasattr(candidate, "text") and candidate.text:
293299
output.append(
294300
{

posthog/test/ai/anthropic/test_anthropic.py

Lines changed: 32 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -100,8 +100,8 @@ def mock_anthropic_response_with_tool_calls():
100100
"type": "tool_use",
101101
"id": "toolu_abc123",
102102
"name": "get_weather",
103-
"input": {"location": "San Francisco"}
104-
}
103+
"input": {"location": "San Francisco"},
104+
},
105105
],
106106
model="claude-3-5-sonnet-20241022",
107107
usage=Usage(
@@ -124,7 +124,7 @@ def mock_anthropic_response_tool_calls_only():
124124
"type": "tool_use",
125125
"id": "toolu_def456",
126126
"name": "get_weather",
127-
"input": {"location": "New York", "unit": "fahrenheit"}
127+
"input": {"location": "New York", "unit": "fahrenheit"},
128128
}
129129
],
130130
model="claude-3-5-sonnet-20241022",
@@ -542,7 +542,9 @@ def test_tool_definition(mock_client, mock_anthropic_response):
542542
assert props["$ai_tools"] == tools
543543

544544

545-
def test_tool_calls_in_output_choices(mock_client, mock_anthropic_response_with_tool_calls):
545+
def test_tool_calls_in_output_choices(
546+
mock_client, mock_anthropic_response_with_tool_calls
547+
):
546548
with patch(
547549
"anthropic.resources.Messages.create",
548550
return_value=mock_anthropic_response_with_tool_calls,
@@ -560,10 +562,8 @@ def test_tool_calls_in_output_choices(mock_client, mock_anthropic_response_with_
560562
"description": "Get weather",
561563
"input_schema": {
562564
"type": "object",
563-
"properties": {
564-
"location": {"type": "string"}
565-
},
566-
"required": ["location"]
565+
"properties": {"location": {"type": "string"}},
566+
"required": ["location"],
567567
},
568568
}
569569
],
@@ -590,10 +590,10 @@ def test_tool_calls_in_output_choices(mock_client, mock_anthropic_response_with_
590590
"id": "toolu_abc123",
591591
"function": {
592592
"name": "get_weather",
593-
"arguments": {"location": "San Francisco"}
594-
}
593+
"arguments": {"location": "San Francisco"},
594+
},
595595
}
596-
]
596+
],
597597
}
598598
]
599599

@@ -603,7 +603,9 @@ def test_tool_calls_in_output_choices(mock_client, mock_anthropic_response_with_
603603
assert props["$ai_http_status"] == 200
604604

605605

606-
def test_tool_calls_only_no_content(mock_client, mock_anthropic_response_tool_calls_only):
606+
def test_tool_calls_only_no_content(
607+
mock_client, mock_anthropic_response_tool_calls_only
608+
):
607609
with patch(
608610
"anthropic.resources.Messages.create",
609611
return_value=mock_anthropic_response_tool_calls_only,
@@ -612,9 +614,7 @@ def test_tool_calls_only_no_content(mock_client, mock_anthropic_response_tool_ca
612614
response = client.messages.create(
613615
model="claude-3-5-sonnet-20241022",
614616
max_tokens=200,
615-
messages=[
616-
{"role": "user", "content": "Get weather for New York"}
617-
],
617+
messages=[{"role": "user", "content": "Get weather for New York"}],
618618
tools=[
619619
{
620620
"name": "get_weather",
@@ -623,9 +623,9 @@ def test_tool_calls_only_no_content(mock_client, mock_anthropic_response_tool_ca
623623
"type": "object",
624624
"properties": {
625625
"location": {"type": "string"},
626-
"unit": {"type": "string"}
626+
"unit": {"type": "string"},
627627
},
628-
"required": ["location"]
628+
"required": ["location"],
629629
},
630630
}
631631
],
@@ -652,10 +652,10 @@ def test_tool_calls_only_no_content(mock_client, mock_anthropic_response_tool_ca
652652
"id": "toolu_def456",
653653
"function": {
654654
"name": "get_weather",
655-
"arguments": {"location": "New York", "unit": "fahrenheit"}
656-
}
655+
"arguments": {"location": "New York", "unit": "fahrenheit"},
656+
},
657657
}
658-
]
658+
],
659659
}
660660
]
661661

@@ -665,18 +665,20 @@ def test_tool_calls_only_no_content(mock_client, mock_anthropic_response_tool_ca
665665
assert props["$ai_http_status"] == 200
666666

667667

668-
def test_async_tool_calls_in_output_choices(mock_client, mock_anthropic_response_with_tool_calls):
668+
def test_async_tool_calls_in_output_choices(
669+
mock_client, mock_anthropic_response_with_tool_calls
670+
):
669671
import asyncio
670-
672+
671673
async def mock_async_create(**kwargs):
672674
return mock_anthropic_response_with_tool_calls
673-
675+
674676
with patch(
675677
"anthropic.resources.AsyncMessages.create",
676678
side_effect=mock_async_create,
677679
):
678680
async_client = AsyncAnthropic(api_key="test-key", posthog_client=mock_client)
679-
681+
680682
async def run_test():
681683
return await async_client.messages.create(
682684
model="claude-3-5-sonnet-20241022",
@@ -690,16 +692,14 @@ async def run_test():
690692
"description": "Get weather",
691693
"input_schema": {
692694
"type": "object",
693-
"properties": {
694-
"location": {"type": "string"}
695-
},
696-
"required": ["location"]
695+
"properties": {"location": {"type": "string"}},
696+
"required": ["location"],
697697
},
698698
}
699699
],
700700
posthog_distinct_id="test-id",
701701
)
702-
702+
703703
response = asyncio.run(run_test())
704704

705705
assert response == mock_anthropic_response_with_tool_calls
@@ -722,10 +722,10 @@ async def run_test():
722722
"id": "toolu_abc123",
723723
"function": {
724724
"name": "get_weather",
725-
"arguments": {"location": "San Francisco"}
726-
}
725+
"arguments": {"location": "San Francisco"},
726+
},
727727
}
728-
]
728+
],
729729
}
730730
]
731731

0 commit comments

Comments (0)