
Commit f2985cd

fix(llma): run ruff
1 parent e4c4884 · commit f2985cd

File tree (4 files changed: +117 −85 lines)

posthog/ai/anthropic/anthropic_converter.py
posthog/ai/gemini/gemini_converter.py
posthog/ai/openai/openai_converter.py
posthog/ai/utils.py


posthog/ai/anthropic/anthropic_converter.py

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -145,17 +145,21 @@ def format_anthropic_streaming_content(
145145

146146
for block in content_blocks:
147147
if block.get("type") == "text":
148-
formatted.append({
149-
"type": "text",
150-
"text": block.get("text") or "",
151-
})
148+
formatted.append(
149+
{
150+
"type": "text",
151+
"text": block.get("text") or "",
152+
}
153+
)
152154

153155
elif block.get("type") == "function":
154-
formatted.append({
155-
"type": "function",
156-
"id": block.get("id"),
157-
"function": block.get("function") or {},
158-
})
156+
formatted.append(
157+
{
158+
"type": "function",
159+
"id": block.get("id"),
160+
"function": block.get("function") or {},
161+
}
162+
)
159163

160164
return formatted
161165
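For reference, a minimal self-contained sketch of how the reformatted branches behave. The sample blocks and the expected output are illustrative only, not part of this commit:

# Illustrative only: mirrors the loop body reformatted above.
blocks = [
    {"type": "text", "text": "Hello"},
    {"type": "function", "id": "toolu_1", "function": {"name": "get_weather", "arguments": "{}"}},
]

formatted = []
for block in blocks:
    if block.get("type") == "text":
        formatted.append(
            {
                "type": "text",
                "text": block.get("text") or "",
            }
        )
    elif block.get("type") == "function":
        formatted.append(
            {
                "type": "function",
                "id": block.get("id"),
                "function": block.get("function") or {},
            }
        )

print(formatted)
# [{'type': 'text', 'text': 'Hello'},
#  {'type': 'function', 'id': 'toolu_1', 'function': {'name': 'get_weather', 'arguments': '{}'}}]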

posthog/ai/gemini/gemini_converter.py

Lines changed: 51 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -175,38 +175,48 @@ def format_gemini_response(response: Any) -> List[FormattedMessage]:
175175
if hasattr(candidate.content, "parts") and candidate.content.parts:
176176
for part in candidate.content.parts:
177177
if hasattr(part, "text") and part.text:
178-
content.append({
179-
"type": "text",
180-
"text": part.text,
181-
})
178+
content.append(
179+
{
180+
"type": "text",
181+
"text": part.text,
182+
}
183+
)
182184

183185
elif hasattr(part, "function_call") and part.function_call:
184186
function_call = part.function_call
185-
content.append({
186-
"type": "function",
187-
"function": {
188-
"name": function_call.name,
189-
"arguments": function_call.args,
190-
},
191-
})
187+
content.append(
188+
{
189+
"type": "function",
190+
"function": {
191+
"name": function_call.name,
192+
"arguments": function_call.args,
193+
},
194+
}
195+
)
192196

193197
if content:
194-
output.append({
195-
"role": "assistant",
196-
"content": content,
197-
})
198+
output.append(
199+
{
200+
"role": "assistant",
201+
"content": content,
202+
}
203+
)
198204

199205
elif hasattr(candidate, "text") and candidate.text:
200-
output.append({
201-
"role": "assistant",
202-
"content": [{"type": "text", "text": candidate.text}],
203-
})
206+
output.append(
207+
{
208+
"role": "assistant",
209+
"content": [{"type": "text", "text": candidate.text}],
210+
}
211+
)
204212

205213
elif hasattr(response, "text") and response.text:
206-
output.append({
207-
"role": "assistant",
208-
"content": [{"type": "text", "text": response.text}],
209-
})
214+
output.append(
215+
{
216+
"role": "assistant",
217+
"content": [{"type": "text", "text": response.text}],
218+
}
219+
)
210220

211221
return output
212222

@@ -376,24 +386,30 @@ def format_gemini_streaming_output(
376386
elif item.get("type") == "function":
377387
# If we have accumulated text, add it first
378388
if text_parts:
379-
content.append({
380-
"type": "text",
381-
"text": "".join(text_parts),
382-
})
389+
content.append(
390+
{
391+
"type": "text",
392+
"text": "".join(text_parts),
393+
}
394+
)
383395
text_parts = []
384396

385397
# Add the function call
386-
content.append({
387-
"type": "function",
388-
"function": item.get("function", {}),
389-
})
398+
content.append(
399+
{
400+
"type": "function",
401+
"function": item.get("function", {}),
402+
}
403+
)
390404

391405
# Add any remaining text
392406
if text_parts:
393-
content.append({
394-
"type": "text",
395-
"text": "".join(text_parts),
396-
})
407+
content.append(
408+
{
409+
"type": "text",
410+
"text": "".join(text_parts),
411+
}
412+
)
397413

398414
# If we have content, return it
399415
if content:
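The streaming hunk above coalesces adjacent text deltas and flushes them before each function call. A standalone sketch of that pattern, with hypothetical input items that are not part of the commit:

# Illustrative only: same coalescing pattern as format_gemini_streaming_output above.
items = [
    {"type": "text", "text": "Hel"},
    {"type": "text", "text": "lo"},
    {"type": "function", "function": {"name": "lookup", "arguments": {"q": "demo"}}},
    {"type": "text", "text": "done"},
]

content = []
text_parts = []
for item in items:
    if item.get("type") == "text":
        text_parts.append(item.get("text", ""))
    elif item.get("type") == "function":
        # Flush accumulated text before the function call
        if text_parts:
            content.append({"type": "text", "text": "".join(text_parts)})
            text_parts = []
        content.append({"type": "function", "function": item.get("function", {})})

# Flush any remaining text
if text_parts:
    content.append({"type": "text", "text": "".join(text_parts)})

print(content)
# [{'type': 'text', 'text': 'Hello'},
#  {'type': 'function', 'function': {'name': 'lookup', 'arguments': {'q': 'demo'}}},
#  {'type': 'text', 'text': 'done'}]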

posthog/ai/openai/openai_converter.py

Lines changed: 50 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -48,27 +48,33 @@ def format_openai_response(response: Any) -> List[FormattedMessage]:
4848
role = choice.message.role
4949

5050
if choice.message.content:
51-
content.append({
52-
"type": "text",
53-
"text": choice.message.content,
54-
})
51+
content.append(
52+
{
53+
"type": "text",
54+
"text": choice.message.content,
55+
}
56+
)
5557

5658
if hasattr(choice.message, "tool_calls") and choice.message.tool_calls:
5759
for tool_call in choice.message.tool_calls:
58-
content.append({
59-
"type": "function",
60-
"id": tool_call.id,
61-
"function": {
62-
"name": tool_call.function.name,
63-
"arguments": tool_call.function.arguments,
64-
},
65-
})
60+
content.append(
61+
{
62+
"type": "function",
63+
"id": tool_call.id,
64+
"function": {
65+
"name": tool_call.function.name,
66+
"arguments": tool_call.function.arguments,
67+
},
68+
}
69+
)
6670

6771
if content:
68-
output.append({
69-
"role": role,
70-
"content": content,
71-
})
72+
output.append(
73+
{
74+
"role": role,
75+
"content": content,
76+
}
77+
)
7278

7379
# Handle Responses API format
7480
if hasattr(response, "output"):
@@ -86,10 +92,12 @@ def format_openai_response(response: Any) -> List[FormattedMessage]:
8692
and content_item.type == "output_text"
8793
and hasattr(content_item, "text")
8894
):
89-
content.append({
90-
"type": "text",
91-
"text": content_item.text,
92-
})
95+
content.append(
96+
{
97+
"type": "text",
98+
"text": content_item.text,
99+
}
100+
)
93101

94102
elif hasattr(content_item, "text"):
95103
content.append({"type": "text", "text": content_item.text})
@@ -110,20 +118,24 @@ def format_openai_response(response: Any) -> List[FormattedMessage]:
110118
content.append(text_content)
111119

112120
elif hasattr(item, "type") and item.type == "function_call":
113-
content.append({
114-
"type": "function",
115-
"id": getattr(item, "call_id", getattr(item, "id", "")),
116-
"function": {
117-
"name": item.name,
118-
"arguments": getattr(item, "arguments", {}),
119-
},
120-
})
121+
content.append(
122+
{
123+
"type": "function",
124+
"id": getattr(item, "call_id", getattr(item, "id", "")),
125+
"function": {
126+
"name": item.name,
127+
"arguments": getattr(item, "arguments", {}),
128+
},
129+
}
130+
)
121131

122132
if content:
123-
output.append({
124-
"role": role,
125-
"content": content,
126-
})
133+
output.append(
134+
{
135+
"role": role,
136+
"content": content,
137+
}
138+
)
127139

128140
return output
129141

@@ -149,10 +161,12 @@ def format_openai_input(
149161
# Handle Chat Completions API format
150162
if messages is not None:
151163
for msg in messages:
152-
formatted_messages.append({
153-
"role": msg.get("role", "user"),
154-
"content": msg.get("content", ""),
155-
})
164+
formatted_messages.append(
165+
{
166+
"role": msg.get("role", "user"),
167+
"content": msg.get("content", ""),
168+
}
169+
)
156170

157171
# Handle Responses API format
158172
if input_data is not None:
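A minimal sketch of the Chat Completions branch touched above, using SimpleNamespace stand-ins for the SDK response objects. The stand-ins and sample values are illustrative, not the real openai types:

from types import SimpleNamespace

# Illustrative stand-in for one chat.completions choice with a tool call.
choice = SimpleNamespace(
    message=SimpleNamespace(
        role="assistant",
        content="Checking the weather.",
        tool_calls=[
            SimpleNamespace(
                id="call_1",
                function=SimpleNamespace(name="get_weather", arguments='{"city": "Berlin"}'),
            )
        ],
    )
)

content = []
role = choice.message.role

if choice.message.content:
    content.append({"type": "text", "text": choice.message.content})

if hasattr(choice.message, "tool_calls") and choice.message.tool_calls:
    for tool_call in choice.message.tool_calls:
        content.append(
            {
                "type": "function",
                "id": tool_call.id,
                "function": {
                    "name": tool_call.function.name,
                    "arguments": tool_call.function.arguments,
                },
            }
        )

output = []
if content:
    output.append({"role": role, "content": content})

print(output)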

posthog/ai/utils.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -14,17 +14,15 @@
1414

1515

1616
def merge_usage_stats(
17-
target: Dict[str, int],
18-
source: StreamingUsageStats,
19-
mode: str = "incremental"
17+
target: Dict[str, int], source: StreamingUsageStats, mode: str = "incremental"
2018
) -> None:
2119
"""
2220
Merge streaming usage statistics into target dict, handling None values.
23-
21+
2422
Supports two modes:
2523
- "incremental": Add source values to target (for APIs that report new tokens)
2624
- "cumulative": Replace target with source values (for APIs that report totals)
27-
25+
2826
Args:
2927
target: Dictionary to update with usage stats
3028
source: StreamingUsageStats that may contain None values
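The docstring above documents two merge modes. A rough sketch of those semantics, written as an illustration of the documented behavior rather than the function body from this commit (a plain dict stands in for StreamingUsageStats):

from typing import Dict, Optional

def merge_usage_stats_sketch(
    target: Dict[str, int], source: Dict[str, Optional[int]], mode: str = "incremental"
) -> None:
    """Illustrative: skip None values; add in incremental mode, replace in cumulative mode."""
    for key, value in source.items():
        if value is None:
            continue
        if mode == "incremental":
            target[key] = target.get(key, 0) + value
        else:  # "cumulative"
            target[key] = value

usage = {"input_tokens": 10}
merge_usage_stats_sketch(usage, {"input_tokens": 5, "output_tokens": None})
# incremental: usage == {"input_tokens": 15}
merge_usage_stats_sketch(usage, {"input_tokens": 40, "output_tokens": 12}, mode="cumulative")
# cumulative: usage == {"input_tokens": 40, "output_tokens": 12}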
