
Commit 43af2c3

fix(llma): extract converters for providers
1 parent 4edab3d commit 43af2c3

8 files changed: +665 -246 lines changed

posthog/ai/anthropic/__init__.py

Lines changed: 10 additions & 0 deletions
@@ -6,6 +6,12 @@
     AsyncAnthropicBedrock,
     AsyncAnthropicVertex,
 )
+from .anthropic_converter import (
+    format_anthropic_response,
+    format_anthropic_input,
+    extract_anthropic_tools,
+    format_anthropic_streaming_content,
+)
 
 __all__ = [
     "Anthropic",
@@ -14,4 +20,8 @@
     "AsyncAnthropicBedrock",
     "AnthropicVertex",
     "AsyncAnthropicVertex",
+    "format_anthropic_response",
+    "format_anthropic_input",
+    "extract_anthropic_tools",
+    "format_anthropic_streaming_content",
 ]

posthog/ai/anthropic/anthropic_converter.py
Lines changed: 150 additions & 0 deletions
@@ -0,0 +1,150 @@
"""
Anthropic-specific conversion utilities.

This module handles the conversion of Anthropic API responses and inputs
into standardized formats for PostHog tracking.
"""

from typing import Any, Dict, List, Optional

from posthog.ai.types import (
    FormattedContentItem,
    FormattedFunctionCall,
    FormattedMessage,
    FormattedTextContent,
)


def format_anthropic_response(response: Any) -> List[FormattedMessage]:
    """
    Format an Anthropic response into standardized message format.

    Args:
        response: The response object from Anthropic API

    Returns:
        List of formatted messages with role and content
    """
    output = []

    if response is None:
        return output

    content: List[FormattedContentItem] = []

    # Process content blocks from the response
    if hasattr(response, "content"):
        for choice in response.content:
            if (
                hasattr(choice, "type")
                and choice.type == "text"
                and hasattr(choice, "text")
                and choice.text
            ):
                text_content: FormattedTextContent = {
                    "type": "text",
                    "text": choice.text
                }
                content.append(text_content)
            elif (
                hasattr(choice, "type")
                and choice.type == "tool_use"
                and hasattr(choice, "name")
                and hasattr(choice, "id")
            ):
                function_call: FormattedFunctionCall = {
                    "type": "function",
                    "id": choice.id,
                    "function": {
                        "name": choice.name,
                        "arguments": getattr(choice, "input", {}),
                    }
                }
                content.append(function_call)

    if content:
        message: FormattedMessage = {
            "role": "assistant",
            "content": content,
        }
        output.append(message)

    return output


def format_anthropic_input(messages: List[Dict[str, Any]], system: Optional[str] = None) -> List[FormattedMessage]:
    """
    Format Anthropic input messages with optional system prompt.

    Args:
        messages: List of message dictionaries
        system: Optional system prompt to prepend

    Returns:
        List of formatted messages
    """
    formatted_messages: List[FormattedMessage] = []

    # Add system message if provided
    if system is not None:
        formatted_messages.append({
            "role": "system",
            "content": system
        })

    # Add user messages
    if messages:
        for msg in messages:
            # Messages are already in the correct format, just ensure type safety
            formatted_msg: FormattedMessage = {
                "role": msg.get("role", "user"),
                "content": msg.get("content", "")
            }
            formatted_messages.append(formatted_msg)

    return formatted_messages


def extract_anthropic_tools(kwargs: Dict[str, Any]) -> Optional[Any]:
    """
    Extract tool definitions from Anthropic API kwargs.

    Args:
        kwargs: Keyword arguments passed to Anthropic API

    Returns:
        Tool definitions if present, None otherwise
    """
    return kwargs.get("tools", None)


def format_anthropic_streaming_content(content_blocks: List[Dict[str, Any]]) -> List[FormattedContentItem]:
    """
    Format content blocks from Anthropic streaming response.

    Used by streaming handlers to format accumulated content blocks.

    Args:
        content_blocks: List of content block dictionaries from streaming

    Returns:
        List of formatted content items
    """
    formatted: List[FormattedContentItem] = []

    for block in content_blocks:
        if block.get("type") == "text":
            text_content: FormattedTextContent = {
                "type": "text",
                "text": block.get("text", "")
            }
            formatted.append(text_content)
        elif block.get("type") == "function":
            function_call: FormattedFunctionCall = {
                "type": "function",
                "id": block.get("id"),
                "function": block.get("function", {})
            }
            formatted.append(function_call)

    return formatted
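
For context, here is a minimal usage sketch of the new Anthropic converters. It is not part of the commit: the SimpleNamespace objects merely stand in for the Anthropic SDK's content-block objects, and names such as "get_weather" and "toolu_123" are made up for illustration.

# Usage sketch (illustrative only): SimpleNamespace stands in for
# Anthropic SDK content blocks; tool name and id are hypothetical.
from types import SimpleNamespace

from posthog.ai.anthropic import (
    format_anthropic_input,
    format_anthropic_response,
)

# Input side: plain message dicts, with the system prompt prepended
# as its own message.
formatted_input = format_anthropic_input(
    [{"role": "user", "content": "What's the weather in Paris?"}],
    system="You are a terse assistant.",
)
# -> [{"role": "system", "content": "You are a terse assistant."},
#     {"role": "user", "content": "What's the weather in Paris?"}]

# Output side: a stand-in response whose .content mimics Anthropic's
# "text" and "tool_use" blocks.
fake_response = SimpleNamespace(
    content=[
        SimpleNamespace(type="text", text="Let me check."),
        SimpleNamespace(
            type="tool_use",
            id="toolu_123",
            name="get_weather",
            input={"city": "Paris"},
        ),
    ]
)
formatted_output = format_anthropic_response(fake_response)
# -> a single assistant message whose content holds one {"type": "text"}
#    item and one {"type": "function"} call named "get_weather".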

posthog/ai/gemini/__init__.py

Lines changed: 12 additions & 2 deletions
@@ -1,5 +1,9 @@
 from .gemini import Client
-from .gemini_converter import format_gemini_input, FormattedMessage
+from .gemini_converter import (
+    format_gemini_input,
+    format_gemini_response,
+    extract_gemini_tools,
+)
 
 
 # Create a genai-like module for perfect drop-in replacement
@@ -9,4 +13,10 @@ class _GenAI:
 
 genai = _GenAI()
 
-__all__ = ["Client", "genai", "format_gemini_input", "FormattedMessage"]
+__all__ = [
+    "Client",
+    "genai",
+    "format_gemini_input",
+    "format_gemini_response",
+    "extract_gemini_tools",
+]

posthog/ai/gemini/gemini_converter.py

Lines changed: 90 additions & 10 deletions
@@ -1,11 +1,18 @@
 """
-Gemini input format converter module.
+Gemini-specific conversion utilities.
 
-This module handles the conversion of various Gemini input formats into a standardized
-format for PostHog tracking. It eliminates code duplication between gemini.py and utils.py.
+This module handles the conversion of Gemini API responses and inputs
+into standardized formats for PostHog tracking.
 """
 
-from typing import Any, Dict, List, TypedDict, Union
+from typing import Any, Dict, List, Optional, TypedDict, Union
+
+from posthog.ai.types import (
+    FormattedContentItem,
+    FormattedFunctionCall,
+    FormattedMessage,
+    FormattedTextContent,
+)
 
 
 class GeminiPart(TypedDict, total=False):
@@ -21,12 +28,6 @@ class GeminiMessage(TypedDict, total=False):
     text: str
 
 
-class FormattedMessage(TypedDict):
-    """Standardized message format for PostHog tracking."""
-    role: str
-    content: str
-
-
 def _extract_text_from_parts(parts: List[Any]) -> str:
     """
     Extract and concatenate text from a parts array.
@@ -131,6 +132,85 @@ def _format_object_message(item: Any) -> FormattedMessage:
     return {"role": "user", "content": str(item)}
 
 
+def format_gemini_response(response: Any) -> List[FormattedMessage]:
+    """
+    Format a Gemini response into standardized message format.
+
+    Args:
+        response: The response object from Gemini API
+
+    Returns:
+        List of formatted messages with role and content
+    """
+    output = []
+
+    if response is None:
+        return output
+
+    if hasattr(response, "candidates") and response.candidates:
+        for candidate in response.candidates:
+            if hasattr(candidate, "content") and candidate.content:
+                content: List[FormattedContentItem] = []
+
+                if hasattr(candidate.content, "parts") and candidate.content.parts:
+                    for part in candidate.content.parts:
+                        if hasattr(part, "text") and part.text:
+                            text_content: FormattedTextContent = {
+                                "type": "text",
+                                "text": part.text
+                            }
+                            content.append(text_content)
+                        elif hasattr(part, "function_call") and part.function_call:
+                            function_call = part.function_call
+                            func_content: FormattedFunctionCall = {
+                                "type": "function",
+                                "function": {
+                                    "name": function_call.name,
+                                    "arguments": function_call.args,
+                                }
+                            }
+                            content.append(func_content)
+
+                if content:
+                    message: FormattedMessage = {
+                        "role": "assistant",
+                        "content": content,
+                    }
+                    output.append(message)
+
+            elif hasattr(candidate, "text") and candidate.text:
+                message: FormattedMessage = {
+                    "role": "assistant",
+                    "content": [{"type": "text", "text": candidate.text}],
+                }
+                output.append(message)
+
+    elif hasattr(response, "text") and response.text:
+        message: FormattedMessage = {
+            "role": "assistant",
+            "content": [{"type": "text", "text": response.text}],
+        }
+        output.append(message)
+
+    return output
+
+
+def extract_gemini_tools(kwargs: Dict[str, Any]) -> Optional[Any]:
+    """
+    Extract tool definitions from Gemini API kwargs.
+
+    Args:
+        kwargs: Keyword arguments passed to Gemini API
+
+    Returns:
+        Tool definitions if present, None otherwise
+    """
+    if "config" in kwargs and hasattr(kwargs["config"], "tools"):
+        return kwargs["config"].tools
+
+    return None
+
+
 def format_gemini_input(contents: Any) -> List[FormattedMessage]:
     """
     Format Gemini input contents into standardized message format for PostHog tracking.
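
A similar sketch for the new Gemini helpers, again illustrative rather than part of the commit: SimpleNamespace stands in for the Gemini SDK's response, candidate, part, and config objects, and the tool spec is hypothetical.

# Usage sketch (illustrative only).
from types import SimpleNamespace

from posthog.ai.gemini import extract_gemini_tools, format_gemini_response

# extract_gemini_tools looks for a `config` kwarg exposing a `.tools` attribute.
weather_tool = {"function_declarations": [{"name": "get_weather"}]}  # hypothetical tool spec
tools = extract_gemini_tools({"config": SimpleNamespace(tools=[weather_tool])})
# -> [weather_tool]

# format_gemini_response walks candidates -> content -> parts, collecting
# text parts and function_call parts into one assistant message.
fake_response = SimpleNamespace(
    candidates=[
        SimpleNamespace(
            content=SimpleNamespace(
                parts=[
                    SimpleNamespace(text="Checking the forecast.", function_call=None),
                    SimpleNamespace(
                        text=None,
                        function_call=SimpleNamespace(
                            name="get_weather", args={"city": "Paris"}
                        ),
                    ),
                ]
            )
        )
    ]
)
formatted = format_gemini_response(fake_response)
# -> [{"role": "assistant",
#      "content": [{"type": "text", "text": "Checking the forecast."},
#                  {"type": "function",
#                   "function": {"name": "get_weather",
#                                "arguments": {"city": "Paris"}}}]}]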

posthog/ai/openai/__init__.py

Lines changed: 16 additions & 1 deletion
@@ -1,5 +1,20 @@
 from .openai import OpenAI
 from .openai_async import AsyncOpenAI
 from .openai_providers import AsyncAzureOpenAI, AzureOpenAI
+from .openai_converter import (
+    format_openai_response,
+    format_openai_input,
+    extract_openai_tools,
+    format_openai_streaming_content,
+)
 
-__all__ = ["OpenAI", "AsyncOpenAI", "AzureOpenAI", "AsyncAzureOpenAI"]
+__all__ = [
+    "OpenAI",
+    "AsyncOpenAI",
+    "AzureOpenAI",
+    "AsyncAzureOpenAI",
+    "format_openai_response",
+    "format_openai_input",
+    "extract_openai_tools",
+    "format_openai_streaming_content",
+]
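
The openai_converter module itself is among the remaining files of this commit but is not shown in this excerpt, so nothing is assumed here about its behavior. The re-export only means the helpers can be imported at the package level, mirroring the Anthropic and Gemini packages:

# Import sketch only: the openai_converter implementations are not shown
# in this excerpt, so this demonstrates the new import surface, nothing more.
from posthog.ai.openai import (
    extract_openai_tools,
    format_openai_input,
    format_openai_response,
    format_openai_streaming_content,
)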
