
Commit b9ac3cc

feat: add support for responses api
1 parent 332a6ff commit b9ac3cc

6 files changed: +495 −22 lines

CHANGELOG.md

Lines changed: 6 additions & 0 deletions
@@ -1,3 +1,9 @@
+
+
+## 3.20.0 – 2025-03-13
+
+1. Add support for OpenAI Responses API.
+
 ## 3.19.2 – 2025-03-11

 1. Fix install requirements for analytics package
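
For context, here is a minimal usage sketch of the feature this release adds. The import path follows the files changed below; the API key, model name, and distinct ID are illustrative placeholders, not part of this commit:

    from posthog import Posthog
    from posthog.ai.openai import OpenAI

    posthog = Posthog("<ph_project_api_key>", host="https://us.i.posthog.com")
    client = OpenAI(posthog_client=posthog, api_key="sk-...")  # remaining kwargs go to openai.OpenAI

    # Non-streaming call: the wrapped create() forwards to OpenAI and captures
    # an $ai_generation event with token usage, latency, and trace metadata.
    response = client.responses.create(
        model="gpt-4o-mini",                    # placeholder model name
        input="Write a haiku about analytics.",
        posthog_distinct_id="user_123",         # optional; omit for anonymous events
        posthog_properties={"source": "demo"},  # merged into the captured event
    )
    print(response.output[0])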

posthog/ai/openai/openai.py

Lines changed: 167 additions & 0 deletions
@@ -32,6 +32,167 @@ def __init__(self, posthog_client: PostHogClient, **kwargs):
         self.chat = WrappedChat(self)
         self.embeddings = WrappedEmbeddings(self)
         self.beta = WrappedBeta(self)
+        self.responses = WrappedResponses(self)
+
+
+class WrappedResponses(openai.resources.responses.Responses):
+    _client: OpenAI
+
+    def create(
+        self,
+        posthog_distinct_id: Optional[str] = None,
+        posthog_trace_id: Optional[str] = None,
+        posthog_properties: Optional[Dict[str, Any]] = None,
+        posthog_privacy_mode: bool = False,
+        posthog_groups: Optional[Dict[str, Any]] = None,
+        **kwargs: Any,
+    ):
+        if posthog_trace_id is None:
+            posthog_trace_id = uuid.uuid4()
+
+        if kwargs.get("stream", False):
+            return self._create_streaming(
+                posthog_distinct_id,
+                posthog_trace_id,
+                posthog_properties,
+                posthog_privacy_mode,
+                posthog_groups,
+                **kwargs,
+            )
+
+        return call_llm_and_track_usage(
+            posthog_distinct_id,
+            self._client._ph_client,
+            "openai",
+            posthog_trace_id,
+            posthog_properties,
+            posthog_privacy_mode,
+            posthog_groups,
+            self._client.base_url,
+            super().create,
+            **kwargs,
+        )
+
+    def _create_streaming(
+        self,
+        posthog_distinct_id: Optional[str],
+        posthog_trace_id: Optional[str],
+        posthog_properties: Optional[Dict[str, Any]],
+        posthog_privacy_mode: bool,
+        posthog_groups: Optional[Dict[str, Any]],
+        **kwargs: Any,
+    ):
+        start_time = time.time()
+        usage_stats: Dict[str, int] = {}
+        final_content = []
+        response = super().create(**kwargs)
+
+        def generator():
+            nonlocal usage_stats
+            nonlocal final_content
+
+            try:
+                for chunk in response:
+                    if hasattr(chunk, "type") and chunk.type == "response.completed":
+                        res = chunk.response
+                        if res.output and len(res.output) > 0:
+                            final_content.append(res.output[0])
+
+                    if hasattr(chunk, "usage") and chunk.usage:
+                        usage_stats = {
+                            k: getattr(chunk.usage, k, 0)
+                            for k in [
+                                "input_tokens",
+                                "output_tokens",
+                                "total_tokens",
+                            ]
+                        }
+
+                        # Add support for reasoning and cached tokens
+                        if hasattr(chunk.usage, "output_tokens_details") and hasattr(
+                            chunk.usage.output_tokens_details, "reasoning_tokens"
+                        ):
+                            usage_stats["reasoning_tokens"] = chunk.usage.output_tokens_details.reasoning_tokens
+
+                        if hasattr(chunk.usage, "input_tokens_details") and hasattr(
+                            chunk.usage.input_tokens_details, "cached_tokens"
+                        ):
+                            usage_stats["cache_read_input_tokens"] = chunk.usage.input_tokens_details.cached_tokens
+
+                    yield chunk
+
+            finally:
+                end_time = time.time()
+                latency = end_time - start_time
+                output = final_content
+                self._capture_streaming_event(
+                    posthog_distinct_id,
+                    posthog_trace_id,
+                    posthog_properties,
+                    posthog_privacy_mode,
+                    posthog_groups,
+                    kwargs,
+                    usage_stats,
+                    latency,
+                    output,
+                )
+
+        return generator()
+
+    def _capture_streaming_event(
+        self,
+        posthog_distinct_id: Optional[str],
+        posthog_trace_id: Optional[str],
+        posthog_properties: Optional[Dict[str, Any]],
+        posthog_privacy_mode: bool,
+        posthog_groups: Optional[Dict[str, Any]],
+        kwargs: Dict[str, Any],
+        usage_stats: Dict[str, int],
+        latency: float,
+        output: Any,
+        tool_calls: Optional[List[Dict[str, Any]]] = None,
+    ):
+        if posthog_trace_id is None:
+            posthog_trace_id = uuid.uuid4()
+
+        event_properties = {
+            "$ai_provider": "openai",
+            "$ai_model": kwargs.get("model"),
+            "$ai_model_parameters": get_model_params(kwargs),
+            "$ai_input": with_privacy_mode(self._client._ph_client, posthog_privacy_mode, kwargs.get("input")),
+            "$ai_output_choices": with_privacy_mode(
+                self._client._ph_client,
+                posthog_privacy_mode,
+                output,
+            ),
+            "$ai_http_status": 200,
+            "$ai_input_tokens": usage_stats.get("input_tokens", 0),
+            "$ai_output_tokens": usage_stats.get("output_tokens", 0),
+            "$ai_cache_read_input_tokens": usage_stats.get("cache_read_input_tokens", 0),
+            "$ai_reasoning_tokens": usage_stats.get("reasoning_tokens", 0),
+            "$ai_latency": latency,
+            "$ai_trace_id": posthog_trace_id,
+            "$ai_base_url": str(self._client.base_url),
+            **(posthog_properties or {}),
+        }
+
+        if tool_calls:
+            event_properties["$ai_tools"] = with_privacy_mode(
+                self._client._ph_client,
+                posthog_privacy_mode,
+                tool_calls,
+            )
+
+        if posthog_distinct_id is None:
+            event_properties["$process_person_profile"] = False
+
+        if hasattr(self._client._ph_client, "capture"):
+            self._client._ph_client.capture(
+                distinct_id=posthog_distinct_id or posthog_trace_id,
+                event="$ai_generation",
+                properties=event_properties,
+                groups=posthog_groups,
+            )


 class WrappedChat(openai.resources.chat.Chat):

@@ -121,6 +282,11 @@ def generator():
                         ):
                             usage_stats["cache_read_input_tokens"] = chunk.usage.prompt_tokens_details.cached_tokens

+                        if hasattr(chunk.usage, "completion_tokens_details") and hasattr(
+                            chunk.usage.completion_tokens_details, "reasoning_tokens"
+                        ):
+                            usage_stats["reasoning_tokens"] = chunk.usage.completion_tokens_details.reasoning_tokens
+
                     if hasattr(chunk, "choices") and chunk.choices and len(chunk.choices) > 0:
                         if chunk.choices[0].delta and chunk.choices[0].delta.content:
                             content = chunk.choices[0].delta.content

@@ -191,6 +357,7 @@ def _capture_streaming_event(
             "$ai_input_tokens": usage_stats.get("prompt_tokens", 0),
             "$ai_output_tokens": usage_stats.get("completion_tokens", 0),
             "$ai_cache_read_input_tokens": usage_stats.get("cache_read_input_tokens", 0),
+            "$ai_reasoning_tokens": usage_stats.get("reasoning_tokens", 0),
             "$ai_latency": latency,
             "$ai_trace_id": posthog_trace_id,
             "$ai_base_url": str(self._client.base_url),

posthog/ai/openai/openai_async.py

Lines changed: 164 additions & 1 deletion
@@ -31,6 +31,169 @@ def __init__(self, posthog_client: PostHogClient, **kwargs):
         self.chat = WrappedChat(self)
         self.embeddings = WrappedEmbeddings(self)
         self.beta = WrappedBeta(self)
+        self.responses = WrappedResponses(self)
+
+
+class WrappedResponses(openai.resources.responses.AsyncResponses):
+    _client: AsyncOpenAI
+
+    async def create(
+        self,
+        posthog_distinct_id: Optional[str] = None,
+        posthog_trace_id: Optional[str] = None,
+        posthog_properties: Optional[Dict[str, Any]] = None,
+        posthog_privacy_mode: bool = False,
+        posthog_groups: Optional[Dict[str, Any]] = None,
+        **kwargs: Any,
+    ):
+        if posthog_trace_id is None:
+            posthog_trace_id = uuid.uuid4()
+
+        if kwargs.get("stream", False):
+            return await self._create_streaming(
+                posthog_distinct_id,
+                posthog_trace_id,
+                posthog_properties,
+                posthog_privacy_mode,
+                posthog_groups,
+                **kwargs,
+            )
+
+        return await call_llm_and_track_usage_async(
+            posthog_distinct_id,
+            self._client._ph_client,
+            "openai",
+            posthog_trace_id,
+            posthog_properties,
+            posthog_privacy_mode,
+            posthog_groups,
+            self._client.base_url,
+            super().create,
+            **kwargs,
+        )
+
+    async def _create_streaming(
+        self,
+        posthog_distinct_id: Optional[str],
+        posthog_trace_id: Optional[str],
+        posthog_properties: Optional[Dict[str, Any]],
+        posthog_privacy_mode: bool,
+        posthog_groups: Optional[Dict[str, Any]],
+        **kwargs: Any,
+    ):
+        start_time = time.time()
+        usage_stats: Dict[str, int] = {}
+        final_content = []
+        response = await super().create(**kwargs)
+
+        async def async_generator():
+            nonlocal usage_stats
+            nonlocal final_content
+
+            try:
+                async for chunk in response:
+                    if hasattr(chunk, "type") and chunk.type == "response.completed":
+                        res = chunk.response
+                        if res.output and len(res.output) > 0:
+                            final_content.append(res.output[0])
+
+                    if hasattr(chunk, "usage") and chunk.usage:
+                        usage_stats = {
+                            k: getattr(chunk.usage, k, 0)
+                            for k in [
+                                "input_tokens",
+                                "output_tokens",
+                                "total_tokens",
+                            ]
+                        }
+
+                        # Add support for reasoning and cached tokens
+                        if hasattr(chunk.usage, "output_tokens_details") and hasattr(
+                            chunk.usage.output_tokens_details, "reasoning_tokens"
+                        ):
+                            usage_stats["reasoning_tokens"] = chunk.usage.output_tokens_details.reasoning_tokens
+
+                        if hasattr(chunk.usage, "input_tokens_details") and hasattr(
+                            chunk.usage.input_tokens_details, "cached_tokens"
+                        ):
+                            usage_stats["cache_read_input_tokens"] = chunk.usage.input_tokens_details.cached_tokens
+
+                    yield chunk
+
+            finally:
+                end_time = time.time()
+                latency = end_time - start_time
+                output = final_content
+                await self._capture_streaming_event(
+                    posthog_distinct_id,
+                    posthog_trace_id,
+                    posthog_properties,
+                    posthog_privacy_mode,
+                    posthog_groups,
+                    kwargs,
+                    usage_stats,
+                    latency,
+                    output,
+                )
+
+        return async_generator()
+
+    async def _capture_streaming_event(
+        self,
+        posthog_distinct_id: Optional[str],
+        posthog_trace_id: Optional[str],
+        posthog_properties: Optional[Dict[str, Any]],
+        posthog_privacy_mode: bool,
+        posthog_groups: Optional[Dict[str, Any]],
+        kwargs: Dict[str, Any],
+        usage_stats: Dict[str, int],
+        latency: float,
+        output: Any,
+        tool_calls: Optional[List[Dict[str, Any]]] = None,
+    ):
+        if posthog_trace_id is None:
+            posthog_trace_id = uuid.uuid4()
+
+        event_properties = {
+            "$ai_provider": "openai",
+            "$ai_model": kwargs.get("model"),
+            "$ai_model_parameters": get_model_params(kwargs),
+            "$ai_input": with_privacy_mode(self._client._ph_client, posthog_privacy_mode, kwargs.get("input")),
+            "$ai_output_choices": with_privacy_mode(
+                self._client._ph_client,
+                posthog_privacy_mode,
+                output,
+            ),
+            "$ai_http_status": 200,
+            "$ai_input_tokens": usage_stats.get("input_tokens", 0),
+            "$ai_output_tokens": usage_stats.get("output_tokens", 0),
+            "$ai_cache_read_input_tokens": usage_stats.get("cache_read_input_tokens", 0),
+            "$ai_reasoning_tokens": usage_stats.get("reasoning_tokens", 0),
+            "$ai_latency": latency,
+            "$ai_trace_id": posthog_trace_id,
+            "$ai_base_url": str(self._client.base_url),
+            **(posthog_properties or {}),
+        }
+
+        if tool_calls:
+            event_properties["$ai_tools"] = with_privacy_mode(
+                self._client._ph_client,
+                posthog_privacy_mode,
+                tool_calls,
+            )
+
+        if posthog_distinct_id is None:
+            event_properties["$process_person_profile"] = False
+
+        if hasattr(self._client._ph_client, "capture"):
+            await self._client._ph_client.capture(
+                distinct_id=posthog_distinct_id or posthog_trace_id,
+                event="$ai_generation",
+                properties=event_properties,
+                groups=posthog_groups,
+            )
+
+


 class WrappedChat(openai.resources.chat.AsyncChat):

@@ -206,7 +369,7 @@ async def _capture_streaming_event(
             event_properties["$process_person_profile"] = False

         if hasattr(self._client._ph_client, "capture"):
-            self._client._ph_client.capture(
+            await self._client._ph_client.capture(
                 distinct_id=posthog_distinct_id or posthog_trace_id,
                 event="$ai_generation",
                 properties=event_properties,
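
The async client mirrors the sync flow, with awaits on the underlying create call and on capture. A hedged sketch, assuming the module exports an AsyncOpenAI wrapper alongside the sync one (model name and key remain placeholders):

    import asyncio
    from posthog import Posthog
    from posthog.ai.openai import AsyncOpenAI  # assumed async counterpart

    async def main():
        posthog = Posthog("<ph_project_api_key>", host="https://us.i.posthog.com")
        client = AsyncOpenAI(posthog_client=posthog, api_key="sk-...")
        stream = await client.responses.create(
            model="gpt-4o-mini",  # placeholder model name
            input="Stream a short answer.",
            stream=True,
        )
        async for chunk in stream:
            pass  # consume; usage and latency are captured when the stream ends

    asyncio.run(main())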
