@@ -32,6 +32,167 @@ def __init__(self, posthog_client: PostHogClient, **kwargs):
3232 self .chat = WrappedChat (self )
3333 self .embeddings = WrappedEmbeddings (self )
3434 self .beta = WrappedBeta (self )
35+ self .responses = WrappedResponses (self )
36+
37+
class WrappedResponses(openai.resources.responses.Responses):
    """OpenAI Responses API wrapper that reports usage analytics to PostHog.

    Mirrors ``WrappedChat``: every ``create`` call (streaming or not) emits a
    ``$ai_generation`` event carrying token usage, latency, and trace metadata
    to the PostHog client attached to the wrapping OpenAI client.
    """

    _client: OpenAI

    def create(
        self,
        posthog_distinct_id: Optional[str] = None,
        posthog_trace_id: Optional[str] = None,
        posthog_properties: Optional[Dict[str, Any]] = None,
        posthog_privacy_mode: bool = False,
        posthog_groups: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ):
        """Create a response and capture a PostHog ``$ai_generation`` event.

        Args:
            posthog_distinct_id: User to attribute the event to; when omitted,
                the event is captured without a person profile.
            posthog_trace_id: Trace identifier; a fresh UUID is generated when
                not supplied so streaming and non-streaming paths always have one.
            posthog_properties: Extra properties merged into the event.
            posthog_privacy_mode: When True, input/output payloads are redacted
                via ``with_privacy_mode``.
            posthog_groups: PostHog group analytics mapping.
            **kwargs: Forwarded unchanged to the underlying OpenAI
                ``Responses.create`` call (``stream=True`` selects the
                streaming path).
        """
        if posthog_trace_id is None:
            posthog_trace_id = uuid.uuid4()

        if kwargs.get("stream", False):
            return self._create_streaming(
                posthog_distinct_id,
                posthog_trace_id,
                posthog_properties,
                posthog_privacy_mode,
                posthog_groups,
                **kwargs,
            )

        return call_llm_and_track_usage(
            posthog_distinct_id,
            self._client._ph_client,
            "openai",
            posthog_trace_id,
            posthog_properties,
            posthog_privacy_mode,
            posthog_groups,
            self._client.base_url,
            super().create,
            **kwargs,
        )

    def _create_streaming(
        self,
        posthog_distinct_id: Optional[str],
        posthog_trace_id: Optional[str],
        posthog_properties: Optional[Dict[str, Any]],
        posthog_privacy_mode: bool,
        posthog_groups: Optional[Dict[str, Any]],
        **kwargs: Any,
    ):
        """Run a streaming ``create`` and capture analytics when the stream ends.

        Returns a generator that re-yields every chunk from the underlying
        stream; the PostHog event is emitted from the generator's ``finally``
        block, so it fires whether the stream completes or the consumer bails
        out early.
        """
        start_time = time.time()
        usage_stats: Dict[str, int] = {}
        final_content = []
        response = super().create(**kwargs)

        def generator():
            nonlocal usage_stats
            nonlocal final_content

            try:
                for chunk in response:
                    if hasattr(chunk, "type") and chunk.type == "response.completed":
                        res = chunk.response
                        # NOTE(review): only the first output item is recorded;
                        # multi-item responses lose items beyond output[0] — confirm intended.
                        if res.output and len(res.output) > 0:
                            final_content.append(res.output[0])

                    if hasattr(chunk, "usage") and chunk.usage:
                        usage_stats = {
                            k: getattr(chunk.usage, k, 0)
                            for k in [
                                "input_tokens",
                                "output_tokens",
                                "total_tokens",
                            ]
                        }

                        # Reasoning tokens are nested under output_tokens_details.
                        if hasattr(chunk.usage, "output_tokens_details") and hasattr(
                            chunk.usage.output_tokens_details, "reasoning_tokens"
                        ):
                            usage_stats["reasoning_tokens"] = chunk.usage.output_tokens_details.reasoning_tokens

                        # Cached prompt tokens are nested under input_tokens_details.
                        if hasattr(chunk.usage, "input_tokens_details") and hasattr(
                            chunk.usage.input_tokens_details, "cached_tokens"
                        ):
                            usage_stats["cache_read_input_tokens"] = chunk.usage.input_tokens_details.cached_tokens

                    yield chunk

            finally:
                # Emit analytics even if the consumer stops iterating early.
                end_time = time.time()
                latency = end_time - start_time
                output = final_content
                self._capture_streaming_event(
                    posthog_distinct_id,
                    posthog_trace_id,
                    posthog_properties,
                    posthog_privacy_mode,
                    posthog_groups,
                    kwargs,
                    usage_stats,
                    latency,
                    output,
                )

        return generator()

    def _capture_streaming_event(
        self,
        posthog_distinct_id: Optional[str],
        posthog_trace_id: Optional[str],
        posthog_properties: Optional[Dict[str, Any]],
        posthog_privacy_mode: bool,
        posthog_groups: Optional[Dict[str, Any]],
        kwargs: Dict[str, Any],
        usage_stats: Dict[str, int],
        latency: float,
        output: Any,  # list of response output items collected while streaming
        tool_calls: Optional[List[Dict[str, Any]]] = None,
    ):
        """Send one ``$ai_generation`` event summarizing a finished stream.

        ``output`` is the list of response output items accumulated by
        ``_create_streaming`` (the previous ``str`` annotation was wrong).
        """
        if posthog_trace_id is None:
            # Defensive: create() normally supplies one, but this method may
            # be reached with None if called directly.
            posthog_trace_id = uuid.uuid4()

        event_properties = {
            "$ai_provider": "openai",
            "$ai_model": kwargs.get("model"),
            "$ai_model_parameters": get_model_params(kwargs),
            "$ai_input": with_privacy_mode(self._client._ph_client, posthog_privacy_mode, kwargs.get("input")),
            "$ai_output_choices": with_privacy_mode(
                self._client._ph_client,
                posthog_privacy_mode,
                output,
            ),
            "$ai_http_status": 200,
            "$ai_input_tokens": usage_stats.get("input_tokens", 0),
            "$ai_output_tokens": usage_stats.get("output_tokens", 0),
            "$ai_cache_read_input_tokens": usage_stats.get("cache_read_input_tokens", 0),
            "$ai_reasoning_tokens": usage_stats.get("reasoning_tokens", 0),
            "$ai_latency": latency,
            "$ai_trace_id": posthog_trace_id,
            "$ai_base_url": str(self._client.base_url),
            **(posthog_properties or {}),
        }

        if tool_calls:
            event_properties["$ai_tools"] = with_privacy_mode(
                self._client._ph_client,
                posthog_privacy_mode,
                tool_calls,
            )

        if posthog_distinct_id is None:
            event_properties["$process_person_profile"] = False

        if hasattr(self._client._ph_client, "capture"):
            self._client._ph_client.capture(
                distinct_id=posthog_distinct_id or posthog_trace_id,
                event="$ai_generation",
                properties=event_properties,
            groups=posthog_groups,
            )
35196
36197
37198class WrappedChat (openai .resources .chat .Chat ):
@@ -121,6 +282,11 @@ def generator():
121282 ):
122283 usage_stats ["cache_read_input_tokens" ] = chunk .usage .prompt_tokens_details .cached_tokens
123284
285+ if hasattr (chunk .usage , "output_tokens_details" ) and hasattr (
286+ chunk .usage .output_tokens_details , "reasoning_tokens"
287+ ):
288+ usage_stats ["reasoning_tokens" ] = chunk .usage .output_tokens_details .reasoning_tokens
289+
124290 if hasattr (chunk , "choices" ) and chunk .choices and len (chunk .choices ) > 0 :
125291 if chunk .choices [0 ].delta and chunk .choices [0 ].delta .content :
126292 content = chunk .choices [0 ].delta .content
@@ -191,6 +357,7 @@ def _capture_streaming_event(
191357 "$ai_input_tokens" : usage_stats .get ("prompt_tokens" , 0 ),
192358 "$ai_output_tokens" : usage_stats .get ("completion_tokens" , 0 ),
193359 "$ai_cache_read_input_tokens" : usage_stats .get ("cache_read_input_tokens" , 0 ),
360+ "$ai_reasoning_tokens" : usage_stats .get ("reasoning_tokens" , 0 ),
194361 "$ai_latency" : latency ,
195362 "$ai_trace_id" : posthog_trace_id ,
196363 "$ai_base_url" : str (self ._client .base_url ),
0 commit comments