@@ -28,6 +28,34 @@ def get_model_params(kwargs: Dict[str, Any]) -> Dict[str, Any]:
2828 model_params [param ] = kwargs [param ]
2929 return model_params
3030
def extract_core_model_params(kwargs: Dict[str, Any], provider: str) -> Dict[str, Any]:
    """
    Extract core model parameters from a provider call's kwargs, normalized
    to PostHog ``$ai_*`` property names.

    Args:
        kwargs: The keyword arguments that were passed to the provider's
            completion call (e.g. ``temperature``, ``max_tokens``, ``stream``).
        provider: Provider identifier (``"anthropic"``, ``"openai"``, ...).
            Unknown providers fall back to the OpenAI-style parameter names.

    Returns:
        A dict containing only the parameters that were present in ``kwargs``,
        keyed as ``$ai_temperature``, ``$ai_max_tokens``, ``$ai_stream``.
    """
    # OpenAI's newer API calls the limit "max_completion_tokens"; Anthropic
    # (and the default fallback) use "max_tokens".
    max_tokens_key = "max_completion_tokens" if provider == "openai" else "max_tokens"

    output: Dict[str, Any] = {}
    if "temperature" in kwargs:
        output["$ai_temperature"] = kwargs["temperature"]
    # BUG FIX: the original default branch checked "max_tokens" but read
    # kwargs.get("max_completion_tokens"), emitting $ai_max_tokens=None.
    if max_tokens_key in kwargs:
        output["$ai_max_tokens"] = kwargs[max_tokens_key]
    if "stream" in kwargs:
        output["$ai_stream"] = kwargs["stream"]
    return output
58+
3159
3260def get_usage (response , provider : str ) -> Dict [str , Any ]:
3361 if provider == "anthropic" :
@@ -137,6 +165,7 @@ def call_llm_and_track_usage(
137165 "$ai_latency" : latency ,
138166 "$ai_trace_id" : posthog_trace_id ,
139167 "$ai_base_url" : str (base_url ),
168+ ** extract_core_model_params (kwargs , provider ),
140169 ** (posthog_properties or {}),
141170 }
142171
@@ -205,6 +234,7 @@ async def call_llm_and_track_usage_async(
205234 "$ai_latency" : latency ,
206235 "$ai_trace_id" : posthog_trace_id ,
207236 "$ai_base_url" : str (base_url ),
237+ ** extract_core_model_params (kwargs , provider ),
208238 ** (posthog_properties or {}),
209239 }
210240
0 commit comments