
Commit 6368d32

fix: flatten core model params
1 parent d1e2218 commit 6368d32

7 files changed: 107 additions, 3 deletions


posthog/ai/anthropic/anthropic.py

Lines changed: 2 additions & 1 deletion

@@ -8,7 +8,7 @@
 import uuid
 from typing import Any, Dict, Optional

-from posthog.ai.utils import call_llm_and_track_usage, get_model_params, with_privacy_mode
+from posthog.ai.utils import call_llm_and_track_usage, extract_core_model_params, get_model_params, with_privacy_mode
 from posthog.client import Client as PostHogClient


@@ -183,6 +183,7 @@ def _capture_streaming_event(
             "$ai_latency": latency,
             "$ai_trace_id": posthog_trace_id,
             "$ai_base_url": str(self._client.base_url),
+            **extract_core_model_params(kwargs, "anthropic"),
             **(posthog_properties or {}),
         }
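Combined with the existing get_model_params call, the captured event now carries the model parameters both nested and flattened. Roughly, as a sketch (values are illustrative; the exact shape is what the new tests below assert):

event_properties = {
    "$ai_model_parameters": {"temperature": 0.5, "max_tokens": 100, "stream": False},  # nested, pre-existing
    "$ai_temperature": 0.5,  # flattened, added by this commit
    "$ai_max_tokens": 100,
    "$ai_stream": False,
    # ...plus $ai_trace_id, $ai_base_url, $ai_latency, and any caller-supplied posthog_properties
}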

posthog/ai/langchain/callbacks.py

Lines changed: 3 additions & 1 deletion

@@ -23,7 +23,7 @@
 from langchain_core.outputs import ChatGeneration, LLMResult
 from pydantic import BaseModel

-from posthog.ai.utils import get_model_params, with_privacy_mode
+from posthog.ai.utils import extract_core_model_params, get_model_params, with_privacy_mode
 from posthog.client import Client

 log = logging.getLogger("posthog")
@@ -178,6 +178,7 @@ def on_llm_end(
             "$ai_latency": latency,
             "$ai_trace_id": trace_id,
             "$ai_base_url": run.get("base_url"),
+            **extract_core_model_params(run.get("model_params"), run.get("provider")),
             **self._properties,
         }
         if self._distinct_id is None:
@@ -224,6 +225,7 @@ def on_llm_error(
             "$ai_latency": latency,
             "$ai_trace_id": trace_id,
             "$ai_base_url": run.get("base_url"),
+            **extract_core_model_params(run.get("model_params"), run.get("provider")),
             **self._properties,
         }
         if self._distinct_id is None:

posthog/ai/openai/openai.py

Lines changed: 2 additions & 1 deletion

@@ -8,7 +8,7 @@
 except ImportError:
     raise ModuleNotFoundError("Please install the OpenAI SDK to use this feature: 'pip install openai'")

-from posthog.ai.utils import call_llm_and_track_usage, get_model_params, with_privacy_mode
+from posthog.ai.utils import call_llm_and_track_usage, extract_core_model_params, get_model_params, with_privacy_mode
 from posthog.client import Client as PostHogClient


@@ -167,6 +167,7 @@ def _capture_streaming_event(
             "$ai_latency": latency,
             "$ai_trace_id": posthog_trace_id,
             "$ai_base_url": str(self._client.base_url),
+            **extract_core_model_params(kwargs, "openai"),
             **posthog_properties,
         }

posthog/ai/utils.py

Lines changed: 30 additions & 0 deletions

@@ -28,6 +28,34 @@ def get_model_params(kwargs: Dict[str, Any]) -> Dict[str, Any]:
         model_params[param] = kwargs[param]
     return model_params

+def extract_core_model_params(kwargs: Dict[str, Any], provider: str) -> Dict[str, Any]:
+    """
+    Extracts core model parameters from the kwargs dictionary.
+    """
+    output = {}
+    if provider == "anthropic":
+        if "temperature" in kwargs:
+            output["$ai_temperature"] = kwargs.get("temperature")
+        if "max_tokens" in kwargs:
+            output["$ai_max_tokens"] = kwargs.get("max_tokens")
+        if "stream" in kwargs:
+            output["$ai_stream"] = kwargs.get("stream")
+    elif provider == "openai":
+        if "temperature" in kwargs:
+            output["$ai_temperature"] = kwargs.get("temperature")
+        if "max_completion_tokens" in kwargs:
+            output["$ai_max_tokens"] = kwargs.get("max_completion_tokens")
+        if "stream" in kwargs:
+            output["$ai_stream"] = kwargs.get("stream")
+    else:  # default for other providers
+        if "temperature" in kwargs:
+            output["$ai_temperature"] = kwargs.get("temperature")
+        if "max_tokens" in kwargs:
+            output["$ai_max_tokens"] = kwargs.get("max_tokens")
+        if "stream" in kwargs:
+            output["$ai_stream"] = kwargs.get("stream")
+    return output
+

 def get_usage(response, provider: str) -> Dict[str, Any]:
     if provider == "anthropic":
@@ -137,6 +165,7 @@ def call_llm_and_track_usage(
         "$ai_latency": latency,
         "$ai_trace_id": posthog_trace_id,
         "$ai_base_url": str(base_url),
+        **extract_core_model_params(kwargs, provider),
         **(posthog_properties or {}),
     }

@@ -205,6 +234,7 @@ async def call_llm_and_track_usage_async(
         "$ai_latency": latency,
         "$ai_trace_id": posthog_trace_id,
         "$ai_base_url": str(base_url),
+        **extract_core_model_params(kwargs, provider),
         **(posthog_properties or {}),
     }
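For illustration only (these calls are not part of the commit), the new helper flattens provider kwargs into top-level $ai_* properties; note that OpenAI's max_completion_tokens and Anthropic's max_tokens both land in $ai_max_tokens:

from posthog.ai.utils import extract_core_model_params

extract_core_model_params({"temperature": 0.5, "max_tokens": 100, "stream": False}, "anthropic")
# {"$ai_temperature": 0.5, "$ai_max_tokens": 100, "$ai_stream": False}

extract_core_model_params({"temperature": 0.5, "max_completion_tokens": 100}, "openai")
# {"$ai_temperature": 0.5, "$ai_max_tokens": 100}

extract_core_model_params({"max_tokens": 50, "stream": True}, "some-other-provider")
# {"$ai_max_tokens": 50, "$ai_stream": True}  (any other provider takes the default branch)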

posthog/test/ai/anthropic/test_anthropic.py

Lines changed: 25 additions & 0 deletions

@@ -271,3 +271,28 @@ async def test_basic_async_integration(mock_client):
     assert props["$ai_http_status"] == 200
     assert props["foo"] == "bar"
     assert isinstance(props["$ai_latency"], float)
+
+
+def test_core_model_params(mock_client, mock_anthropic_response):
+    with patch("anthropic.resources.Messages.create", return_value=mock_anthropic_response):
+        client = Anthropic(api_key="test-key", posthog_client=mock_client)
+        response = client.messages.create(
+            model="claude-3-opus-20240229",
+            temperature=0.5,
+            max_tokens=100,
+            stream=False,
+            messages=[{"role": "user", "content": "Hello"}],
+            posthog_distinct_id="test-id",
+            posthog_properties={"foo": "bar"},
+        )
+
+    assert response == mock_anthropic_response
+    assert mock_client.capture.call_count == 1
+
+    call_args = mock_client.capture.call_args[1]
+    props = call_args["properties"]
+    assert props["$ai_model_parameters"] == {"temperature": 0.5, "max_tokens": 100, "stream": False}
+    assert props["$ai_temperature"] == 0.5
+    assert props["$ai_max_tokens"] == 100
+    assert props["$ai_stream"] == False
+    assert props["foo"] == "bar"

posthog/test/ai/langchain/test_callbacks.py

Lines changed: 21 additions & 0 deletions

@@ -771,3 +771,24 @@ def test_tool_calls(mock_client):
         }
     ]
     assert "additional_kwargs" not in call["properties"]["$ai_output_choices"][0]
+
+
+def test_core_model_params(mock_client):
+    prompt = ChatPromptTemplate.from_messages([("user", "Foo")])
+    chain = prompt | ChatOpenAI(
+        api_key=OPENAI_API_KEY,
+        model="gpt-4",
+        temperature=0.5,
+        max_tokens=100,
+        stream=False,
+    )
+    callbacks = CallbackHandler(mock_client, properties={"foo": "bar"})
+    chain.invoke({}, config={"callbacks": [callbacks]})
+
+    assert mock_client.capture.call_count == 1
+    call = mock_client.capture.call_args[1]
+    assert call["properties"]["$ai_model_parameters"] == {"temperature": 0.5, "max_tokens": 100, "stream": False}
+    assert call["properties"]["$ai_temperature"] == 0.5
+    assert call["properties"]["$ai_max_tokens"] == 100
+    assert call["properties"]["$ai_stream"] == False
+    assert call["properties"]["foo"] == "bar"

posthog/test/ai/openai/test_openai.py

Lines changed: 24 additions & 0 deletions

@@ -173,3 +173,27 @@ def test_privacy_mode_global(mock_client, mock_openai_response):
     props = call_args["properties"]
     assert props["$ai_input"] is None
     assert props["$ai_output_choices"] is None
+
+def test_core_model_params(mock_client, mock_openai_response):
+    with patch("openai.resources.chat.completions.Completions.create", return_value=mock_openai_response):
+        client = OpenAI(api_key="test-key", posthog_client=mock_client)
+        response = client.chat.completions.create(
+            model="gpt-4",
+            temperature=0.5,
+            max_completion_tokens=100,
+            stream=False,
+            messages=[{"role": "user", "content": "Hello"}],
+            posthog_distinct_id="test-id",
+            posthog_properties={"foo": "bar"},
+        )
+
+    assert response == mock_openai_response
+    assert mock_client.capture.call_count == 1
+
+    call_args = mock_client.capture.call_args[1]
+    props = call_args["properties"]
+    assert props["$ai_model_parameters"] == {"temperature": 0.5, "max_completion_tokens": 100, "stream": False}
+    assert props["$ai_temperature"] == 0.5
+    assert props["$ai_max_tokens"] == 100
+    assert props["$ai_stream"] == False
+    assert props["foo"] == "bar"
