Skip to content

Commit a9fc3b6

Browse files
committed
version conflicts eh
2 parents 403dd65 + 7dc4cbb commit a9fc3b6

File tree

11 files changed

+295
-19
lines changed

11 files changed

+295
-19
lines changed

CHANGELOG.md

Lines changed: 13 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,7 +1,19 @@
1-
## 3.15.2 - 2025-02-26
1+
## 3.18.1 - 2025-03-03
22

33
1. Improve quota-limited feature flag logs
44

5+
## 3.18.0 - 2025-02-28
6+
7+
1. Add support for Azure OpenAI.
8+
9+
## 3.17.0 - 2025-02-27
10+
11+
1. The LangChain handler now captures tools in `$ai_generation` events, in property `$ai_tools`. This allows for displaying tools provided to the LLM call in PostHog UI. Note that support for `$ai_tools` in OpenAI and Anthropic SDKs is coming soon.
12+
13+
## 3.16.0 - 2025-02-26
14+
15+
1. feat: add some platform info to events (#198)
16+
517
## 3.15.1 - 2025-02-23
618

719
1. Fix async client support for OpenAI.

README.md

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -10,8 +10,10 @@ Please see the [Python integration docs](https://posthog.com/docs/integrations/p
1010
### Testing Locally
1111

1212
1. Run `python3 -m venv env` (creates virtual environment called "env")
13+
* or `uv venv env`
1314
2. Run `source env/bin/activate` (activates the virtual environment)
1415
3. Run `python3 -m pip install -e ".[test]"` (installs the package in develop mode, along with test dependencies)
16+
* or `uv pip install -e ".[test]"`
1517
4. Run `make test`
1618
1. To run a specific test do `pytest -k test_no_api_key`
1719

llm_observability_examples.py

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -33,11 +33,11 @@ def main_sync():
3333
groups = {"company": "test_company"}
3434

3535
try:
36-
# basic_openai_call(distinct_id, trace_id, properties, groups)
36+
basic_openai_call(distinct_id, trace_id, properties, groups)
3737
# streaming_openai_call(distinct_id, trace_id, properties, groups)
3838
# embedding_openai_call(distinct_id, trace_id, properties, groups)
3939
# image_openai_call()
40-
beta_openai_call(distinct_id, trace_id, properties, groups)
40+
# beta_openai_call(distinct_id, trace_id, properties, groups)
4141
except Exception as e:
4242
print("Error during OpenAI call:", str(e))
4343

@@ -216,6 +216,6 @@ def beta_openai_call(distinct_id, trace_id, properties, groups):
216216
# HOW TO RUN:
217217
# comment out one of these to run the other
218218

219-
# if __name__ == "__main__":
220-
# main_sync()
221-
asyncio.run(main_async())
219+
if __name__ == "__main__":
220+
main_sync()
221+
# asyncio.run(main_async())

posthog/ai/langchain/callbacks.py

Lines changed: 20 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -60,6 +60,8 @@ class GenerationMetadata(SpanMetadata):
6060
"""Model parameters of the run: temperature, max_tokens, etc."""
6161
base_url: Optional[str] = None
6262
"""Base URL of the provider's API used in the run."""
63+
tools: Optional[List[Dict[str, Any]]] = None
64+
"""Tools provided to the model."""
6365

6466

6567
RunMetadata = Union[SpanMetadata, GenerationMetadata]
@@ -377,6 +379,8 @@ def _set_llm_metadata(
377379
generation = GenerationMetadata(name=run_name, input=messages, start_time=time.time(), end_time=None)
378380
if isinstance(invocation_params, dict):
379381
generation.model_params = get_model_params(invocation_params)
382+
if tools := invocation_params.get("tools"):
383+
generation.tools = tools
380384
if isinstance(metadata, dict):
381385
if model := metadata.get("ls_model_name"):
382386
generation.model = model
@@ -424,7 +428,11 @@ def _pop_run_and_capture_trace_or_span(self, run_id: UUID, parent_run_id: Option
424428
log.warning(f"Run {run_id} is a generation, but attempted to be captured as a trace or span.")
425429
return
426430
self._capture_trace_or_span(
427-
trace_id, run_id, run, outputs, self._get_parent_run_id(trace_id, run_id, parent_run_id)
431+
trace_id,
432+
run_id,
433+
run,
434+
outputs,
435+
self._get_parent_run_id(trace_id, run_id, parent_run_id),
428436
)
429437

430438
def _capture_trace_or_span(
@@ -465,7 +473,10 @@ def _capture_trace_or_span(
465473
)
466474

467475
def _pop_run_and_capture_generation(
468-
self, run_id: UUID, parent_run_id: Optional[UUID], response: Union[LLMResult, BaseException]
476+
self,
477+
run_id: UUID,
478+
parent_run_id: Optional[UUID],
479+
response: Union[LLMResult, BaseException],
469480
):
470481
trace_id = self._get_trace_id(run_id)
471482
self._pop_parent_of_run(run_id)
@@ -476,7 +487,11 @@ def _pop_run_and_capture_generation(
476487
log.warning(f"Run {run_id} is not a generation, but attempted to be captured as a generation.")
477488
return
478489
self._capture_generation(
479-
trace_id, run_id, run, response, self._get_parent_run_id(trace_id, run_id, parent_run_id)
490+
trace_id,
491+
run_id,
492+
run,
493+
response,
494+
self._get_parent_run_id(trace_id, run_id, parent_run_id),
480495
)
481496

482497
def _capture_generation(
@@ -500,6 +515,8 @@ def _capture_generation(
500515
"$ai_latency": run.latency,
501516
"$ai_base_url": run.base_url,
502517
}
518+
if run.tools:
519+
event_properties["$ai_tools"] = run.tools
503520

504521
if isinstance(output, BaseException):
505522
event_properties["$ai_http_status"] = _get_http_status(output)

posthog/ai/openai/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,5 @@
11
from .openai import OpenAI
22
from .openai_async import AsyncOpenAI
3+
from .openai_providers import AsyncAzureOpenAI, AzureOpenAI
34

4-
__all__ = ["OpenAI", "AsyncOpenAI"]
5+
__all__ = ["OpenAI", "AsyncOpenAI", "AzureOpenAI", "AsyncAzureOpenAI"]
Lines changed: 41 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,41 @@
1+
try:
2+
import openai
3+
import openai.resources
4+
except ImportError:
5+
raise ModuleNotFoundError("Please install the Open AI SDK to use this feature: 'pip install openai'")
6+
7+
from posthog.ai.openai.openai import WrappedBeta, WrappedChat, WrappedEmbeddings
8+
from posthog.ai.openai.openai_async import WrappedBeta as AsyncWrappedBeta
9+
from posthog.ai.openai.openai_async import WrappedChat as AsyncWrappedChat
10+
from posthog.ai.openai.openai_async import WrappedEmbeddings as AsyncWrappedEmbeddings
11+
from posthog.client import Client as PostHogClient
12+
13+
14+
class AzureOpenAI(openai.AzureOpenAI):
15+
"""
16+
A wrapper around the Azure OpenAI SDK that automatically sends LLM usage events to PostHog.
17+
"""
18+
19+
_ph_client: PostHogClient
20+
21+
def __init__(self, posthog_client: PostHogClient, **kwargs):
22+
super().__init__(**kwargs)
23+
self._ph_client = posthog_client
24+
self.chat = WrappedChat(self)
25+
self.embeddings = WrappedEmbeddings(self)
26+
self.beta = WrappedBeta(self)
27+
28+
29+
class AsyncAzureOpenAI(openai.AsyncAzureOpenAI):
30+
"""
31+
A wrapper around the Azure OpenAI SDK that automatically sends LLM usage events to PostHog.
32+
"""
33+
34+
_ph_client: PostHogClient
35+
36+
def __init__(self, posthog_client: PostHogClient, **kwargs):
37+
super().__init__(**kwargs)
38+
self._ph_client = posthog_client
39+
self.chat = AsyncWrappedChat(self)
40+
self.embeddings = AsyncWrappedEmbeddings(self)
41+
self.beta = AsyncWrappedBeta(self)

posthog/client.py

Lines changed: 59 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2,11 +2,14 @@
22
import logging
33
import numbers
44
import os
5+
import platform
56
import sys
67
import warnings
78
from datetime import datetime, timedelta
9+
from typing import Any
810
from uuid import UUID, uuid4
911

12+
import distro # For Linux OS detection
1013
from dateutil.tz import tzutc
1114
from six import string_types
1215

@@ -29,6 +32,60 @@
2932
MAX_DICT_SIZE = 50_000
3033

3134

35+
def get_os_info():
36+
"""
37+
Returns standardized OS name and version information.
38+
Similar to how user agent parsing works in JS.
39+
"""
40+
os_name = ""
41+
os_version = ""
42+
43+
platform_name = sys.platform
44+
45+
if platform_name.startswith("win"):
46+
os_name = "Windows"
47+
if hasattr(platform, "win32_ver"):
48+
win_version = platform.win32_ver()[0]
49+
if win_version:
50+
os_version = win_version
51+
52+
elif platform_name == "darwin":
53+
os_name = "Mac OS X"
54+
if hasattr(platform, "mac_ver"):
55+
mac_version = platform.mac_ver()[0]
56+
if mac_version:
57+
os_version = mac_version
58+
59+
elif platform_name.startswith("linux"):
60+
os_name = "Linux"
61+
linux_info = distro.info()
62+
if linux_info["version"]:
63+
os_version = linux_info["version"]
64+
65+
elif platform_name.startswith("freebsd"):
66+
os_name = "FreeBSD"
67+
if hasattr(platform, "release"):
68+
os_version = platform.release()
69+
70+
else:
71+
os_name = platform_name
72+
if hasattr(platform, "release"):
73+
os_version = platform.release()
74+
75+
return os_name, os_version
76+
77+
78+
def system_context() -> dict[str, Any]:
79+
os_name, os_version = get_os_info()
80+
81+
return {
82+
"$python_runtime": platform.python_implementation(),
83+
"$python_version": "%s.%s.%s" % (sys.version_info[:3]),
84+
"$os": os_name,
85+
"$os_version": os_version,
86+
}
87+
88+
3289
class Client(object):
3390
"""Create a new PostHog client."""
3491

@@ -231,7 +288,8 @@ def capture(
231288
stacklevel=2,
232289
)
233290

234-
properties = properties or {}
291+
properties = {**(properties or {}), **system_context()}
292+
235293
require("distinct_id", distinct_id, ID_TYPES)
236294
require("properties", properties, dict)
237295
require("event", event, string_types)

posthog/test/ai/langchain/test_callbacks.py

Lines changed: 55 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1168,6 +1168,61 @@ async def test_async_anthropic_streaming(mock_client):
11681168
assert isinstance(trace_props["$ai_output_state"], AIMessage)
11691169

11701170

1171+
def test_metadata_tools(mock_client):
1172+
callbacks = CallbackHandler(mock_client)
1173+
run_id = uuid.uuid4()
1174+
tools = [
1175+
[
1176+
{
1177+
"type": "function",
1178+
"function": {
1179+
"name": "foo",
1180+
"description": "The foo.",
1181+
"parameters": {
1182+
"properties": {
1183+
"bar": {
1184+
"description": "The bar of foo.",
1185+
"type": "string",
1186+
},
1187+
},
1188+
"required": ["query_description", "query_kind"],
1189+
"type": "object",
1190+
"additionalProperties": False,
1191+
},
1192+
"strict": True,
1193+
},
1194+
}
1195+
]
1196+
]
1197+
1198+
with patch("time.time", return_value=1234567890):
1199+
callbacks._set_llm_metadata(
1200+
{"kwargs": {"openai_api_base": "https://us.posthog.com"}},
1201+
run_id,
1202+
messages=[{"role": "user", "content": "What's the weather like in SF?"}],
1203+
invocation_params={"temperature": 0.5, "tools": tools},
1204+
metadata={"ls_model_name": "hog-mini", "ls_provider": "posthog"},
1205+
name="test",
1206+
)
1207+
expected = GenerationMetadata(
1208+
model="hog-mini",
1209+
input=[{"role": "user", "content": "What's the weather like in SF?"}],
1210+
start_time=1234567890,
1211+
model_params={"temperature": 0.5},
1212+
provider="posthog",
1213+
base_url="https://us.posthog.com",
1214+
name="test",
1215+
tools=tools,
1216+
end_time=None,
1217+
)
1218+
assert callbacks._runs[run_id] == expected
1219+
with patch("time.time", return_value=1234567891):
1220+
run = callbacks._pop_run_metadata(run_id)
1221+
expected.end_time = 1234567891
1222+
assert run == expected
1223+
assert callbacks._runs == {}
1224+
1225+
11711226
def test_tool_calls(mock_client):
11721227
prompt = ChatPromptTemplate.from_messages([("user", "Foo")])
11731228
model = FakeMessagesListChatModel(

0 commit comments

Comments (0)