Skip to content

Commit 315d6e1

Browse files
committed
feat: add all other OpenAI functions
1 parent d8e4557 commit 315d6e1

File tree

5 files changed

+43
-18
lines changed

5 files changed

+43
-18
lines changed

llm_observability_examples.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ def main_sync():
3131
try:
3232
basic_openai_call(distinct_id, trace_id, properties)
3333
# streaming_openai_call(distinct_id, trace_id, properties)
34+
non_instrumented_openai_call()
3435
except Exception as e:
3536
print("Error during OpenAI call:", str(e))
3637

@@ -130,6 +131,12 @@ async def streaming_async_openai_call(distinct_id, trace_id, properties):
130131
return response
131132

132133

134+
def non_instrumented_openai_call():
135+
response = openai_client.images.generate(model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024")
136+
print(response)
137+
return response
138+
139+
133140
# HOW TO RUN:
134141
# comment out one of these to run the other
135142

posthog/ai/__init__.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,4 +2,3 @@
22
from .providers.openai.openai_async import AsyncOpenAI
33

44
__all__ = ["OpenAI", "AsyncOpenAI"]
5-
# TODO: add Azure OpenAI wrapper

posthog/ai/providers/openai/openai.py

Lines changed: 17 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
from posthog.client import Client as PostHogClient
1010
from posthog.ai.utils import (
11-
track_usage,
11+
call_llm_and_track_usage,
1212
get_model_params,
1313
)
1414

@@ -33,6 +33,15 @@ def __init__(
3333
self._openai_client = openai.OpenAI(**openai_config)
3434
self._posthog_client = posthog_client
3535

36+
def __getattr__(self, name: str) -> Any:
37+
"""
38+
Expose all attributes of the underlying openai.OpenAI instance except for the 'chat' property,
39+
which is replaced with a custom ChatNamespace for usage tracking.
40+
"""
41+
if name == "chat":
42+
return self.chat
43+
return getattr(self._openai_client, name)
44+
3645
@property
3746
def chat(self) -> "ChatNamespace":
3847
return ChatNamespace(self._posthog_client, self._openai_client)
@@ -61,7 +70,7 @@ def create(
6170
**kwargs: Any,
6271
):
6372
distinct_id = posthog_distinct_id or "anonymous_ai_user"
64-
73+
6574
if kwargs.get("stream", False):
6675
return self._create_streaming(
6776
distinct_id,
@@ -70,11 +79,10 @@ def create(
7079
**kwargs,
7180
)
7281

73-
7482
def call_method(**call_kwargs):
7583
return self._openai_client.chat.completions.create(**call_kwargs)
7684

77-
return track_usage(
85+
return call_llm_and_track_usage(
7886
distinct_id,
7987
self._ph_client,
8088
posthog_trace_id,
@@ -113,7 +121,9 @@ def generator():
113121
end_time = time.time()
114122
latency = end_time - start_time
115123
output = "".join(accumulated_content)
116-
self._capture_streaming_event(distinct_id, posthog_trace_id, posthog_properties, kwargs, usage_stats, latency, output)
124+
self._capture_streaming_event(
125+
distinct_id, posthog_trace_id, posthog_properties, kwargs, usage_stats, latency, output
126+
)
117127

118128
return generator()
119129

@@ -127,7 +137,7 @@ def _capture_streaming_event(
127137
latency: float,
128138
output: str,
129139
):
130-
140+
131141
event_properties = {
132142
"$ai_provider": "openai",
133143
"$ai_model": kwargs.get("model"),
@@ -140,7 +150,7 @@ def _capture_streaming_event(
140150
"role": "assistant",
141151
}
142152
]
143-
},
153+
},
144154
"$ai_http_status": 200,
145155
"$ai_input_tokens": usage_stats.get("prompt_tokens", 0),
146156
"$ai_output_tokens": usage_stats.get("completion_tokens", 0),

posthog/ai/providers/openai/openai_async.py

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
raise ModuleNotFoundError("Please install OpenAI to use this feature: 'pip install openai'")
88

99
from posthog.client import Client as PostHogClient
10-
from posthog.ai.utils import track_usage_async, get_model_params
10+
from posthog.ai.utils import call_llm_and_track_usage_async, get_model_params
1111

1212

1313
class AsyncOpenAI:
@@ -29,6 +29,15 @@ def __init__(
2929
self._openai_client = openai.AsyncOpenAI(**openai_config)
3030
self._posthog_client = posthog_client
3131

32+
def __getattr__(self, name: str) -> Any:
33+
"""
34+
Expose all attributes of the underlying openai.AsyncOpenAI instance except for the 'chat' property,
35+
which is replaced with a custom AsyncChatNamespace for usage tracking.
36+
"""
37+
if name == "chat":
38+
return self.chat
39+
return getattr(self._openai_client, name)
40+
3241
@property
3342
def chat(self) -> "AsyncChatNamespace":
3443
return AsyncChatNamespace(self._posthog_client, self._openai_client)
@@ -71,11 +80,10 @@ async def create(
7180
async def call_async_method(**call_kwargs):
7281
return await self._openai_client.chat.completions.create(**call_kwargs)
7382

74-
response = await track_usage_async(
83+
response = await call_llm_and_track_usage_async(
7584
distinct_id, self._ph_client, posthog_trace_id, posthog_properties, call_async_method, **kwargs
7685
)
7786
return response
78-
7987

8088
async def _create_streaming(
8189
self,
@@ -106,7 +114,9 @@ async def async_generator():
106114
end_time = time.time()
107115
latency = end_time - start_time
108116
output = "".join(accumulated_content)
109-
self._capture_streaming_event(distinct_id, posthog_trace_id, posthog_properties, kwargs, usage_stats, latency, output)
117+
self._capture_streaming_event(
118+
distinct_id, posthog_trace_id, posthog_properties, kwargs, usage_stats, latency, output
119+
)
110120

111121
return async_generator()
112122

@@ -120,7 +130,7 @@ def _capture_streaming_event(
120130
latency: float,
121131
output: str,
122132
):
123-
133+
124134
event_properties = {
125135
"$ai_provider": "openai",
126136
"$ai_model": kwargs.get("model"),
@@ -133,7 +143,7 @@ def _capture_streaming_event(
133143
"role": "assistant",
134144
}
135145
]
136-
},
146+
},
137147
"$ai_http_status": 200,
138148
"$ai_input_tokens": usage_stats.get("prompt_tokens", 0),
139149
"$ai_output_tokens": usage_stats.get("completion_tokens", 0),
@@ -148,4 +158,3 @@ def _capture_streaming_event(
148158
event="$ai_generation",
149159
properties=event_properties,
150160
)
151-

posthog/ai/utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from typing import Any, Dict, AsyncGenerator, Callable, Optional
1+
from typing import Any, Dict, Callable, Optional
22
import time
33
from posthog.client import Client as PostHogClient
44

@@ -39,7 +39,7 @@ def format_response(response):
3939
return output
4040

4141

42-
def track_usage(
42+
def call_llm_and_track_usage(
4343
distinct_id: str,
4444
ph_client: PostHogClient,
4545
posthog_trace_id: Optional[str],
@@ -99,7 +99,7 @@ def track_usage(
9999
return response
100100

101101

102-
async def track_usage_async(
102+
async def call_llm_and_track_usage_async(
103103
distinct_id: str,
104104
ph_client: PostHogClient,
105105
posthog_trace_id: Optional[str],

0 commit comments

Comments (0)