Commit eb6bd0f

chore: cleanup formatting

Parent: 8b2b9e3

5 files changed: +41 -38

llm_observability_examples.py

Lines changed: 23 additions & 5 deletions

@@ -21,6 +21,7 @@
     posthog_client=posthog,
 )
 
+
 def main_sync():
     trace_id = str(uuid.uuid4())
     print("Trace ID:", trace_id)
@@ -31,6 +32,7 @@ def main_sync():
     except Exception as e:
         print("Error during OpenAI call:", str(e))
 
+
 async def main_async():
     try:
         await basic_async_openai_call()
@@ -42,7 +44,10 @@ async def main_async():
 def basic_openai_call():
     response = openai_client.chat.completions.create(
         model="gpt-4o-mini",
-        messages=[{"role": "system", "content": "You are a complex problem solver."}, {"role": "user", "content": "Explain quantum computing in simple terms."}],
+        messages=[
+            {"role": "system", "content": "You are a complex problem solver."},
+            {"role": "user", "content": "Explain quantum computing in simple terms."},
+        ],
         max_tokens=100,
         temperature=0.7,
     )
@@ -52,10 +57,14 @@ def basic_openai_call():
         print("No response or unexpected format returned.")
     return response
 
+
 async def basic_async_openai_call():
     response = await async_openai_client.chat.completions.create(
         model="gpt-4o-mini",
-        messages=[{"role": "system", "content": "You are a complex problem solver."}, {"role": "user", "content": "Explain quantum computing in simple terms."}],
+        messages=[
+            {"role": "system", "content": "You are a complex problem solver."},
+            {"role": "user", "content": "Explain quantum computing in simple terms."},
+        ],
         max_tokens=100,
         temperature=0.7,
     )
@@ -65,10 +74,14 @@ async def basic_async_openai_call():
         print("No response or unexpected format returned.")
     return response
 
+
 def streaming_openai_call():
     response = openai_client.chat.completions.create(
         model="gpt-4o-mini",
-        messages=[{"role": "system", "content": "You are a complex problem solver."}, {"role": "user", "content": "Explain quantum computing in simple terms."}],
+        messages=[
+            {"role": "system", "content": "You are a complex problem solver."},
+            {"role": "user", "content": "Explain quantum computing in simple terms."},
+        ],
         max_tokens=100,
         temperature=0.7,
         stream=True,
@@ -79,10 +92,14 @@ def streaming_openai_call():
 
     return response
 
+
 async def streaming_async_openai_call():
     response = await async_openai_client.chat.completions.create(
         model="gpt-4o-mini",
-        messages=[{"role": "system", "content": "You are a complex problem solver."}, {"role": "user", "content": "Explain quantum computing in simple terms."}],
+        messages=[
+            {"role": "system", "content": "You are a complex problem solver."},
+            {"role": "user", "content": "Explain quantum computing in simple terms."},
+        ],
         max_tokens=100,
         temperature=0.7,
         stream=True,
@@ -93,10 +110,11 @@ async def streaming_async_openai_call():
 
     return response
 
+
 # HOW TO RUN:
 # comment out one of these to run the other
 
 if __name__ == "__main__":
     main_sync()
 
-    # asyncio.run(main_async())
\ No newline at end of file
+    # asyncio.run(main_async())
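
For orientation: the first hunk truncates the client setup above line 21. Below is a minimal sketch of what the top of the examples file plausibly looks like; only posthog_client=posthog is visible in this diff, so the posthog import style, any extra constructor arguments, and the placeholder keys are assumptions (the OpenAI and AsyncOpenAI names themselves are confirmed by __all__ in posthog/ai/__init__.py below):

    import asyncio
    import uuid

    import posthog
    from posthog.ai import OpenAI, AsyncOpenAI

    posthog.project_api_key = "<ph_project_api_key>"  # hypothetical placeholder

    openai_client = OpenAI(
        api_key="<openai_api_key>",  # assumed parameter; only posthog_client appears in the hunk
        posthog_client=posthog,
    )
    async_openai_client = AsyncOpenAI(
        api_key="<openai_api_key>",  # assumed parameter
        posthog_client=posthog,
    )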

posthog/ai/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -2,4 +2,4 @@
 from .providers.openai.openai_async import AsyncOpenAI
 
 __all__ = ["OpenAI", "AsyncOpenAI"]
-# TODO: add Azure OpenAI wrapper
\ No newline at end of file
+# TODO: add Azure OpenAI wrapper

posthog/ai/providers/openai/openai.py

Lines changed: 4 additions & 10 deletions

@@ -4,13 +4,12 @@
 try:
     import openai
 except ImportError:
-    raise ModuleNotFoundError(
-        "Please install OpenAI to use this feature: 'pip install openai'"
-    )
+    raise ModuleNotFoundError("Please install OpenAI to use this feature: 'pip install openai'")
 
 from posthog.client import Client as PostHogClient
 from posthog.ai.utils import get_model_params, format_response, process_sync_streaming_response, track_usage
 
+
 class OpenAI:
     """
     A wrapper around the OpenAI SDK that automatically sends LLM usage events to PostHog.
@@ -83,11 +82,6 @@ def call_method(**call_kwargs):
             return self._openai_client.chat.completions.create(**call_kwargs)
 
         response = track_usage(
-            distinct_id,
-            self._ph_client,
-            posthog_trace_id,
-            posthog_properties,
-            call_method,
-            **kwargs
+            distinct_id, self._ph_client, posthog_trace_id, posthog_properties, call_method, **kwargs
         )
-        return response
\ No newline at end of file
+        return response
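
A note on the collapsed track_usage call: create separates the PostHog-specific arguments (distinct_id, posthog_trace_id, posthog_properties) from the kwargs it forwards to OpenAI. A hedged sketch of a call site follows; the keyword names a caller uses (posthog_distinct_id in particular) are assumptions inferred from the locals in this hunk and do not appear in the diff:

    import uuid

    response = openai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello"}],
        # Assumed kwarg names, inferred from the locals in the hunk above:
        posthog_distinct_id="user_123",
        posthog_trace_id=str(uuid.uuid4()),
        posthog_properties={"plan": "free"},
    )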

posthog/ai/providers/openai/openai_async.py

Lines changed: 5 additions & 11 deletions

@@ -4,13 +4,12 @@
 try:
     import openai
 except ImportError:
-    raise ModuleNotFoundError(
-        "Please install OpenAI to use this feature: 'pip install openai'"
-    )
+    raise ModuleNotFoundError("Please install OpenAI to use this feature: 'pip install openai'")
 
 from posthog.client import Client as PostHogClient
 from posthog.ai.utils import get_model_params, format_response, process_async_streaming_response, track_usage_async
 
+
 class AsyncOpenAI:
     """
     An async wrapper around the OpenAI SDK that automatically sends LLM usage events to PostHog.
@@ -60,7 +59,7 @@ async def create(
         """
         Wraps openai chat completions (async) and captures a $ai_generation event in PostHog.
 
-        To use streaming in async mode:
+        To use streaming in async mode:
             async for chunk in async_openai.chat.completions.create(stream=True, ...):
                 ...
         """
@@ -81,11 +80,6 @@ async def call_async_method(**call_kwargs):
             return await self._openai_client.chat.completions.create(**call_kwargs)
 
         response = await track_usage_async(
-            distinct_id,
-            self._ph_client,
-            posthog_trace_id,
-            posthog_properties,
-            call_async_method,
-            **kwargs
+            distinct_id, self._ph_client, posthog_trace_id, posthog_properties, call_async_method, **kwargs
         )
-        return response
\ No newline at end of file
+        return response
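
The docstring hunk shows the intended async streaming pattern. Combined with streaming_async_openai_call from the examples file, a runnable consumption loop would look roughly like this (client construction as sketched earlier; the chunk handling is illustrative):

    import asyncio

    async def demo_streaming():
        response = await async_openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "Explain quantum computing in simple terms."}],
            max_tokens=100,
            stream=True,
        )
        # The wrapper's streaming helper yields each chunk and captures the
        # accumulated output in PostHog once the stream is exhausted.
        async for chunk in response:
            if chunk.choices and chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="")

    asyncio.run(demo_streaming())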

posthog/ai/utils.py

Lines changed: 8 additions & 11 deletions

@@ -2,6 +2,7 @@
 import time
 from posthog.client import Client as PostHogClient
 
+
 def get_model_params(kwargs: Dict[str, Any]) -> Dict[str, Any]:
     """
     Extracts model parameters from the kwargs dictionary.
@@ -56,7 +57,6 @@ def process_sync_streaming_response(
             accumulated_content.append(chunk.choices[0].delta.content)
             yield chunk
     finally:
-        # Once we've finished, capture the final content in PostHog
         final_content = "".join(accumulated_content)
         event_properties["$ai_output"] = {
             "choices": [
@@ -107,17 +107,18 @@ async def process_async_streaming_response(
         properties=event_properties,
     )
 
+
 def track_usage(
     distinct_id: str,
     ph_client: PostHogClient,
     posthog_trace_id: Optional[str],
     posthog_properties: Optional[Dict[str, Any]],
-    call_method: Callable[..., Any],  # This will be the function that actually calls OpenAI
+    call_method: Callable[..., Any],
     **kwargs: Any,
 ) -> Any:
     """
     Common usage-tracking logic for both sync and async calls.
-    call_method: a function or coroutine that actually implements the call to openai
+    call_method: the llm call method (e.g. openai.chat.completions.create)
     """
     start_time = time.time()
     response = None
@@ -134,11 +135,9 @@ def track_usage(
     end_time = time.time()
     latency = end_time - start_time
 
-    # Extract usage if available
     if response and hasattr(response, "usage"):
         usage = response.usage.model_dump()
 
-        # Prepare analytics data
         input_tokens = usage.get("prompt_tokens", 0)
         output_tokens = usage.get("completion_tokens", 0)
         event_properties = {
@@ -155,20 +154,20 @@
             "$ai_posthog_properties": posthog_properties,
         }
 
-    # Capture usage in PostHog
+    # send the event to posthog
     if hasattr(ph_client, "capture") and callable(ph_client.capture):
         ph_client.capture(
             distinct_id=distinct_id,
             event="$ai_generation",
             properties=event_properties,
         )
 
-    # Re-raise the error if it occurred
     if error:
         raise error
 
     return response
 
+
 async def track_usage_async(
     distinct_id: str,
     ph_client: PostHogClient,
@@ -192,11 +191,9 @@ async def track_usage_async(
     end_time = time.time()
     latency = end_time - start_time
 
-    # Extract usage if available
     if response and hasattr(response, "usage"):
         usage = response.usage.model_dump()
 
-        # Prepare analytics data
         input_tokens = usage.get("prompt_tokens", 0)
         output_tokens = usage.get("completion_tokens", 0)
         event_properties = {
@@ -213,7 +210,7 @@
             "$ai_posthog_properties": posthog_properties,
         }
 
-    # Capture usage in PostHog
+    # send the event to posthog
     if hasattr(ph_client, "capture") and callable(ph_client.capture):
         ph_client.capture(
             distinct_id=distinct_id,
@@ -224,4 +221,4 @@
     if error:
         raise error
 
-    return response
\ No newline at end of file
+    return response
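
Since the full signature of track_usage is visible in these hunks, it can be exercised directly. A self-contained sketch follows, with a stub standing in for the real OpenAI call; the stub's shape (a .usage exposing model_dump()) is just enough to satisfy the token-extraction path above, and the Posthog constructor is the library's standard one:

    import uuid

    from posthog import Posthog
    from posthog.ai.utils import track_usage

    ph = Posthog("<ph_project_api_key>", host="https://us.i.posthog.com")

    def fake_llm_call(**kwargs):
        # Stand-in for openai.chat.completions.create: track_usage only needs
        # a callable and, for token extraction, a .usage with model_dump().
        class Usage:
            def model_dump(self):
                return {"prompt_tokens": 10, "completion_tokens": 25}
        class Response:
            usage = Usage()
        return Response()

    response = track_usage(
        "user_123",            # distinct_id
        ph,                    # ph_client
        str(uuid.uuid4()),     # posthog_trace_id
        {"plan": "free"},      # posthog_properties
        fake_llm_call,         # call_method
        model="gpt-4o-mini",   # forwarded to call_method via **kwargs
    )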
