Skip to content

Commit caa407f

Browse files
committed
Run black
1 parent 7f2034d commit caa407f

File tree

2 files changed

+6
-18
lines changed

2 files changed

+6
-18
lines changed

posthog/ai/providers/openai/openai.py

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,7 @@
77
try:
88
import openai
99
except ImportError:
10-
raise ModuleNotFoundError(
11-
"Please install the OpenAI SDK to use this feature: 'pip install openai'"
12-
)
10+
raise ModuleNotFoundError("Please install the OpenAI SDK to use this feature: 'pip install openai'")
1311

1412
from posthog.ai.utils import call_llm_and_track_usage, get_model_params
1513
from posthog.client import Client as PostHogClient
@@ -84,9 +82,7 @@ def _create_streaming(
8482
usage_stats: Dict[str, int] = {}
8583
accumulated_content = []
8684
stream_options = {"include_usage": True}
87-
response = self._client.chat.completions.create(
88-
**kwargs, stream_options=stream_options
89-
)
85+
response = self._client.chat.completions.create(**kwargs, stream_options=stream_options)
9086

9187
def generator():
9288
nonlocal usage_stats
@@ -147,9 +143,7 @@ def _capture_streaming_event(
147143
}
148144
]
149145
},
150-
"$ai_request_url": str(
151-
self._client.base_url.join("chat/completions")
152-
),
146+
"$ai_request_url": str(self._client.base_url.join("chat/completions")),
153147
"$ai_http_status": 200,
154148
"$ai_input_tokens": usage_stats.get("prompt_tokens", 0),
155149
"$ai_output_tokens": usage_stats.get("completion_tokens", 0),

posthog/ai/providers/openai/openai_async.py

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,7 @@
77
try:
88
import openai
99
except ImportError:
10-
raise ModuleNotFoundError(
11-
"Please install the OpenAI SDK to use this feature: 'pip install openai'"
12-
)
10+
raise ModuleNotFoundError("Please install the OpenAI SDK to use this feature: 'pip install openai'")
1311

1412
from posthog.ai.utils import call_llm_and_track_usage_async, get_model_params
1513
from posthog.client import Client as PostHogClient
@@ -85,9 +83,7 @@ async def _create_streaming(
8583
usage_stats: Dict[str, int] = {}
8684
accumulated_content = []
8785
stream_options = {"include_usage": True}
88-
response = await self._client.chat.completions.create(
89-
**kwargs, stream_options=stream_options
90-
)
86+
response = await self._client.chat.completions.create(**kwargs, stream_options=stream_options)
9187

9288
async def async_generator():
9389
nonlocal usage_stats, accumulated_content
@@ -153,9 +149,7 @@ def _capture_streaming_event(
153149
"$ai_latency": latency,
154150
"$ai_trace_id": posthog_trace_id,
155151
"$ai_posthog_properties": posthog_properties,
156-
"$ai_request_url": str(
157-
self._client.base_url.join("chat/completions")
158-
),
152+
"$ai_request_url": str(self._client.base_url.join("chat/completions")),
159153
}
160154

161155
if hasattr(self._client._ph_client, "capture"):

0 commit comments

Comments (0)