Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,7 @@
## 4.7.0 - 2025-06-10

- feat: add support for parse endpoint in responses API (no longer beta)

## 4.6.2 - 2025-06-09

- fix: replace `import posthog` with direct method imports
Expand Down
36 changes: 36 additions & 0 deletions posthog/ai/openai/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -230,6 +230,42 @@ def _capture_streaming_event(
groups=posthog_groups,
)

def parse(
    self,
    posthog_distinct_id: Optional[str] = None,
    posthog_trace_id: Optional[str] = None,
    posthog_properties: Optional[Dict[str, Any]] = None,
    posthog_privacy_mode: bool = False,
    posthog_groups: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
):
    """
    Proxy OpenAI's `responses.parse` (structured output) while emitting a PostHog usage event.

    Args:
        posthog_distinct_id: Optional ID to associate with the usage event.
        posthog_trace_id: Optional trace UUID for linking events.
        posthog_properties: Optional dictionary of extra properties to include in the event.
        posthog_privacy_mode: Whether to anonymize the input and output.
        posthog_groups: Optional dictionary of groups to associate with the event.
        **kwargs: Any additional parameters for the OpenAI Responses Parse API.

    Returns:
        The response from OpenAI's responses.parse call.
    """
    # Resolve the pieces the tracker needs up front, then delegate.
    ph_client = self._client._ph_client
    base_url = self._client.base_url
    target = self._original.parse
    return call_llm_and_track_usage(
        posthog_distinct_id,
        ph_client,
        "openai",
        posthog_trace_id,
        posthog_properties,
        posthog_privacy_mode,
        posthog_groups,
        base_url,
        target,
        **kwargs,
    )


class WrappedChat:
"""Wrapper for OpenAI chat that tracks usage in PostHog."""
Expand Down
36 changes: 36 additions & 0 deletions posthog/ai/openai/openai_async.py
Original file line number Diff line number Diff line change
Expand Up @@ -229,6 +229,42 @@ async def _capture_streaming_event(
properties=event_properties,
groups=posthog_groups,
)

async def parse(
    self,
    posthog_distinct_id: Optional[str] = None,
    posthog_trace_id: Optional[str] = None,
    posthog_properties: Optional[Dict[str, Any]] = None,
    posthog_privacy_mode: bool = False,
    posthog_groups: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
):
    """
    Async proxy for OpenAI's `responses.parse` (structured output) that also emits a PostHog usage event.

    Args:
        posthog_distinct_id: Optional ID to associate with the usage event.
        posthog_trace_id: Optional trace UUID for linking events.
        posthog_properties: Optional dictionary of extra properties to include in the event.
        posthog_privacy_mode: Whether to anonymize the input and output.
        posthog_groups: Optional dictionary of groups to associate with the event.
        **kwargs: Any additional parameters for the OpenAI Responses Parse API.

    Returns:
        The response from OpenAI's responses.parse call.
    """
    # Resolve the pieces the tracker needs up front, then delegate.
    ph_client = self._client._ph_client
    base_url = self._client.base_url
    target = self._original.parse
    return await call_llm_and_track_usage_async(
        posthog_distinct_id,
        ph_client,
        "openai",
        posthog_trace_id,
        posthog_properties,
        posthog_privacy_mode,
        posthog_groups,
        base_url,
        target,
        **kwargs,
    )


class WrappedChat:
Expand Down
114 changes: 114 additions & 0 deletions posthog/test/ai/openai/test_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,11 @@
ResponseOutputMessage,
ResponseOutputText,
ResponseUsage,
ParsedResponse,
)
from openai.types.responses.parsed_response import (
ParsedResponseOutputMessage,
ParsedResponseOutputText,
)

from posthog.ai.openai import OpenAI
Expand Down Expand Up @@ -115,6 +120,51 @@ def mock_openai_response_with_responses_api():
)


@pytest.fixture
def mock_parsed_response():
    """A canned ParsedResponse mimicking a structured-output call to the Responses API."""
    # The same event payload appears as raw JSON text, as the parsed message
    # content, and as the response-level `output_parsed` — define it once.
    event_json = '{"name": "Science Fair", "date": "Friday", "participants": ["Alice", "Bob"]}'
    event_data = {"name": "Science Fair", "date": "Friday", "participants": ["Alice", "Bob"]}

    message = ParsedResponseOutputMessage(
        id="msg_123",
        type="message",
        role="assistant",
        status="completed",
        content=[
            ParsedResponseOutputText(
                type="output_text",
                text=event_json,
                annotations=[],
                parsed=dict(event_data),
            )
        ],
    )
    usage = ResponseUsage(
        input_tokens=15,
        output_tokens=20,
        input_tokens_details={"prompt_tokens": 15, "cached_tokens": 0},
        output_tokens_details={"reasoning_tokens": 5},
        total_tokens=35,
    )
    return ParsedResponse(
        id="test",
        model="gpt-4o-2024-08-06",
        object="response",
        created_at=1741476542,
        status="completed",
        error=None,
        incomplete_details=None,
        instructions=None,
        max_output_tokens=None,
        tools=[],
        tool_choice="auto",
        output=[message],
        output_parsed=dict(event_data),
        parallel_tool_calls=True,
        previous_response_id=None,
        usage=usage,
        user=None,
        metadata={},
    )


@pytest.fixture
def mock_embedding_response():
return CreateEmbeddingResponse(
Expand Down Expand Up @@ -646,3 +696,67 @@ def test_responses_api(mock_client, mock_openai_response_with_responses_api):
assert props["$ai_http_status"] == 200
assert props["foo"] == "bar"
assert isinstance(props["$ai_latency"], float)


def test_responses_parse(mock_client, mock_parsed_response):
    """responses.parse forwards to OpenAI and captures one $ai_generation event."""
    input_messages = [
        {"role": "system", "content": "Extract the event information."},
        {
            "role": "user",
            "content": "Alice and Bob are going to a science fair on Friday.",
        },
    ]
    text_format = {
        "format": {
            "type": "json_schema",
            "json_schema": {
                "name": "event",
                "schema": {
                    "type": "object",
                    "properties": {
                        "name": {"type": "string"},
                        "date": {"type": "string"},
                        "participants": {"type": "array", "items": {"type": "string"}},
                    },
                    "required": ["name", "date", "participants"],
                },
            },
        }
    }

    with patch(
        "openai.resources.responses.Responses.parse",
        return_value=mock_parsed_response,
    ):
        client = OpenAI(api_key="test-key", posthog_client=mock_client)
        response = client.responses.parse(
            model="gpt-4o-2024-08-06",
            input=input_messages,
            text=text_format,
            posthog_distinct_id="test-id",
            posthog_properties={"foo": "bar"},
        )

    assert response == mock_parsed_response
    assert mock_client.capture.call_count == 1

    call_args = mock_client.capture.call_args[1]
    props = call_args["properties"]

    # Event metadata.
    assert call_args["distinct_id"] == "test-id"
    assert call_args["event"] == "$ai_generation"
    assert props["$ai_provider"] == "openai"
    assert props["$ai_model"] == "gpt-4o-2024-08-06"
    # Input is captured verbatim; output is the assistant's JSON text.
    assert props["$ai_input"] == input_messages
    assert props["$ai_output_choices"] == [
        {"role": "assistant", "content": '{"name": "Science Fair", "date": "Friday", "participants": ["Alice", "Bob"]}'}
    ]
    # Token accounting mirrors the mocked ResponseUsage.
    assert props["$ai_input_tokens"] == 15
    assert props["$ai_output_tokens"] == 20
    assert props["$ai_reasoning_tokens"] == 5
    assert props["$ai_http_status"] == 200
    assert props["foo"] == "bar"
    assert isinstance(props["$ai_latency"], float)
2 changes: 1 addition & 1 deletion posthog/version.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
VERSION = "4.6.2"
# Single source of truth for the package version.
VERSION = "4.7.0"

# Printed without a trailing newline so build tooling can capture it verbatim.
if __name__ == "__main__":
    print(VERSION, end="")  # noqa: T201
Loading