@@ -1037,22 +1037,22 @@ def test_responses_api_streaming_with_tokens(mock_client):
10371037 """Test that Responses API streaming properly captures token usage from response.usage."""
10381038 from openai .types .responses import ResponseUsage
10391039 from unittest .mock import MagicMock
1040-
1040+
10411041 # Create mock response chunks with usage data in the correct location
10421042 chunks = []
1043-
1043+
10441044 # First chunk - just content, no usage
10451045 chunk1 = MagicMock ()
10461046 chunk1 .type = "response.text.delta"
10471047 chunk1 .text = "Test "
10481048 chunks .append (chunk1 )
1049-
1049+
10501050 # Second chunk - more content
10511051 chunk2 = MagicMock ()
10521052 chunk2 .type = "response.text.delta"
10531053 chunk2 .text = "response"
10541054 chunks .append (chunk2 )
1055-
1055+
10561056 # Final chunk - completed event with usage in response.usage
10571057 chunk3 = MagicMock ()
10581058 chunk3 .type = "response.completed"
@@ -1066,43 +1066,41 @@ def test_responses_api_streaming_with_tokens(mock_client):
     )
     chunk3.response.output = ["Test response"]
     chunks.append(chunk3)
-
+
     captured_kwargs = {}
-
+
     def mock_streaming_response(**kwargs):
         # Capture the kwargs to verify stream_options was NOT added
         captured_kwargs.update(kwargs)
         return iter(chunks)
-
+
     with patch(
         "openai.resources.responses.Responses.create",
         side_effect=mock_streaming_response,
     ):
         client = OpenAI(api_key="test-key", posthog_client=mock_client)
-
+
         # Consume the streaming response
         response = client.responses.create(
             model="gpt-4o-mini",
-            input=[
-                {"role": "user", "content": "Test message"}
-            ],
+            input=[{"role": "user", "content": "Test message"}],
             stream=True,
             posthog_distinct_id="test-id",
             posthog_properties={"test": "streaming"},
         )
-
+
         # Consume all chunks
         list(response)
-
+
         # Verify stream_options was NOT added (Responses API doesn't support it)
         assert "stream_options" not in captured_kwargs
-
+
         # Verify capture was called
         assert mock_client.capture.call_count == 1
-
+
         call_args = mock_client.capture.call_args[1]
         props = call_args["properties"]
-
+
         # Verify tokens are captured correctly from response.usage (not 0)
         assert call_args["distinct_id"] == "test-id"
         assert call_args["event"] == "$ai_generation"
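
For reference, the behavior this test pins down can be seen directly against the Responses API: token usage is reported on the final response.completed event, so there is no stream_options={"include_usage": True} to pass (that knob belongs to Chat Completions). Below is a minimal sketch using the plain openai-python SDK client rather than the PostHog wrapper exercised above; the event names follow the current SDK's streaming events, which differ slightly from the simplified shapes the mocks use, and the model and input values are placeholders.

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

stream = client.responses.create(
    model="gpt-4o-mini",
    input=[{"role": "user", "content": "Test message"}],
    stream=True,  # no stream_options here; the Responses API does not accept it
)

text_parts = []
input_tokens = output_tokens = 0
for event in stream:
    if event.type == "response.output_text.delta":
        # Incremental text arrives as delta events
        text_parts.append(event.delta)
    elif event.type == "response.completed":
        # Token counts live on the completed event's response.usage
        usage = event.response.usage
        input_tokens = usage.input_tokens
        output_tokens = usage.output_tokens

print("".join(text_parts), input_tokens, output_tokens)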