@@ -12,9 +12,13 @@
     ResponseContentPartAddedEvent,
     ResponseContentPartDoneEvent,
     ResponseCreatedEvent,
+    ResponseError,
+    ResponseErrorEvent,
+    ResponseFailedEvent,
     ResponseFunctionCallArgumentsDeltaEvent,
     ResponseFunctionCallArgumentsDoneEvent,
     ResponseFunctionToolCall,
+    ResponseIncompleteEvent,
     ResponseInProgressEvent,
     ResponseOutputItemAddedEvent,
     ResponseOutputItemDoneEvent,
@@ -26,6 +30,7 @@
     ResponseTextDeltaEvent,
     ResponseTextDoneEvent,
 )
+from openai.types.responses.response import IncompleteDetails
 import pytest
 from syrupy.assertion import SnapshotAssertion

@@ -83,17 +88,40 @@ async def mock_generator(events, **kwargs):
             response=response,
             type="response.in_progress",
         )
+        response.status = "completed"

         for value in events:
             if isinstance(value, ResponseOutputItemDoneEvent):
                 response.output.append(value.item)
+            elif isinstance(value, IncompleteDetails):
+                response.status = "incomplete"
+                response.incomplete_details = value
+                break
+            elif isinstance(value, ResponseError):
+                response.status = "failed"
+                response.error = value
+                break
+
             yield value

-        response.status = "completed"
-        yield ResponseCompletedEvent(
-            response=response,
-            type="response.completed",
-        )
+        if isinstance(value, ResponseErrorEvent):
+            return
+
+        if response.status == "incomplete":
+            yield ResponseIncompleteEvent(
+                response=response,
+                type="response.incomplete",
+            )
+        elif response.status == "failed":
+            yield ResponseFailedEvent(
+                response=response,
+                type="response.failed",
+            )
+        else:
+            yield ResponseCompletedEvent(
+                response=response,
+                type="response.completed",
+            )

     with patch(
         "openai.resources.responses.AsyncResponses.create",
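
In short, the reworked mock_generator tracks a status while draining the scripted events (IncompleteDetails and ResponseError act as sentinels that end the stream early) and then yields exactly one terminal event. A minimal standalone sketch of that final dispatch, with stand-in status strings instead of the real openai event classes (the helper name is illustrative, not part of the diff):

    def terminal_event_type(status: str) -> str:
        """Mirror the generator's closing branches."""
        if status == "incomplete":
            return "response.incomplete"
        if status == "failed":
            return "response.failed"
        # Any other status falls through to the normal completion event.
        return "response.completed"

    assert terminal_event_type("incomplete") == "response.incomplete"
    assert terminal_event_type("completed") == "response.completed"
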
@@ -175,6 +203,123 @@ async def test_error_handling(
     assert result.response.speech["plain"]["speech"] == message, result.response.speech


+@pytest.mark.parametrize(
+    ("reason", "message"),
+    [
+        (
+            "max_output_tokens",
+            "max output tokens reached",
+        ),
+        (
+            "content_filter",
+            "content filter triggered",
+        ),
+        (
+            None,
+            "unknown reason",
+        ),
+    ],
+)
+async def test_incomplete_response(
+    hass: HomeAssistant,
+    mock_config_entry_with_assist: MockConfigEntry,
+    mock_init_component,
+    mock_create_stream: AsyncMock,
+    mock_chat_log: MockChatLog,  # noqa: F811
+    reason: str,
+    message: str,
+) -> None:
+    """Test handling early model stop."""
+    # Incomplete details received after some content is generated
+    mock_create_stream.return_value = [
+        (
+            # Start message
+            *create_message_item(
+                id="msg_A",
+                text=["Once upon", " a time, ", "there was "],
+                output_index=0,
+            ),
+            # Length limit or content filter
+            IncompleteDetails(reason=reason),
+        )
+    ]
+
+    result = await conversation.async_converse(
+        hass,
+        "Please tell me a big story",
+        "mock-conversation-id",
+        Context(),
+        agent_id="conversation.openai",
+    )
+
+    assert result.response.response_type == intent.IntentResponseType.ERROR, result
+    assert (
+        result.response.speech["plain"]["speech"]
+        == f"OpenAI response incomplete: {message}"
+    ), result.response.speech
+
+    # Incomplete details received before any content is generated
+    mock_create_stream.return_value = [
+        (
+            # Start generating response
+            *create_reasoning_item(id="rs_A", output_index=0),
+            # Length limit or content filter
+            IncompleteDetails(reason=reason),
+        )
+    ]
+
+    result = await conversation.async_converse(
+        hass,
+        "please tell me a big story",
+        "mock-conversation-id",
+        Context(),
+        agent_id="conversation.openai",
+    )
+
+    assert result.response.response_type == intent.IntentResponseType.ERROR, result
+    assert (
+        result.response.speech["plain"]["speech"]
+        == f"OpenAI response incomplete: {message}"
+    ), result.response.speech
+
+
+@pytest.mark.parametrize(
+    ("error", "message"),
+    [
+        (
+            ResponseError(code="rate_limit_exceeded", message="Rate limit exceeded"),
+            "OpenAI response failed: Rate limit exceeded",
+        ),
+        (
+            ResponseErrorEvent(type="error", message="Some error"),
+            "OpenAI response error: Some error",
+        ),
+    ],
+)
+async def test_failed_response(
+    hass: HomeAssistant,
+    mock_config_entry_with_assist: MockConfigEntry,
+    mock_init_component,
+    mock_create_stream: AsyncMock,
+    mock_chat_log: MockChatLog,  # noqa: F811
+    error: ResponseError | ResponseErrorEvent,
+    message: str,
+) -> None:
+    """Test handling failed and error responses."""
+    mock_create_stream.return_value = [(error,)]
+
+    result = await conversation.async_converse(
+        hass,
+        "next natural number please",
+        "mock-conversation-id",
+        Context(),
+        agent_id="conversation.openai",
+    )
+
+    assert result.response.response_type == intent.IntentResponseType.ERROR, result
+    assert result.response.speech["plain"]["speech"] == message, result.response.speech
+
+
 async def test_conversation_agent(
     hass: HomeAssistant,
     mock_config_entry: MockConfigEntry,
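
Together, the two new tests pin down one spoken error string per terminal outcome. A hedged sketch of the mapping they assert, with an illustrative helper name (the integration code that actually builds these strings is not part of this diff):

    def expected_speech(kind: str, detail: str) -> str:
        """Spoken error text implied by the assertions above."""
        if kind == "incomplete":
            return f"OpenAI response incomplete: {detail}"
        if kind == "failed":
            return f"OpenAI response failed: {detail}"
        return f"OpenAI response error: {detail}"

    # Per the parametrized cases, reason=None surfaces as "unknown reason".
    assert expected_speech("incomplete", "unknown reason") == (
        "OpenAI response incomplete: unknown reason"
    )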