Skip to content

Commit 6ac3e7b

Browse files
fix: handle updated OpenAI content filter error behavior #175
- Handle `openai.BadRequestError` with `content_filter` code by raising `DIALException` to propagate the error to the client
- Re-raise `ContentFilterFinishReasonError` as `DIALException` instead of silently appending a message to the response
- Add GPT-4.1 high content filter model variant
- Fix watcher loop not exiting after cancelling a disconnected request
1 parent c68eccf commit 6ac3e7b

File tree

3 files changed

+16
-6
lines changed

3 files changed

+16
-6
lines changed

statgpt/app/application/channel_completion.py

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -135,14 +135,20 @@ async def _channel_completion(
135135
)
136136
state = ChainParameters.get_state(chains_response)
137137
state[StateVarsConfig.ERROR] = None
138-
except openai.ContentFilterFinishReasonError as e:
139-
_log.exception(e)
140-
choice.append_content(
141-
"The query was blocked by the LLM provider content filter for violating safety guidelines."
142-
)
138+
except openai.BadRequestError as e:
139+
_log.exception("openai.BadRequestError")
140+
if isinstance(error := e.body, dict) and error.get("code") == "content_filter":
141+
raise DIALException(status_code=400, **error) from None
142+
143+
choice.append_content("An error occurred while processing your request.")
143144
state[StateVarsConfig.ERROR] = str(e)
145+
except openai.ContentFilterFinishReasonError as e:
146+
_log.exception("openai.ContentFilterFinishReasonError")
147+
raise DIALException(
148+
message=str(e), status_code=400, param="prompt", code="content_filter"
149+
) from None
144150
except Exception as e:
145-
_log.exception(e)
151+
_log.exception("Exception")
146152
choice.append_content("An error occurred while processing your request.")
147153
state[StateVarsConfig.ERROR] = str(e)
148154

statgpt/common/config/llm_models.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,9 @@ class LLMModelsEnum(StrEnum):
5050
GPT_4_1_MINI_2025_04_14 = "gpt-4.1-mini-2025-04-14"
5151
GPT_4_1_NANO_2025_04_14 = "gpt-4.1-nano-2025-04-14"
5252

53+
GPT_4_1_2025_04_14_HF = "gpt-4.1-2025-04-14-hf"
54+
"""GPT-4.1 models with high content filters."""
55+
5356
# GPT-5 models
5457
GPT_5_MINI_2025_08_07 = "gpt-5-mini-2025-08-07"
5558
GPT_5_1_2025_11_13 = "gpt-5.1-2025-11-13"

statgpt/common/utils/cancel_dependency.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ async def _watcher(request: Request, handler_task: asyncio.Task, interval: float
4848
if await request.is_disconnected():
4949
_log.info(f"Cancelling handler task for {request.url}")
5050
handler_task.cancel()
51+
return
5152
else:
5253
_log.debug(f"Request {request.url} still connected")
5354
await asyncio.sleep(interval)

0 commit comments

Comments
 (0)