diff --git a/src/llm/providers/claude_openrouter_client.py b/src/llm/providers/claude_openrouter_client.py index 4acd114e..fd441252 100644 --- a/src/llm/providers/claude_openrouter_client.py +++ b/src/llm/providers/claude_openrouter_client.py @@ -191,6 +191,8 @@ async def _create_message( or "exceeds the maximum length" in error_str or "exceeds the maximum allowed length" in error_str or "Input tokens exceed the configured limit" in error_str + or "Requested token count exceeds the model's maximum context length" in error_str + or ("BadRequestError" in error_str and "context length" in error_str) ): logger.debug(f"OpenRouter LLM Context limit exceeded: {error_str}") raise ContextLimitError(f"Context limit exceeded: {error_str}") diff --git a/src/llm/providers/mirothinker_sglang_client.py b/src/llm/providers/mirothinker_sglang_client.py index f02309a7..6008f8ca 100644 --- a/src/llm/providers/mirothinker_sglang_client.py +++ b/src/llm/providers/mirothinker_sglang_client.py @@ -159,6 +159,8 @@ async def _create_message( or "exceeds the maximum length" in error_str or "exceeds the maximum allowed length" in error_str or "Input tokens exceed the configured limit" in error_str + or "Requested token count exceeds the model's maximum context length" in error_str + or ("BadRequestError" in error_str and "context length" in error_str) ): logger.debug(f"MiroThinker LLM Context limit exceeded: {error_str}") raise ContextLimitError(f"Context limit exceeded: {error_str}")