Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions pr_agent/algo/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,8 @@
'gpt-5.2-chat-latest': 128000, # 128K, but may be limited by config.max_model_tokens
'gpt-5.2-codex': 400000, # 400K, but may be limited by config.max_model_tokens
'gpt-5.3-codex': 400000, # 400K, but may be limited by config.max_model_tokens
'gpt-5.4': 272000, # 272K safe default without opt-in 1M context parameters
'gpt-5.4-2026-03-05': 272000, # 272K safe default without opt-in 1M context parameters
'o1-mini': 128000, # 128K, but may be limited by config.max_model_tokens
'o1-mini-2024-09-12': 128000, # 128K, but may be limited by config.max_model_tokens
'o1-preview': 128000, # 128K, but may be limited by config.max_model_tokens
Expand Down
13 changes: 13 additions & 0 deletions tests/unittest/test_get_max_tokens.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,19 @@ def test_model_max_tokens(self, monkeypatch):

assert get_max_tokens(model) == expected

@pytest.mark.parametrize("model", ["gpt-5.4", "gpt-5.4-2026-03-05"])
def test_gpt54_model_max_tokens(self, monkeypatch, model):
    """Both gpt-5.4 aliases resolve to their registered 272K context window.

    The settings stub disables both override knobs (custom_model_max_tokens
    and max_model_tokens are 0), so get_max_tokens must fall through to the
    static model registry value.
    """
    # Build the stub bottom-up: a bare config object with zeroed overrides,
    # wrapped in a bare settings object exposing it as `.config`.
    config_stub = type('', (), {})()
    config_stub.custom_model_max_tokens = 0
    config_stub.max_model_tokens = 0

    settings_stub = type('', (), {})()
    settings_stub.config = config_stub

    # Route the module-under-test's settings lookup to our stub.
    monkeypatch.setattr(utils, "get_settings", lambda: settings_stub)

    assert get_max_tokens(model) == 272000

# Test situations where the model is not registered and exists as a custom model
def test_model_has_custom(self, monkeypatch):
fake_settings = type('', (), {
Expand Down
10 changes: 9 additions & 1 deletion tests/unittest/test_litellm_reasoning_effort.py
Original file line number Diff line number Diff line change
Expand Up @@ -307,7 +307,15 @@ async def test_gpt5_model_detection_various_versions(self, monkeypatch, mock_log
fake_settings = create_mock_settings("medium")
monkeypatch.setattr(litellm_handler, "get_settings", lambda: fake_settings)

gpt5_models = ["gpt-5-2025-08-07", "gpt-5.1", "gpt-5-turbo", "gpt-5.1-codex", "gpt-5.3-codex"]
gpt5_models = [
"gpt-5-2025-08-07",
"gpt-5.1",
"gpt-5.4",
"gpt-5.4-2026-03-05",
"gpt-5-turbo",
"gpt-5.1-codex",
"gpt-5.3-codex",
]

for model in gpt5_models:
with patch('pr_agent.algo.ai_handlers.litellm_ai_handler.acompletion', new_callable=AsyncMock) as mock_completion:
Expand Down