Skip to content

Commit 37f17ad

Browse files
committed
Add support for GPT-5.3 Codex and Claude Opus 4.6
1 parent 772ab87 commit 37f17ad

File tree

14 files changed

+301
-55
lines changed

14 files changed

+301
-55
lines changed

CHANGELOG.md

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,19 @@ All notable changes to SuperQode will be documented in this file.
55
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
66
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
77

8+
## [0.1.11] - 2026-02-07
9+
10+
### Fixed
11+
12+
- OpenAI BYOK routing for newer Codex models (`gpt-5.3-codex`) with provider-qualified model handling.
13+
- OpenAI BYOK fallback behavior when account/model rollout differs (retry path to compatible Codex model IDs).
14+
- BYOK streaming empty-response fallback to non-streaming completion to avoid silent failures.
15+
16+
### Changed
17+
18+
- Updated BYOK + ACP model catalogs to include `gpt-5.3-codex` and `claude-opus-4-6` and highlight them as latest/new in picker logic.
19+
- Refreshed default model recommendations and aliases for OpenAI/Anthropic.
20+
821
## [0.1.9] - 2026-01-31
922

1023
### Added

docs/providers/byok.md

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ export ANTHROPIC_API_KEY=sk-ant-...
2525
superqode providers test anthropic
2626

2727
# 3. Connect
28-
superqode connect byok anthropic claude-sonnet-4
28+
superqode connect byok anthropic claude-opus-4-6
2929
```
3030

3131
---
@@ -45,7 +45,7 @@ superqode connect byok anthropic claude-sonnet-4
4545

4646
| Model | Best For |
4747
|-------|----------|
48-
| `claude-opus-4-5` | Complex analysis |
48+
| `claude-opus-4-6` | Complex analysis (latest/new) |
4949
| `claude-sonnet-4-5` | Balanced performance |
5050
| `claude-sonnet-4` | General use |
5151
| `claude-haiku-4-5` | Fast, cost-effective |
@@ -56,11 +56,12 @@ superqode connect byok anthropic claude-sonnet-4
5656

5757
```bash
5858
export OPENAI_API_KEY=sk-...
59-
superqode connect byok openai gpt-4o
59+
superqode connect byok openai gpt-5.3-codex
6060
```
6161

6262
| Model | Best For |
6363
|-------|----------|
64+
| `gpt-5.3-codex` | Coding agent workloads (latest/new) |
6465
| `gpt-4o` | General use |
6566
| `gpt-4o-mini` | Cost-effective |
6667
| `o1` | Complex reasoning |
@@ -184,13 +185,15 @@ providers:
184185
api_key_env: ANTHROPIC_API_KEY
185186
recommended_models:
186187
- claude-opus-4-5
188+
- claude-opus-4-6
187189
- claude-sonnet-4-5
188190
- claude-sonnet-4
189191
- claude-haiku-4-5
190192

191193
openai:
192194
api_key_env: OPENAI_API_KEY
193195
recommended_models:
196+
- gpt-5.3-codex
194197
- gpt-4o
195198
- gpt-4o-mini
196199
- o1
@@ -213,12 +216,12 @@ team:
213216
security_tester:
214217
mode: byok
215218
provider: anthropic
216-
model: claude-sonnet-4
219+
model: claude-opus-4-6
217220

218221
performance_tester:
219222
mode: byok
220223
provider: openai
221-
model: gpt-4o
224+
model: gpt-5.3-codex
222225
```
223226
224227
---

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "superqode"
3-
version = "0.1.10"
3+
version = "0.1.11"
44
description = "SuperQode: Super Quality Engineering for Agentic Coding Teams"
55
readme = "README.md"
66
requires-python = ">=3.12"

src/superqode/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,4 +30,4 @@
3030
"sidebar",
3131
]
3232

33-
__version__ = "0.1.10"
33+
__version__ = "0.1.11"

src/superqode/acp_discovery.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -598,6 +598,13 @@ async def _get_models(self, agent: DiscoveredAgent) -> List[AgentModel]:
598598
],
599599
# Claude Code - Anthropic's models
600600
"claude-code": [
601+
AgentModel(
602+
id="claude-opus-4-6",
603+
name="Claude Opus 4.6",
604+
provider="anthropic",
605+
description="Latest Claude Opus model",
606+
context_window=1000000,
607+
),
601608
AgentModel(
602609
id="claude-sonnet-4-20250514",
603610
name="Claude Sonnet 4",
@@ -619,6 +626,13 @@ async def _get_models(self, agent: DiscoveredAgent) -> List[AgentModel]:
619626
],
620627
# Codex - OpenAI's models
621628
"codex": [
629+
AgentModel(
630+
id="gpt-5.3-codex",
631+
name="GPT-5.3 Codex",
632+
provider="openai",
633+
description="Latest Codex model for coding",
634+
context_window=256000,
635+
),
622636
AgentModel(
623637
id="o3", name="O3", provider="openai", description="Latest reasoning model"
624638
),

src/superqode/agent/loop.py

Lines changed: 58 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -785,15 +785,64 @@ async def run_streaming(
785785
# Flush thinking buffer before handling error
786786
if thinking_buffer.strip() and self.on_thinking:
787787
await self.on_thinking(thinking_buffer.strip())
788-
789-
error_msg = str(e)
790-
error_type = type(e).__name__
791-
# Yield error message so it's displayed
792-
yield f"\n\n[Error: {error_type}] {error_msg}"
793-
# Don't return immediately - let the error be displayed
794-
# But mark that we had an error so we don't continue the loop
795-
full_content = f"[Error: {error_type}] {error_msg}"
796-
return
788+
# Streaming can fail for some provider/model combinations even when
789+
# a non-streaming request would succeed. Retry once before failing.
790+
try:
791+
if self.on_thinking:
792+
await self.on_thinking(
793+
"Streaming failed; retrying once with non-streaming completion..."
794+
)
795+
fallback = await self.gateway.chat_completion(
796+
messages=gateway_messages,
797+
model=self.config.model,
798+
provider=self.config.provider,
799+
tools=tools_to_send,
800+
temperature=self.config.temperature,
801+
max_tokens=self.config.max_tokens,
802+
)
803+
if fallback.tool_calls:
804+
tool_calls.extend(fallback.tool_calls)
805+
if fallback.content and fallback.content.strip():
806+
full_content = fallback.content
807+
yield fallback.content
808+
had_content = True
809+
elif not fallback.tool_calls:
810+
raise RuntimeError("Fallback completion returned no content")
811+
except Exception:
812+
error_msg = str(e)
813+
error_type = type(e).__name__
814+
# Yield original streaming error for maximum transparency
815+
yield f"\n\n[Error: {error_type}] {error_msg}"
816+
full_content = f"[Error: {error_type}] {error_msg}"
817+
return
818+
819+
# Some providers may return an empty stream even when the request succeeded.
820+
# Retry once with non-streaming completion so users still get a response.
821+
if not full_content.strip() and not tool_calls:
822+
if self.on_thinking:
823+
await self.on_thinking(
824+
"No streamed text received; retrying once with non-streaming completion..."
825+
)
826+
try:
827+
fallback = await self.gateway.chat_completion(
828+
messages=gateway_messages,
829+
model=self.config.model,
830+
provider=self.config.provider,
831+
tools=tools_to_send,
832+
temperature=self.config.temperature,
833+
max_tokens=self.config.max_tokens,
834+
)
835+
if fallback.tool_calls:
836+
tool_calls.extend(fallback.tool_calls)
837+
if fallback.content and fallback.content.strip():
838+
full_content = fallback.content
839+
yield fallback.content
840+
had_content = True
841+
except Exception as e:
842+
error_msg = str(e)
843+
error_type = type(e).__name__
844+
yield f"\n\n[Error: {error_type}] {error_msg}"
845+
return
797846

798847
# Handle tool calls
799848
if tool_calls:

src/superqode/app_main.py

Lines changed: 50 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -715,6 +715,13 @@ def claude_models(self) -> List[Dict]:
715715
def _get_claude_models(self) -> List[Dict]:
716716
"""Get Claude models list - synced with providers/models.py."""
717717
return [
718+
{
719+
"id": "claude-opus-4-6",
720+
"name": "Claude Opus 4.6 (Latest/New)",
721+
"context": 1000000,
722+
"desc": "Latest Claude Opus with 1M context",
723+
"recommended": True,
724+
},
718725
{
719726
"id": "claude-opus-4-5-20251101",
720727
"name": "Claude Opus 4.5",
@@ -753,6 +760,7 @@ def codex_models(self) -> List[Dict]:
753760
def _get_codex_models(self) -> List[Dict]:
754761
"""Get Codex/OpenAI models list - updated from models.dev."""
755762
return [
763+
{"id": "gpt-5.3-codex", "name": "GPT-5.3 Codex (Latest/New)", "context": 256000},
756764
{"id": "gpt-5.2", "name": "GPT-5.2 (Latest)", "context": 256000},
757765
{"id": "gpt-5.2-pro", "name": "GPT-5.2 Pro", "context": 256000},
758766
{"id": "gpt-5.2-chat-latest", "name": "GPT-5.2 Chat", "context": 256000},
@@ -777,8 +785,14 @@ def openhands_models(self) -> List[Dict]:
777785
def _get_openhands_models(self) -> List[Dict]:
778786
"""Get OpenHands models list - updated from models.dev."""
779787
return [
788+
{"id": "gpt-5.3-codex", "name": "GPT-5.3 Codex (Latest/New)", "context": 256000},
780789
{"id": "gpt-5.2", "name": "GPT-5.2 (Latest)", "context": 256000},
781790
{"id": "gpt-5.2-pro", "name": "GPT-5.2 Pro", "context": 256000},
791+
{
792+
"id": "claude-opus-4-6",
793+
"name": "Claude Opus 4.6 (Latest/New)",
794+
"context": 1000000,
795+
},
782796
{
783797
"id": "claude-opus-4-5-20251101",
784798
"name": "Claude Opus 4.5 (Latest)",
@@ -904,6 +918,13 @@ def _get_openhands_models(self) -> List[Dict]:
904918
# Claude Code models - https://docs.anthropic.com/en/docs/about-claude/models (updated from models.dev)
905919
# Uses claude-code-acp adapter from Zed Industries
906920
self._claude_models = [
921+
{
922+
"id": "claude/claude-opus-4-6",
923+
"name": "Claude Opus 4.6 (Latest/New)",
924+
"free": False,
925+
"recommended": True,
926+
"desc": "Latest Claude Opus model - 1M context",
927+
},
907928
{
908929
"id": "claude/claude-opus-4-5-20251101",
909930
"name": "Claude Opus 4.5",
@@ -941,6 +962,13 @@ def _get_openhands_models(self) -> List[Dict]:
941962
# Codex CLI / OpenAI models - https://platform.openai.com/docs/models (updated from models.dev)
942963
# Uses codex-acp adapter from Zed Industries
943964
self._codex_models = [
965+
{
966+
"id": "codex/gpt-5.3-codex",
967+
"name": "GPT-5.3 Codex (Latest/New)",
968+
"free": False,
969+
"recommended": True,
970+
"desc": "Latest GPT Codex model for coding workflows",
971+
},
944972
{
945973
"id": "codex/gpt-5.2",
946974
"name": "GPT-5.2 (Latest)",
@@ -12490,23 +12518,28 @@ def is_latest_model(model_id: str, info) -> bool:
1249012518
# Generic patterns that indicate latest models
1249112519
latest_indicators = [
1249212520
"latest",
12521+
"new",
1249312522
"preview",
1249412523
"newest",
1249512524
"current",
1249612525
# Version patterns (highest versions)
12526+
"5.3",
1249712527
"5.2",
1249812528
"5.1",
12529+
"4.6",
1249912530
"4.7",
1250012531
"4.5",
1250112532
"3.2",
1250212533
"3.1",
1250312534
"3.0",
1250412535
# Specific latest model patterns by provider
12536+
"gpt-5.3-codex",
1250512537
"gpt-5.2",
1250612538
"gpt-5.1",
1250712539
"gemini-3",
1250812540
"gemini 3",
1250912541
"gemini3",
12542+
"claude-opus-4-6",
1251012543
"claude-opus-4-5",
1251112544
"claude-sonnet-4-5",
1251212545
"claude-haiku-4-5",
@@ -12537,12 +12570,10 @@ def is_latest_model(model_id: str, info) -> bool:
1253712570
):
1253812571
return True
1253912572

12540-
# Check release date - if released in 2025, likely latest
12541-
if info.released and info.released.startswith("2025"):
12542-
# Prioritize models from late 2025 (newer)
12543-
if "-12" in info.released or "-11" in info.released:
12544-
return True
12545-
# Also include other 2025 models
12573+
# Check release date - if released in 2025+ likely latest
12574+
if info.released and (
12575+
info.released.startswith("2025") or info.released.startswith("2026")
12576+
):
1254612577
return True
1254712578

1254812579
return False
@@ -12572,8 +12603,10 @@ def get_latest_priority(model_id: str, info) -> int:
1257212603
model_lower = model_id.lower()
1257312604
name_lower = info.name.lower()
1257412605

12575-
# Highest priority: Very latest models (2025-12, 2025-11 releases)
12606+
# Highest priority: Very latest models (2026+, then late 2025 releases)
1257612607
if info.released:
12608+
if info.released.startswith("2026"):
12609+
return -11
1257712610
if "-12" in info.released:
1257812611
return -10 # Highest priority
1257912612
elif "-11" in info.released:
@@ -12586,6 +12619,10 @@ def get_latest_priority(model_id: str, info) -> int:
1258612619
# High priority: Latest version indicators
1258712620
# Priority order: -10 (highest) to -6 (medium)
1258812621
latest_patterns = [
12622+
("gpt-5.3-codex", -11),
12623+
("5.3", -11),
12624+
("claude-opus-4-6", -11),
12625+
("4.6", -11),
1258912626
# Latest flagship models (2025-12 releases)
1259012627
("gpt-5.2", -10),
1259112628
("5.2", -10),
@@ -12963,13 +13000,15 @@ def is_latest_model(model_id: str, info) -> bool:
1296313000
name_lower = info.name.lower()
1296413001

1296513002
# Check release date
12966-
if info.released and info.released.startswith("2025"):
12967-
if "-12" in info.released or "-11" in info.released:
12968-
return True
13003+
if info.released and (
13004+
info.released.startswith("2025") or info.released.startswith("2026")
13005+
):
1296913006
return True
1297013007

1297113008
# Check latest patterns
1297213009
latest_patterns = [
13010+
"gpt-5.3-codex",
13011+
"5.3",
1297313012
"gpt-5.2",
1297413013
"5.2",
1297513014
"gemini-3",
@@ -12979,6 +13018,7 @@ def is_latest_model(model_id: str, info) -> bool:
1297913018
"glm-4-plus",
1298013019
"deepseek-v3.2",
1298113020
"grok-3",
13021+
"claude-opus-4-6",
1298213022
"claude-opus-4-5",
1298313023
"claude-sonnet-4-5",
1298413024
"claude-haiku-4-5",

src/superqode/config/loader.py

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -658,13 +658,17 @@ def create_default_config() -> Config:
658658
"anthropic": ProviderConfig(
659659
api_key_env="ANTHROPIC_API_KEY",
660660
description="Anthropic Claude models via API",
661-
recommended_models=["claude-sonnet-4-5", "claude-opus-4-1", "claude-haiku-4-5"],
661+
recommended_models=[
662+
"claude-opus-4-6",
663+
"claude-sonnet-4-5-20250929",
664+
"claude-haiku-4-5-20251001",
665+
],
662666
custom_models_allowed=True,
663667
),
664668
"openai": ProviderConfig(
665669
api_key_env="OPENAI_API_KEY",
666670
description="OpenAI GPT models via API",
667-
recommended_models=["gpt-4o", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano"],
671+
recommended_models=["gpt-5.3-codex", "gpt-5.2", "gpt-5.2-pro", "gpt-4o"],
668672
custom_models_allowed=True,
669673
),
670674
"google": ProviderConfig(
@@ -797,13 +801,13 @@ def create_default_config() -> Config:
797801

798802
# Model aliases
799803
config.model_aliases = {
800-
"latest-sonnet": "claude-sonnet-4-5",
801-
"latest-gpt": "gpt-4o",
804+
"latest-sonnet": "claude-sonnet-4-5-20250929",
805+
"latest-gpt": "gpt-5.3-codex",
802806
"latest-gemini": "gemini-3-pro-preview",
803807
"latest-glm": "glm-4.7",
804-
"fast": "claude-haiku-4-5",
805-
"balanced": "claude-sonnet-4-5",
806-
"powerful": "claude-opus-4-1",
808+
"fast": "claude-haiku-4-5-20251001",
809+
"balanced": "claude-sonnet-4-5-20250929",
810+
"powerful": "claude-opus-4-6",
807811
"thinking": "DeepSeek-V3.2-Exp-Think",
808812
"vision": "glm-4.6v",
809813
"coding": "codellama:34b",

0 commit comments

Comments
 (0)