Skip to content

Commit 64a775d

Browse files
Bentlybromajdyzclaude
authored
feat(backend/blocks): Add GPT-5.1 and GPT-5.1-codex (#11406)
This PR adds the latest gpt-5.1 and gpt-5.1-codex LLMs from OpenAI, and updates the price of the gpt-5-chat model. https://platform.openai.com/docs/models/gpt-5.1 https://platform.openai.com/docs/models/gpt-5.1-codex I have also had to add a new Codex block, as it uses a different OpenAI API and has other options that the main LLMs don't use. <img width="231" height="755" alt="image" src="https://github.com/user-attachments/assets/a4056633-7b0f-446f-ae86-d7755c5b88ec" /> #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: <!-- Put your test plan here: --> - [x] Test the latest gpt-5.1 LLM - [x] Test the latest gpt-5.1-codex block --------- Co-authored-by: Zamil Majdy <[email protected]> Co-authored-by: Claude <[email protected]>
1 parent 5d97706 commit 64a775d

File tree

3 files changed

+243
-1
lines changed

3 files changed

+243
-1
lines changed
Lines changed: 224 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,224 @@
1+
from dataclasses import dataclass
2+
from enum import Enum
3+
from typing import Any, Literal
4+
5+
from openai import AsyncOpenAI
6+
from openai.types.responses import Response as OpenAIResponse
7+
from pydantic import SecretStr
8+
9+
from backend.data.block import (
10+
Block,
11+
BlockCategory,
12+
BlockOutput,
13+
BlockSchemaInput,
14+
BlockSchemaOutput,
15+
)
16+
from backend.data.model import (
17+
APIKeyCredentials,
18+
CredentialsField,
19+
CredentialsMetaInput,
20+
NodeExecutionStats,
21+
SchemaField,
22+
)
23+
from backend.integrations.providers import ProviderName
24+
25+
26+
@dataclass
class CodexCallResult:
    """Structured response returned by Codex invocations."""

    # Final text output produced by the model.
    response: str
    # Reasoning summary; empty string when the API returns none.
    reasoning: str
    # OpenAI Responses API call ID, kept for auditing/debugging.
    response_id: str
33+
34+
35+
class CodexModel(str, Enum):
    """Codex-capable OpenAI models (served via the Responses API)."""

    GPT5_1_CODEX = "gpt-5.1-codex"
39+
40+
41+
class CodexReasoningEffort(str, Enum):
    """Configuration for the Responses API reasoning effort.

    NONE is a sentinel meaning "do not send a reasoning config at all";
    the other values are passed through as the ``reasoning.effort`` field.
    """

    NONE = "none"
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
48+
49+
50+
# Credentials input type restricted to OpenAI API-key credentials.
CodexCredentials = CredentialsMetaInput[
    Literal[ProviderName.OPENAI], Literal["api_key"]
]

# Fixture credentials used by the block test harness; never hits the real API.
TEST_CREDENTIALS = APIKeyCredentials(
    id="e2fcb203-3f2d-4ad4-a344-8df3bc7db36b",
    provider="openai",
    api_key=SecretStr("mock-openai-api-key"),
    title="Mock OpenAI API key",
    expires_at=None,
)
# Serialized form of the fixture above, as supplied in block test_input.
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}
67+
68+
69+
def CodexCredentialsField() -> CodexCredentials:
    """Build the credentials schema field used by Codex blocks."""
    description = "OpenAI API key with access to Codex models (Responses API)."
    return CredentialsField(description=description)
73+
74+
75+
class CodeGenerationBlock(Block):
    """Block that talks to Codex models via the OpenAI Responses API.

    Unlike the main LLM blocks, this block uses the Responses API
    (``client.responses.create``) rather than chat completions, and exposes
    Responses-specific options such as ``reasoning_effort``.
    """

    class Input(BlockSchemaInput):
        # Primary user-facing coding request.
        prompt: str = SchemaField(
            description="Primary coding request passed to the Codex model.",
            placeholder="Generate a Python function that reverses a list.",
        )
        # Sent as the Responses API `instructions` field when non-empty.
        system_prompt: str = SchemaField(
            title="System Prompt",
            default=(
                "You are Codex, an elite software engineer. "
                "Favor concise, working code and highlight important caveats."
            ),
            description="Optional instructions injected via the Responses API instructions field.",
            advanced=True,
        )
        model: CodexModel = SchemaField(
            title="Codex Model",
            default=CodexModel.GPT5_1_CODEX,
            description="Codex-optimized model served via the Responses API.",
            advanced=False,
        )
        # NONE skips the `reasoning` config entirely (see call_codex).
        reasoning_effort: CodexReasoningEffort = SchemaField(
            title="Reasoning Effort",
            default=CodexReasoningEffort.MEDIUM,
            description="Controls the Responses API reasoning budget. Select 'none' to skip reasoning configs.",
            advanced=True,
        )
        # None omits the limit from the request so OpenAI picks the cap.
        max_output_tokens: int | None = SchemaField(
            title="Max Output Tokens",
            default=2048,
            description="Upper bound for generated tokens (hard limit 128,000). Leave blank to let OpenAI decide.",
            advanced=True,
        )
        credentials: CodexCredentials = CodexCredentialsField()

    class Output(BlockSchemaOutput):
        response: str = SchemaField(
            description="Code-focused response returned by the Codex model."
        )
        reasoning: str = SchemaField(
            description="Reasoning summary returned by the model, if available.",
            default="",
        )
        response_id: str = SchemaField(
            description="ID of the Responses API call for auditing/debugging.",
            default="",
        )

    def __init__(self):
        super().__init__(
            id="86a2a099-30df-47b4-b7e4-34ae5f83e0d5",
            description="Generate or refactor code using OpenAI's Codex (Responses API).",
            categories={BlockCategory.AI, BlockCategory.DEVELOPER_TOOLS},
            input_schema=CodeGenerationBlock.Input,
            output_schema=CodeGenerationBlock.Output,
            test_input=[
                {
                    "prompt": "Write a TypeScript function that deduplicates an array.",
                    "credentials": TEST_CREDENTIALS_INPUT,
                }
            ],
            test_output=[
                ("response", str),
                ("reasoning", str),
                ("response_id", str),
            ],
            # call_codex is mocked so block tests never hit the network.
            test_mock={
                "call_codex": lambda *_args, **_kwargs: CodexCallResult(
                    response="function dedupe<T>(items: T[]): T[] { return [...new Set(items)]; }",
                    reasoning="Used Set to remove duplicates in O(n).",
                    response_id="resp_test",
                )
            },
            test_credentials=TEST_CREDENTIALS,
        )
        # Per-execution token/call accounting, mutated in call_codex.
        self.execution_stats = NodeExecutionStats()

    async def call_codex(
        self,
        *,
        credentials: APIKeyCredentials,
        model: CodexModel,
        prompt: str,
        system_prompt: str,
        max_output_tokens: int | None,
        reasoning_effort: CodexReasoningEffort,
    ) -> CodexCallResult:
        """Invoke the OpenAI Responses API.

        Builds the request payload from the given options (omitting any that
        are unset), performs the call, records token usage on
        ``self.execution_stats``, and returns the structured result.

        Raises:
            TypeError: if the SDK returns something other than an
                ``OpenAIResponse`` (e.g. an unexpected streaming object).
        """
        client = AsyncOpenAI(api_key=credentials.api_key.get_secret_value())

        request_payload: dict[str, Any] = {
            "model": model.value,
            "input": prompt,
        }
        # Only include optional fields when they carry a real value.
        if system_prompt:
            request_payload["instructions"] = system_prompt
        if max_output_tokens is not None:
            request_payload["max_output_tokens"] = max_output_tokens
        if reasoning_effort != CodexReasoningEffort.NONE:
            request_payload["reasoning"] = {"effort": reasoning_effort.value}

        response = await client.responses.create(**request_payload)
        if not isinstance(response, OpenAIResponse):
            raise TypeError(f"Expected OpenAIResponse, got {type(response).__name__}")

        # Extract data directly from typed response
        text_output = response.output_text or ""
        reasoning_summary = (
            str(response.reasoning.summary)
            if response.reasoning and response.reasoning.summary
            else ""
        )
        response_id = response.id or ""

        # Update usage stats (usage may be absent on the response object).
        self.execution_stats.input_token_count = (
            response.usage.input_tokens if response.usage else 0
        )
        self.execution_stats.output_token_count = (
            response.usage.output_tokens if response.usage else 0
        )
        self.execution_stats.llm_call_count += 1

        return CodexCallResult(
            response=text_output,
            reasoning=reasoning_summary,
            response_id=response_id,
        )

    async def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        **_kwargs,
    ) -> BlockOutput:
        """Execute the block: call Codex once and yield its three outputs."""
        result = await self.call_codex(
            credentials=credentials,
            model=input_data.model,
            prompt=input_data.prompt,
            system_prompt=input_data.system_prompt,
            max_output_tokens=input_data.max_output_tokens,
            reasoning_effort=input_data.reasoning_effort,
        )

        yield "response", result.response
        yield "reasoning", result.reasoning
        yield "response_id", result.response_id

autogpt_platform/backend/backend/blocks/llm.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
9393
O1_MINI = "o1-mini"
9494
# GPT-5 models
9595
GPT5 = "gpt-5-2025-08-07"
96+
GPT5_1 = "gpt-5.1-2025-11-13"
9697
GPT5_MINI = "gpt-5-mini-2025-08-07"
9798
GPT5_NANO = "gpt-5-nano-2025-08-07"
9899
GPT5_CHAT = "gpt-5-chat-latest"
@@ -194,6 +195,7 @@ def max_output_tokens(self) -> int | None:
194195
LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536), # o1-mini-2024-09-12
195196
# GPT-5 models
196197
LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
198+
LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000),
197199
LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000),
198200
LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000),
199201
LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384),

autogpt_platform/backend/backend/data/block_cost_config.py

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
from backend.blocks.apollo.organization import SearchOrganizationsBlock
1212
from backend.blocks.apollo.people import SearchPeopleBlock
1313
from backend.blocks.apollo.person import GetPersonDetailBlock
14+
from backend.blocks.codex import CodeGenerationBlock, CodexModel
1415
from backend.blocks.enrichlayer.linkedin import (
1516
GetLinkedinProfileBlock,
1617
GetLinkedinProfilePictureBlock,
@@ -63,9 +64,10 @@
6364
LlmModel.O1_MINI: 4,
6465
# GPT-5 models
6566
LlmModel.GPT5: 2,
67+
LlmModel.GPT5_1: 5,
6668
LlmModel.GPT5_MINI: 1,
6769
LlmModel.GPT5_NANO: 1,
68-
LlmModel.GPT5_CHAT: 2,
70+
LlmModel.GPT5_CHAT: 5,
6971
LlmModel.GPT41: 2,
7072
LlmModel.GPT41_MINI: 1,
7173
LlmModel.GPT4O_MINI: 1,
@@ -265,6 +267,20 @@
265267
AIStructuredResponseGeneratorBlock: LLM_COST,
266268
AITextSummarizerBlock: LLM_COST,
267269
AIListGeneratorBlock: LLM_COST,
270+
CodeGenerationBlock: [
271+
BlockCost(
272+
cost_type=BlockCostType.RUN,
273+
cost_filter={
274+
"model": CodexModel.GPT5_1_CODEX,
275+
"credentials": {
276+
"id": openai_credentials.id,
277+
"provider": openai_credentials.provider,
278+
"type": openai_credentials.type,
279+
},
280+
},
281+
cost_amount=5,
282+
)
283+
],
268284
CreateTalkingAvatarVideoBlock: [
269285
BlockCost(
270286
cost_amount=15,

0 commit comments

Comments
 (0)