2 changes: 1 addition & 1 deletion README.md
@@ -78,7 +78,7 @@ You can configure `~/.metagpt/config2.yaml` according to the [example](https://g
```yaml
llm:
  api_type: "openai" # or azure / ollama / groq etc. Check LLMType for more options
-  model: "gpt-4-turbo" # or gpt-3.5-turbo
+  model: "gpt-4-turbo" # or gpt-5 / gpt-3.5-turbo
  base_url: "https://api.openai.com/v1" # or forward url / other llm url
  api_key: "YOUR_API_KEY"
```
2 changes: 1 addition & 1 deletion config/config2.example.yaml
@@ -2,7 +2,7 @@ llm:
  api_type: "openai" # or azure / ollama / groq etc.
  base_url: "YOUR_BASE_URL"
  api_key: "YOUR_API_KEY"
-  model: "gpt-4-turbo" # or gpt-3.5-turbo
+  model: "gpt-4-turbo" # or gpt-5 / gpt-3.5-turbo
  proxy: "YOUR_PROXY" # for LLM API requests
  # timeout: 600 # Optional. If set to 0, default value is 300.
  # Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
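Both snippets above configure the same `llm` block. A minimal sketch (not MetaGPT's actual loader) that reads `~/.metagpt/config2.yaml` and echoes the configured model, assuming PyYAML is installed:

```python
# Quick sanity check for the config above; assumes PyYAML (pip install pyyaml).
from pathlib import Path

import yaml

cfg = yaml.safe_load((Path.home() / ".metagpt" / "config2.yaml").read_text())
print(cfg["llm"]["model"])  # e.g. "gpt-4-turbo" or "gpt-5"
```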
6 changes: 3 additions & 3 deletions metagpt/ext/spo/app.py
@@ -116,17 +116,17 @@ def main():
# LLM Settings
st.subheader("LLM Settings")
opt_model = st.selectbox(
-    "Optimization Model", ["claude-3-5-sonnet-20240620", "gpt-4o", "gpt-4o-mini", "deepseek-chat"], index=0
+    "Optimization Model", ["claude-3-5-sonnet-20240620", "gpt-5", "gpt-4o", "gpt-4o-mini", "deepseek-chat"], index=0
)
opt_temp = st.slider("Optimization Temperature", 0.0, 1.0, 0.7)

eval_model = st.selectbox(
-    "Evaluation Model", ["gpt-4o-mini", "claude-3-5-sonnet-20240620", "gpt-4o", "deepseek-chat"], index=0
+    "Evaluation Model", ["gpt-5", "gpt-4o-mini", "claude-3-5-sonnet-20240620", "gpt-4o", "deepseek-chat"], index=0
)
eval_temp = st.slider("Evaluation Temperature", 0.0, 1.0, 0.3)

exec_model = st.selectbox(
-    "Execution Model", ["gpt-4o-mini", "claude-3-5-sonnet-20240620", "gpt-4o", "deepseek-chat"], index=0
+    "Execution Model", ["gpt-5", "gpt-4o-mini", "claude-3-5-sonnet-20240620", "gpt-4o", "deepseek-chat"], index=0
)
exec_temp = st.slider("Execution Temperature", 0.0, 1.0, 0.0)
3 changes: 3 additions & 0 deletions metagpt/provider/constant.py
@@ -35,6 +35,9 @@
"gpt-4o",
"gpt-4o-mini",
"openai/gpt-4o",
# Assume gpt-5 supports multimodal; adjust if needed
"gpt-5",
"openai/gpt-5",
"gemini-2.0-flash-exp",
"gemini-2.0-pro-exp-02-05",
"claude-3-5-sonnet-v2",
15 changes: 12 additions & 3 deletions metagpt/provider/openai_api.py
@@ -5,6 +5,7 @@
@File : openai.py
@Modified By: mashenquan, 2023/11/21. Fix bug: ReadTimeout.
@Modified By: mashenquan, 2023/12/1. Fix bug: Unclosed connection caused by openai 0.x.
+@Modified By: dickymoore, 2025/09/13. Add gpt-5 support.
"""
from __future__ import annotations

@@ -138,17 +139,25 @@ async def _achat_completion_stream(self, messages: list[dict], timeout=USE_CONFIG_TIMEOUT):
    def _cons_kwargs(self, messages: list[dict], timeout=USE_CONFIG_TIMEOUT, **extra_kwargs) -> dict:
        kwargs = {
            "messages": messages,
-            "max_tokens": self._get_max_tokens(messages),
            # "n": 1, # Some services do not provide this parameter, such as mistral
            # "stop": None, # default it's None and gpt4-v can't have this one
            "temperature": self.config.temperature,
            "model": self.model,
            "timeout": self.get_timeout(timeout),
        }
        if "o1-" in self.model:
-            # compatible to openai o1-series
+            # compatible with the OpenAI o1 series (do not send a max-token field)
            kwargs["temperature"] = 1
-            kwargs.pop("max_tokens")
+        else:
+            max_tokens_value = self._get_max_tokens(messages)
+            if str(self.model).startswith("gpt-5"):
+                # the OpenAI GPT-5 family expects max_completion_tokens
+                kwargs["max_completion_tokens"] = max_tokens_value
+                # GPT-5 only supports the default temperature of 1
+                kwargs["temperature"] = 1
+            else:
+                # other OpenAI-compatible models expect max_tokens
+                kwargs["max_tokens"] = max_tokens_value
        if extra_kwargs:
            kwargs.update(extra_kwargs)
        return kwargs
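A standalone sketch of the routing above, useful as a unit test of the three branches (function name hypothetical, not part of this PR):

```python
def route_token_kwargs(model: str, max_tokens_value: int, temperature: float) -> dict:
    """Mirror _cons_kwargs: pick the max-token field and temperature per model family."""
    kwargs = {"model": model, "temperature": temperature}
    if "o1-" in model:
        kwargs["temperature"] = 1  # o1 series: no max-token field at all
    elif model.startswith("gpt-5"):
        kwargs["max_completion_tokens"] = max_tokens_value  # gpt-5 field name
        kwargs["temperature"] = 1  # gpt-5 only accepts the default temperature
    else:
        kwargs["max_tokens"] = max_tokens_value  # classic chat-completions field name
    return kwargs

assert "max_completion_tokens" in route_token_kwargs("gpt-5", 4096, 0.7)
assert "max_tokens" in route_token_kwargs("gpt-4o", 4096, 0.7)
assert "max_tokens" not in route_token_kwargs("o1-mini", 4096, 0.7)
```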
8 changes: 8 additions & 0 deletions metagpt/utils/token_counter.py
@@ -39,6 +39,8 @@
"gpt-4-vision-preview": {"prompt": 0.01, "completion": 0.03}, # TODO add extra image price calculator
"gpt-4-1106-vision-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4o": {"prompt": 0.005, "completion": 0.015},
# Placeholder pricing for gpt-5; update when official
"gpt-5": {"prompt": 0.005, "completion": 0.015},
"gpt-4o-mini": {"prompt": 0.00015, "completion": 0.0006},
"gpt-4o-mini-2024-07-18": {"prompt": 0.00015, "completion": 0.0006},
"gpt-4o-2024-05-13": {"prompt": 0.005, "completion": 0.015},
@@ -78,6 +80,8 @@
"openai/gpt-4": {"prompt": 0.03, "completion": 0.06}, # start, for openrouter
"openai/gpt-4-turbo": {"prompt": 0.01, "completion": 0.03},
"openai/gpt-4o": {"prompt": 0.005, "completion": 0.015},
# Placeholder pricing for gpt-5 via OpenRouter-style prefix
"openai/gpt-5": {"prompt": 0.005, "completion": 0.015},
"openai/gpt-4o-2024-05-13": {"prompt": 0.005, "completion": 0.015},
"openai/gpt-4o-mini": {"prompt": 0.00015, "completion": 0.0006},
"openai/gpt-4o-mini-2024-07-18": {"prompt": 0.00015, "completion": 0.0006},
@@ -250,6 +254,8 @@
"o1-mini": 128000,
"o1-mini-2024-09-12": 128000,
"gpt-4o": 128000,
# Assume gpt-5 has at least 128k context; update if needed
"gpt-5": 128000,
"gpt-4o-2024-05-13": 128000,
"gpt-4o-2024-08-06": 128000,
"gpt-4o-mini-2024-07-18": 128000,
@@ -298,6 +304,7 @@
"openai/gpt-4": 8192, # start, for openrouter
"openai/gpt-4-turbo": 128000,
"openai/gpt-4o": 128000,
"openai/gpt-5": 128000,
"openai/gpt-4o-2024-05-13": 128000,
"openai/gpt-4o-mini": 128000,
"openai/gpt-4o-mini-2024-07-18": 128000,
@@ -461,6 +468,7 @@ def count_message_tokens(messages, model="gpt-3.5-turbo-0125"):
"gpt-4o-2024-08-06",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-5",
"o1-preview",
"o1-preview-2024-09-12",
"o1-mini",