@@ -3514,7 +3514,7 @@ class MyAgent(SpoonReactAI):
35143514 super().__init__(**kwargs)
35153515
35163516 # Configure LLM
3517- self.llm = ChatBot(model_name="gpt-4.1 ")
3517+        self.llm = ChatBot(model_name="gpt-5.1-chat-latest")
35183518
35193519 # Set system prompt
35203520 self.system_prompt = "You are a helpful AI assistant."
@@ -3602,7 +3602,7 @@ class ResearchAgent(SpoonReactAI):
36023602 super().__init__(**kwargs)
36033603
36043604 # Configure LLM
3605- self.llm = ChatBot(model_name="gpt-4.1 ")
3605+        self.llm = ChatBot(model_name="gpt-5.1-chat-latest")
36063606
36073607 # Set up MCP tools
36083608 tools = []
@@ -3673,7 +3673,7 @@ class ConfigurableAgent(SpoonReactAI):
36733673
36743674 # Configure LLM with specific parameters
36753675 self.llm = ChatBot(
3676- model_name="gpt-4.1 ",
3676+            model_name="gpt-5.1-chat-latest",
36773677 temperature=0.7, # Creativity level
36783678 max_tokens=4096, # Response length limit
36793679 timeout=60 # Request timeout
@@ -3697,7 +3697,7 @@ from spoon_ai.chat import ChatBot
36973697
36983698# Create agent instance
36993699agent = SpoonReactAI(
3700- llm=ChatBot(model_name="gpt-4.1 "),
3700+    llm=ChatBot(model_name="gpt-5.1-chat-latest"),
37013701 system_prompt="You are a helpful assistant.",
37023702 max_steps=15
37033703)
@@ -3801,7 +3801,7 @@ simple_agent = ToolCallAgent(
38013801)
38023802
38033803complex_agent = ToolCallAgent(
3804- llm=ChatBot(model_name="gpt-4.1 ") # More capable
3804+    llm=ChatBot(model_name="gpt-5.1-chat-latest")  # More capable
38053805)
38063806
38073807# Set reasonable limits
@@ -3889,7 +3889,7 @@ class MCPEnabledAgent(SpoonReactMCP):
38893889 super().__init__(**kwargs)
38903890
38913891 # Configure LLM
3892- self.llm = ChatBot(model_name="gpt-4.1 ")
3892+        self.llm = ChatBot(model_name="gpt-5.1-chat-latest")
38933893
38943894 # Configure stdio MCP tool
38953895 search_tool = MCPTool(
@@ -4084,7 +4084,7 @@ class AdvancedConfigAgent(SpoonReactMCP):
40844084 def __init__(self, **kwargs):
40854085 # Advanced LLM configuration
40864086 llm_config = {
4087- "model_name": "gpt-4.1 ",
4087+            "model_name": "gpt-5.1-chat-latest",
40884088 "temperature": 0.3, # Creativity control
40894089 "max_tokens": 4096, # Maximum response length
40904090 "top_p": 0.9, # Nucleus sampling parameter
@@ -4228,7 +4228,7 @@ class DynamicConfigAgent(SpoonReactMCP):
42284228 "system_prompt": "You are an advanced AI agent..."
42294229 },
42304230 "llm_config": {
4231- "model_name": "gpt-4.1 ",
4231+                "model_name": "gpt-5.1-chat-latest",
42324232 "temperature": 0.3,
42334233 "max_tokens": 4096,
42344234 "top_p": 0.9,
@@ -4487,7 +4487,7 @@ import asyncio
44874487from spoon_ai.agents import SpoonReactAI
44884488from spoon_ai.chat import ChatBot
44894489
4490- agent = SpoonReactAI(llm=ChatBot(model_name="gpt-4.1 ", llm_provider="openai"))
4490+agent = SpoonReactAI(llm=ChatBot(model_name="gpt-5.1-chat-latest", llm_provider="openai"))
44914491
44924492async def main():
44934493 response = await agent.run("What is the capital of France?")
@@ -4540,7 +4540,7 @@ tavily_search = MCPTool(
45404540
45414541# Agent with real tools
45424542agent = SpoonReactAI(
4543- llm=ChatBot(model_name="gpt-4.1 ", llm_provider="openai"),
4543+    llm=ChatBot(model_name="gpt-5.1-chat-latest", llm_provider="openai"),
45444544 tools=ToolManager([tavily_search, PercentageTool()]),
45454545 max_iterations=10 # Limit reasoning loops
45464546)
@@ -4672,7 +4672,7 @@ class SpoonMacroAnalysisAgent(SpoonReactMCP):
46724672 "env": {"TAVILY_API_KEY": tavily_key},
46734673 },
46744674 )
4675-
4675+
46764676 # Pre-load MCP tool parameters
46774677 await tavily_tool.ensure_parameters_loaded()
46784678
@@ -4686,7 +4686,7 @@ async def main():
46864686 llm=ChatBot(llm_provider="gemini", model_name="gemini-2.5-flash")
46874687 )
46884688 await agent.initialize()
4689-
4689+
46904690 query = (
46914691 "Perform a macro analysis of Bitcoin (BTC). "
46924692 "Search for recent news and get current market data from Binance using BTC/USDT pair."
@@ -4729,7 +4729,7 @@ from spoon_ai.tools import ToolManager
47294729from spoon_toolkits.crypto.crypto_powerdata.tools import CryptoPowerDataCEXTool
47304730
47314731agent = SpoonReactAI(
4732- llm=ChatBot(model_name="gpt-4.1 ", llm_provider="openai"),
4732+    llm=ChatBot(model_name="gpt-5.1-chat-latest", llm_provider="openai"),
47334733 tools=ToolManager([CryptoPowerDataCEXTool()]) # real tool with retries/failures
47344734)
47354735
@@ -5706,7 +5706,7 @@ import asyncio
57065706from spoon_ai.chat import ChatBot
57075707
57085708# Same interface for all providers—just change model_name and llm_provider
5709- llm = ChatBot(model_name="gpt-4.1 ", llm_provider="openai")
5709+llm = ChatBot(model_name="gpt-5.1-chat-latest", llm_provider="openai")
57105710
57115711async def main():
57125712 response = await llm.ask([{"role": "user", "content": "Explain quantum computing in one sentence"}])
@@ -5721,7 +5721,7 @@ asyncio.run(main())
57215721
57225722### OpenAI
57235723
5724- - **Models**: GPT-4.1 (default), GPT-4o, GPT-4o-mini, o1-preview, o1-mini
5724+ - **Models**: gpt-5.1-chat-latest (default), GPT-4o, GPT-4o-mini, o1-preview, o1-mini
57255725- **Features**: Function calling, streaming, embeddings, reasoning models
57265726- **Best for**: General-purpose tasks, reasoning, code generation
57275727
@@ -5730,7 +5730,7 @@ from spoon_ai.chat import ChatBot
57305730
57315731# OpenAI configuration with default model
57325732llm = ChatBot(
5733- model_name="gpt-4.1 ", # Framework default
5733+    model_name="gpt-5.1-chat-latest",  # Framework default
57345734 llm_provider="openai",
57355735 temperature=0.7
57365736)
@@ -5808,7 +5808,7 @@ llm_manager = LLMManager(
58085808 primary_provider="openai",
58095809 fallback_providers=["anthropic", "gemini"],
58105810 model_preferences={
5811- "openai": "gpt-4.1 ",
5811+        "openai": "gpt-5.1-chat-latest",
58125812 "anthropic": "claude-sonnet-4-20250514",
58135813 "gemini": "gemini-2.5-pro",
58145814 "deepseek": "deepseek-reasoner"
@@ -5833,7 +5833,7 @@ OPENROUTER_API_KEY=sk-or-your_openrouter_key_here
58335833
58345834# Default Settings
58355835DEFAULT_LLM_PROVIDER=openai
5836- DEFAULT_MODEL=gpt-4.1
5836+ DEFAULT_MODEL=gpt-5.1-chat-latest
58375837DEFAULT_TEMPERATURE=0.3
58385838```
58395839
@@ -5843,7 +5843,7 @@ DEFAULT_TEMPERATURE=0.3
58435843{
58445844 "llm": {
58455845 "provider": "openai",
5846- "model": "gpt-4.1 ",
5846+    "model": "gpt-5.1-chat-latest",
58475847 "temperature": 0.3,
58485848 "max_tokens": 32768,
58495849 "fallback_providers": ["anthropic", "deepseek", "gemini"]
@@ -5903,18 +5903,18 @@ response = await llm.generate(
59035903
59045904#### Code Generation
59055905
5906- - Primary: DeepSeek-Reasoner, GPT-4.1
5906+ - Primary: DeepSeek-Reasoner, gpt-5.1-chat-latest
59075907- Alternative: Claude-Sonnet-4
59085908
59095909#### Analysis & Reasoning
59105910
5911- - Primary: DeepSeek-Reasoner, GPT-4.1 , Claude-Sonnet-4
5911+- Primary: DeepSeek-Reasoner, gpt-5.1-chat-latest, Claude-Sonnet-4
59125912- Alternative: Gemini-2.5-Pro
59135913
59145914#### Cost-Sensitive Tasks
59155915
59165916- Primary: DeepSeek-Reasoner, Gemini-2.5-Pro
5917- - Alternative: GPT-4.1
5917+ - Alternative: gpt-5.1-chat-latest
59185918
59195919#### Long Context Tasks
59205920
@@ -5925,7 +5925,7 @@ response = await llm.generate(
59255925
59265926| Provider | Speed | Cost | Context | Quality |
59275927| ------------------------- | --------- | -------- | ------- | -------------------- |
5928- | OpenAI GPT-4.1 | Fast | Medium | 128K | Excellent |
5928+ | OpenAI gpt-5.1-chat-latest | Fast | Medium | 128K | Excellent |
59295929| Anthropic Claude-Sonnet-4 | Medium | Medium | 200K | Excellent |
59305930| Google Gemini-2.5-Pro | Very Fast | Low | 250K | Very Good |
59315931| DeepSeek-Reasoner | Fast | Very Low | 65K | Superior (Reasoning) |
@@ -6828,7 +6828,7 @@ import asyncio
68286828from spoon_ai.chat import ChatBot
68296829
68306830# ChatBot includes built-in short-term memory with auto-trimming
6831- llm = ChatBot(model_name="gpt-4.1 ", llm_provider="openai")
6831+llm = ChatBot(model_name="gpt-5.1-chat-latest", llm_provider="openai")
68326832
68336833async def main():
68346834 await llm.ask([{"role": "user", "content": "My name is Alice"}])
@@ -8121,7 +8121,7 @@ SpoonOS eliminates common development complexity:
81218121```python
81228122# Simple agent creation - no error handling needed
81238123agent = ToolCallAgent(
8124- llm=ChatBot(llm_provider="openai", model_name="gpt-4.1 "),
8124+    llm=ChatBot(llm_provider="openai", model_name="gpt-5.1-chat-latest"),
81258125 available_tools=ToolManager([CryptoTool(), Web3Tool()])
81268126)
81278127
@@ -8835,28 +8835,19 @@ from spoon_ai.agents import SpoonReactAI
88358835from spoon_ai.chat import ChatBot
88368836from spoon_toolkits import CryptoPowerDataPriceTool, CryptoPowerDataCEXTool
88378837
8838- def get_crypto_tools():
8839- """Helper for docs: instantiate available crypto/Web3 tools."""
8840- return [
8841- CryptoPowerDataPriceTool(),
8842- CryptoPowerDataCEXTool(),
8843- ]
8844-
88458838# Create your first agent
88468839def create_agent():
88478840 # Configure LLM
88488841 llm = ChatBot(
8849- # Pick up provider/model from env to support Gemini out of the box.
8850- # Example: set DEFAULT_LLM_PROVIDER=gemini and GEMINI_API_KEY=***
8851- llm_provider=os.getenv("LLM_PROVIDER") or os.getenv("DEFAULT_LLM_PROVIDER") or "gemini",
8852- model_name=os.getenv("LLM_MODEL") or "gemini-2.5-pro",
8842+ llm_provider="openai",
8843+ model_name="gpt-5.1-chat-latest",
88538844 temperature=0.3
88548845 )
88558846
88568847 # Create agent with tools
88578848 agent = SpoonReactAI(
88588849 llm=llm,
8859- tools=[*get_crypto_tools()] # requires `pip install -e toolkit`
8850+ tools=[CryptoPowerDataPriceTool(), CryptoPowerDataCEXTool()]
88608851 )
88618852
88628853 return agent
@@ -8959,7 +8950,7 @@ If you want token-by-token output (works with any supported provider):
89598950
89608951```python
89618952async def stream_demo():
8962- llm = ChatBot(model_name="gpt-4.1 ", llm_provider="openai")
8953+    llm = ChatBot(model_name="gpt-5.1-chat-latest", llm_provider="openai")
89638954 messages = [{"role": "user", "content": "Stream a 3-step plan to learn SpoonOS"}]
89648955 async for chunk in llm.astream(messages=messages):
89658956 print(chunk.delta or "", end="", flush=True)
@@ -9239,7 +9230,7 @@ class ProductionAgent:
92399230 default_config = {
92409231 "llm": {
92419232 "provider": "openai",
9242- "model": "gpt-4.1 ",
9233+                "model": "gpt-5.1-chat-latest",
92439234 "temperature": 0.3
92449235 },
92459236 "tools": ["crypto_tools", "greeting_tool"],
0 commit comments