2 changes: 1 addition & 1 deletion spoon_ai/llm/config.py
@@ -335,7 +335,7 @@ def _get_provider_defaults(self, provider_name: str) -> Dict[str, Any]:
             'anthropic': {
                 'model': 'claude-sonnet-4-20250514',
                 'base_url': 'https://api.anthropic.com',
-                'max_tokens': 200000,
+                'max_tokens': 63000,  # Claude Sonnet 4 caps output at 64000 tokens, so stay below that
                 'temperature': 0.1,  # Lower temperature for Claude
                 **{k: v for k, v in common_defaults.items() if k != 'temperature'}
             },
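For illustration, a minimal sketch of the reasoning behind the new default: Claude Sonnet 4 caps output at 64000 tokens (per the comment in the hunk above), so any requested max_tokens should stay below that cap. The helper name and clamping behaviour below are assumptions for illustration, not code from this PR.

from typing import Optional

# Documented output cap for Claude Sonnet 4 (per the comment in the hunk above).
ANTHROPIC_MAX_OUTPUT_TOKENS = 64000

def resolve_max_tokens(requested: Optional[int], provider_default: int = 63000) -> int:
    """Hypothetical helper: keep max_tokens strictly below the model's output cap."""
    value = requested if requested is not None else provider_default
    return min(value, ANTHROPIC_MAX_OUTPUT_TOKENS - 1)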
19 changes: 11 additions & 8 deletions spoon_ai/llm/providers/anthropic_provider.py
@@ -330,15 +330,18 @@ async def chat_stream(self, messages: List[Message],callbacks: Optional[List] =
                     usage_data["cache_read_input_tokens"] = chunk.usage.cache_read_input_tokens
 
             # Trigger on_llm_end callback
+            final_response = LLMResponse(
+                content=full_content,
+                provider="anthropic",
+                model=model,
+                finish_reason=finish_reason or "stop",
+                native_finish_reason=finish_reason or "stop",
+                tool_calls=[],
+                usage=usage_data,
+                metadata={}
+            )
             await callback_manager.on_llm_end(
-                response=LLMResponseChunk(
-                    content=full_content,
-                    provider="anthropic",
-                    model=model,
-                    finish_reason=finish_reason,
-                    tool_calls=[],
-                    usage=usage_data
-                ),
+                response=final_response,
                 run_id=run_id
             )

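With this change, the streaming path aggregates the chunks into a complete LLMResponse before firing on_llm_end, so callbacks see the same response type as the non-streaming path instead of a synthetic LLMResponseChunk. A hedged sketch of a callback that benefits from that, assuming handlers receive the same keyword arguments the diff passes to callback_manager.on_llm_end (the handler class itself is hypothetical):

class UsageLogger:
    async def on_llm_end(self, response, run_id=None, **kwargs):
        # `response` is now the aggregated LLMResponse: full content, a concrete
        # finish_reason ("stop" fallback), and the accumulated usage counters.
        print(f"[{run_id}] {response.provider}/{response.model} "
              f"finished={response.finish_reason} usage={response.usage}")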
19 changes: 11 additions & 8 deletions spoon_ai/llm/providers/gemini_provider.py
@@ -343,15 +343,18 @@ async def chat_stream(self, messages: List[Message],callbacks: Optional[List] =
                 yield response_chunk
 
             # Trigger on_llm_end callback
+            final_response = LLMResponse(
+                content=full_content,
+                provider="gemini",
+                model=model,
+                finish_reason=finish_reason or "stop",
+                native_finish_reason=finish_reason or "stop",
+                tool_calls=[],
+                usage=usage_data,
+                metadata={}
+            )
             await callback_manager.on_llm_end(
-                response=LLMResponseChunk(
-                    content=full_content,
-                    provider="gemini",
-                    model=model,
-                    finish_reason=finish_reason,
-                    tool_calls=[],
-                    usage=usage_data
-                ),
+                response=final_response,
                 run_id=run_id
             )

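The Gemini hunk mirrors the Anthropic one, so both providers now build an identical final LLMResponse. As a sketch (not part of the PR), that aggregation could be factored into one shared helper; LLMResponse is the class already used in the hunks above, imported wherever the providers import it:

def build_final_response(provider, model, full_content, finish_reason, usage_data):
    """Fold streamed output into the LLMResponse handed to on_llm_end."""
    return LLMResponse(
        content=full_content,
        provider=provider,
        model=model,
        finish_reason=finish_reason or "stop",
        native_finish_reason=finish_reason or "stop",
        tool_calls=[],
        usage=usage_data,
        metadata={},
    )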