From b935b6039227df21a43674efc903e7902b95ca1a Mon Sep 17 00:00:00 2001
From: helloissariel <2590254500@qq.com>
Date: Wed, 3 Dec 2025 11:46:14 +0800
Subject: [PATCH 1/2] Fix gemini/anthropic on_llm_end payload

---
 spoon_ai/llm/providers/anthropic_provider.py | 19 +++++++++++--------
 spoon_ai/llm/providers/gemini_provider.py    | 19 +++++++++++--------
 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/spoon_ai/llm/providers/anthropic_provider.py b/spoon_ai/llm/providers/anthropic_provider.py
index ab9ad8b..380b84c 100644
--- a/spoon_ai/llm/providers/anthropic_provider.py
+++ b/spoon_ai/llm/providers/anthropic_provider.py
@@ -330,15 +330,18 @@ async def chat_stream(self, messages: List[Message],callbacks: Optional[List] =
                     usage_data["cache_read_input_tokens"] = chunk.usage.cache_read_input_tokens
 
             # Trigger on_llm_end callback
+            final_response = LLMResponse(
+                content=full_content,
+                provider="anthropic",
+                model=model,
+                finish_reason=finish_reason or "stop",
+                native_finish_reason=finish_reason or "stop",
+                tool_calls=[],
+                usage=usage_data,
+                metadata={}
+            )
             await callback_manager.on_llm_end(
-                response=LLMResponseChunk(
-                    content=full_content,
-                    provider="anthropic",
-                    model=model,
-                    finish_reason=finish_reason,
-                    tool_calls=[],
-                    usage=usage_data
-                ),
+                response=final_response,
                 run_id=run_id
             )
 
diff --git a/spoon_ai/llm/providers/gemini_provider.py b/spoon_ai/llm/providers/gemini_provider.py
index 0337d60..c1fe75e 100644
--- a/spoon_ai/llm/providers/gemini_provider.py
+++ b/spoon_ai/llm/providers/gemini_provider.py
@@ -343,15 +343,18 @@ async def chat_stream(self, messages: List[Message],callbacks: Optional[List] =
                 yield response_chunk
 
             # Trigger on_llm_end callback
+            final_response = LLMResponse(
+                content=full_content,
+                provider="gemini",
+                model=model,
+                finish_reason=finish_reason or "stop",
+                native_finish_reason=finish_reason or "stop",
+                tool_calls=[],
+                usage=usage_data,
+                metadata={}
+            )
             await callback_manager.on_llm_end(
-                response=LLMResponseChunk(
-                    content=full_content,
-                    provider="gemini",
-                    model=model,
-                    finish_reason=finish_reason,
-                    tool_calls=[],
-                    usage=usage_data
-                ),
+                response=final_response,
                 run_id=run_id
             )
 

From 7dfd1b7f86adbbd00eac32a708865fe93463e766 Mon Sep 17 00:00:00 2001
From: helloissariel <2590254500@qq.com>
Date: Wed, 3 Dec 2025 15:35:39 +0800
Subject: [PATCH 2/2] change anthropic max_tokens

---
 spoon_ai/llm/config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/spoon_ai/llm/config.py b/spoon_ai/llm/config.py
index c161f1e..339d935 100644
--- a/spoon_ai/llm/config.py
+++ b/spoon_ai/llm/config.py
@@ -335,7 +335,7 @@ def _get_provider_defaults(self, provider_name: str) -> Dict[str, Any]:
         'anthropic': {
             'model': 'claude-sonnet-4-20250514',
             'base_url': 'https://api.anthropic.com',
-            'max_tokens': 200000,
+            'max_tokens': 63000,  # Claude Sonnet-4 supports max output tokens below 64000
             'temperature': 0.1,  # Lower temperature for Claude
             **{k: v for k, v in common_defaults.items() if k != 'temperature'}
         },