Skip to content

Commit af6c61b

Browse files
devin-ai-integration[bot] and João committed
fix: Resolve type checking errors in prompt caching implementation
- Add type ignore comment for intentional content field transformation
- Convert ChatCompletionDeltaToolCall to ToolCall format for event emission
- Fixes mypy errors on lines 416 and 789

Co-Authored-By: João <[email protected]>
1 parent a395a5c commit af6c61b

File tree

1 file changed

+13
-2
lines changed

1 file changed

+13
-2
lines changed

src/crewai/llm.py

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -413,7 +413,7 @@ def _apply_prompt_caching(
413413
):
414414
content = message.get("content", "")
415415
if isinstance(content, str):
416-
formatted_message["content"] = [
416+
formatted_message["content"] = [ # type: ignore[assignment]
417417
{
418418
"type": "text",
419419
"text": content,
@@ -783,10 +783,21 @@ def _handle_streaming_tool_calls(
783783
tool_call.function.arguments
784784
)
785785
assert hasattr(crewai_event_bus, "emit")
786+
# Convert ChatCompletionDeltaToolCall to ToolCall format
787+
from crewai.events.types.llm_events import ToolCall, FunctionCall
788+
converted_tool_call = ToolCall(
789+
id=tool_call.id,
790+
function=FunctionCall(
791+
name=tool_call.function.name,
792+
arguments=tool_call.function.arguments or ""
793+
),
794+
type=tool_call.type,
795+
index=tool_call.index
796+
)
786797
crewai_event_bus.emit(
787798
self,
788799
event=LLMStreamChunkEvent(
789-
tool_call=tool_call.to_dict(),
800+
tool_call=converted_tool_call,
790801
chunk=tool_call.function.arguments,
791802
from_task=from_task,
792803
from_agent=from_agent,

0 commit comments

Comments (0)