Commit 7da3afa

fix: function calls need to send the thought signature back to Vertex
1 parent 04dcfdc commit 7da3afa

2 files changed: +51 −5 lines changed

models/vertex_ai/manifest.yaml

Lines changed: 1 addition & 1 deletion

@@ -32,4 +32,4 @@ resource:
     tool:
       enabled: true
 type: plugin
-version: 0.0.34
+version: 0.0.35

models/vertex_ai/models/llm/llm.py

Lines changed: 50 additions & 4 deletions

@@ -1,6 +1,7 @@
 import base64
 import io
 import json
+import logging
 import time
 from collections.abc import Generator, Sequence
 from typing import Optional, Union, cast
@@ -52,6 +53,8 @@
 # For more information about the models, please refer to https://ai.google.dev/gemini-api/docs/thinking
 DEFAULT_NO_THINKING_MODELS = ["gemini-2.5-flash-lite"]
 
+logger = logging.getLogger(__name__)
+
 
 class VertexAiLargeLanguageModel(LargeLanguageModel):
     def _invoke(
@@ -634,6 +637,12 @@ def _handle_generate_response(
                     ),
                 )
                 assistant_prompt_message.tool_calls.append(tool_call)
+                # Capture thought_signature if the SDK surfaced it on the same part
+                sig = self._extract_thought_signature(part)
+                if sig:
+                    if not hasattr(self, "_last_function_call_signatures"):
+                        self._last_function_call_signatures = []
+                    self._last_function_call_signatures.append(sig)
             # Check for text
             elif hasattr(part, 'text') and part.text:
                 if part.thought is True and not is_thinking:
@@ -698,6 +707,12 @@ def _handle_generate_stream_response(
                             ),
                         )
                     )
+                    # Capture thought_signature if present on the streaming part
+                    sig = self._extract_thought_signature(part)
+                    if sig:
+                        if not hasattr(self, "_last_function_call_signatures"):
+                            self._last_function_call_signatures = []
+                        self._last_function_call_signatures.append(sig)
                 # Check for text
                 elif hasattr(part, 'text') and part.text:
                     if part.thought is True and not is_thinking:
@@ -774,6 +789,32 @@ def _handle_generate_stream_response(
                     ),
                 )
 
+    def _extract_thought_signature(self, part) -> Optional[str]:
+        """
+        Best-effort extractor for Vertex AI thought signatures from a Part.
+        Handles snake_case and camelCase, and tries dict/extraContent fallbacks.
+        """
+        # Direct attributes first
+        sig = getattr(part, "thought_signature", None) or getattr(part, "thoughtSignature", None)
+        if isinstance(sig, str) and sig:
+            return sig
+        # Try dict conversion if the SDK object supports it
+        try:
+            d = part.to_dict() if hasattr(part, "to_dict") else (getattr(part, "__dict__", {}) or {})
+            if isinstance(d, dict):
+                sig = d.get("thoughtSignature") or d.get("thought_signature")
+                if not sig:
+                    extra = d.get("extraContent") or d.get("extra_content") or {}
+                    if isinstance(extra, dict):
+                        g = extra.get("google")
+                        if isinstance(g, dict):
+                            sig = g.get("thought_signature")
+                if isinstance(sig, str) and sig:
+                    return sig
+        except Exception as e:
+            logger.warning(e, exc_info=True)
+        return None
+
     def _convert_one_message_to_text(self, message: PromptMessage) -> str:
         """
         Convert a single message to a string.
@@ -830,15 +871,20 @@ def _format_message_to_genai_content(self, message: PromptMessage) -> dict:
             return {"role": "user", "parts": parts}
         elif isinstance(message, AssistantPromptMessage):
             if message.tool_calls:
-                parts = [
-                    {
+                parts = []
+                for tool_call in message.tool_calls:
+                    part_dict = {
                         "function_call": {
                             "name": tool_call.function.name,
                             "args": json.loads(tool_call.function.arguments),
                         }
                     }
-                    for tool_call in message.tool_calls
-                ]
+                    # Attach thought_signature if we captured one from the previous model output
+                    if hasattr(self, "_last_function_call_signatures") and self._last_function_call_signatures:
+                        sig = self._last_function_call_signatures.pop(0)
+                        if sig:
+                            part_dict["thought_signature"] = sig
+                    parts.append(part_dict)
             else:
                 parts = [{"text": message.content}]
             return {"role": "model", "parts": parts}
