libs/genai/langchain_google_genai/chat_models.py (18 additions, 9 deletions)
@@ -929,15 +929,18 @@ def _parse_response_candidate(
             }
             function_call_signatures.append(sig_block)
 
-    # Add function call signatures to content only if there's already other content
-    # This preserves backward compatibility where content is "" for
-    # function-only responses
-    if function_call_signatures and content is not None:
-        for sig_block in function_call_signatures:
-            content = _append_to_content(content, sig_block)
-
-    if content is None:
-        content = ""
+    # Add function call signatures to content only if there's already other content
+    # This preserves backward compatibility where content is "" for
+    # function-only responses
+    if function_call_signatures and content is not None:
+        for sig_block in function_call_signatures:
+            content = _append_to_content(content, sig_block)
+    if hasattr(response_candidate, "logprobs_result"):
+        response_metadata["logprobs"] = proto.Message.to_dict(
+            response_candidate.logprobs_result
+        )
+    if content is None:
+        content = ""
     if isinstance(content, list) and any(
         isinstance(item, dict) and "executable_code" in item for item in content
     ):
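For reference, the new `logprobs_result` handling converts the proto message into a plain nested dict. A minimal sketch of the resulting `response_metadata["logprobs"]` shape, with illustrative values taken from the unit test further down:

```python
# Illustrative shape only: the token string and log probability are example
# values, not real model output.
logprobs_metadata = {
    "top_candidates": [
        {
            "candidates": [
                {"token": "Test", "log_probability": -0.1},
            ]
        }
    ]
}
```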
@@ -1825,6 +1828,9 @@ class Joke(BaseModel):
     stop: Optional[List[str]] = None
     """Stop sequences for the model."""
 
+    logprobs: Optional[int] = None
+    """Number of top logprobs to return at each decoding step."""
+
     streaming: Optional[bool] = None
     """Whether to stream responses from the model."""
 
@@ -2037,6 +2043,7 @@ def _prepare_params(
"max_output_tokens": self.max_output_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"logprobs": getattr(self, "logprobs", None),
"response_modalities": self.response_modalities,
"thinking_config": (
(
@@ -2058,6 +2065,8 @@ def _prepare_params(
             }.items()
             if v is not None
         }
+        if getattr(self, "logprobs", None) is not None:
+            gen_config["response_logprobs"] = True
         if generation_config:
             gen_config = {**gen_config, **generation_config}
 
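Taken together, these hunks forward `logprobs` into the generation config and, whenever it is set, also enable `response_logprobs` so the API actually returns them. A minimal usage sketch, assuming a valid `GOOGLE_API_KEY` in the environment; the model name is illustrative rather than taken from this diff:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

# Request the top 3 logprobs per decoding step; per the hunk above, this
# implicitly sets response_logprobs=True in the generation config.
llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash", logprobs=3)

result = llm.invoke("Say hi")
# With this change, the parsed logprobs_result lands in response_metadata.
print(result.response_metadata.get("logprobs"))
```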
libs/genai/tests/unit_tests/test_chat_models.py (49 additions, 0 deletions)
@@ -137,6 +137,55 @@ def test_initialization_inside_threadpool() -> None:
     ).result()
 
 
+def test_logprobs() -> None:
+    """Test that the logprobs parameter is set and appears in the response."""
+    llm = ChatGoogleGenerativeAI(
+        model=MODEL_NAME,
+        google_api_key=SecretStr("secret-api-key"),
+        logprobs=10,
+    )
+    assert llm.logprobs == 10
+
+    raw_response = {
+        "candidates": [
+            {
+                "content": {"parts": [{"text": "Test response"}]},
+                "logprobs_result": {
+                    "top_candidates": [
+                        {
+                            "candidates": [
+                                {"token": "Test", "log_probability": -0.1},
+                            ]
+                        }
+                    ]
+                },
+            }
+        ],
+    }
+    response = GenerateContentResponse(raw_response)
+
+    with patch(
+        "langchain_google_genai.chat_models._chat_with_retry"
+    ) as mock_chat_with_retry:
+        mock_chat_with_retry.return_value = response
+        llm = ChatGoogleGenerativeAI(
+            model=MODEL_NAME,
+            google_api_key="test-key",
+            logprobs=1,
+        )
+        result = llm.invoke("test")
+        assert "logprobs" in result.response_metadata
+        assert result.response_metadata["logprobs"] == {
+            "top_candidates": [
+                {
+                    "candidates": [
+                        {"token": "Test", "log_probability": -0.1},
+                    ]
+                }
+            ]
+        }
+
+
 def test_client_transport() -> None:
     """Test client transport configuration."""
     model = ChatGoogleGenerativeAI(model=MODEL_NAME, google_api_key="fake-key")