Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 23 additions & 1 deletion libs/vertexai/langchain_google_vertexai/chat_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,11 @@
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models import (
LanguageModelInput,
ModelProfile,
ModelProfileRegistry,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
LangSmithParams,
Expand Down Expand Up @@ -115,6 +119,8 @@
ToolConfig as GapicToolConfig,
VideoMetadata,
)

from langchain_google_vertexai.data._profiles import _PROFILES
from langchain_google_vertexai._base import _VertexAICommon
from langchain_google_vertexai._compat import _convert_from_v1_to_vertex
from langchain_google_vertexai._image_utils import (
Expand Down Expand Up @@ -178,6 +184,14 @@
]


_MODEL_PROFILES = cast("ModelProfileRegistry", _PROFILES)


def _get_default_model_profile(model_name: str) -> ModelProfile:
    """Look up the bundled default profile for ``model_name``.

    Returns a copy so callers may mutate the result without affecting the
    shared registry; unknown model names yield an empty profile.
    """
    profile = _MODEL_PROFILES.get(model_name)
    if profile is None:
        return {}
    return profile.copy()


_FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY = (
"__gemini_function_call_thought_signatures__"
)
Expand Down Expand Up @@ -1867,6 +1881,14 @@ def validate_environment(self) -> Self:

return self

@model_validator(mode="after")
def _set_model_profile(self) -> Self:
"""Set model profile if not overridden."""
if self.profile is None:
model_id = re.sub(r"-\d{3}$", "", self.model_name.replace("models/", ""))
self.profile = _get_default_model_profile(model_id)
return self

def _prepare_params(
self,
stop: list[str] | None = None,
Expand Down
1 change: 1 addition & 0 deletions libs/vertexai/langchain_google_vertexai/data/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Model profile data. All edits should be made in profile_augmentations.toml."""
291 changes: 291 additions & 0 deletions libs/vertexai/langchain_google_vertexai/data/_profiles.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,291 @@
"""Auto-generated model profiles.

DO NOT EDIT THIS FILE MANUALLY.
This file is generated by the langchain-profiles CLI tool.

It contains data derived from the models.dev project.

Source: https://github.com/sst/models.dev
License: MIT License

To update these data, refer to the instructions here:

https://docs.langchain.com/oss/python/langchain/models#updating-or-overwriting-profile-data
"""

from typing import Any

# Mapping of model identifier -> capability/limit profile. Keys are Gemini
# model names without a trailing version suffix; values hold token limits and
# boolean modality/tool-capability flags consumed as default model profiles.
_PROFILES: dict[str, dict[str, Any]] = {
    # NOTE(review): an embedding model carrying chat-capability flags
    # (tool_choice, structured_output, ...) looks odd, and 3072 reads more
    # like an embedding dimensionality than an output-token limit — verify
    # this entry against the models.dev source data.
    "gemini-embedding-001": {
        "max_input_tokens": 2048,
        "max_output_tokens": 3072,
        "image_inputs": False,
        "audio_inputs": False,
        "video_inputs": False,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": False,
        "tool_calling": False,
        "image_url_inputs": True,
        "pdf_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-2.5-flash-preview-05-20": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-flash-lite-latest": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-3-pro-preview": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "structured_output": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
    },
    "gemini-2.5-flash": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-flash-latest": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-2.5-pro-preview-05-06": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-2.0-flash-lite": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 8192,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": False,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-2.0-flash": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 8192,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": False,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-2.5-flash-lite": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-2.5-pro-preview-06-05": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    # NOTE(review): max_input_tokens 65536 differs from the 1048576 used by
    # every other 2.5-series entry — confirm against models.dev.
    "gemini-2.5-flash-lite-preview-06-17": {
        "max_input_tokens": 65536,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-2.5-flash-preview-09-2025": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-2.5-flash-preview-04-17": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-2.5-pro": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
    "gemini-2.5-flash-lite-preview-09-2025": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 65536,
        "image_inputs": True,
        "audio_inputs": True,
        "pdf_inputs": True,
        "video_inputs": True,
        "image_outputs": False,
        "audio_outputs": False,
        "video_outputs": False,
        "reasoning_output": True,
        "tool_calling": True,
        "image_url_inputs": True,
        "image_tool_message": True,
        "tool_choice": True,
        "structured_output": True,
    },
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Hand-maintained augmentations for the auto-generated model profile data
# (presumably merged by the langchain-profiles CLI — confirm against its docs).
provider = "google-vertex"

# Capability flags applied across this provider's model profiles.
[overrides]
image_url_inputs = true
pdf_inputs = true
image_tool_message = true
tool_choice = true
structured_output = true
3 changes: 3 additions & 0 deletions libs/vertexai/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,9 @@ test_integration = [
"langchain-tests>=1.0.0,<2.0.0",
]

# NOTE(review): temporary pin to an unreleased langchain-core feature branch
# (cc/model_profiles_distributed); remove this source override once the
# profile support ships in a released langchain-core.
[tool.uv.sources]
langchain-core = { git = "https://github.com/langchain-ai/langchain", subdirectory = "libs/core", branch = "cc/model_profiles_distributed" }

[tool.ruff]
fix = true

Expand Down
17 changes: 17 additions & 0 deletions libs/vertexai/tests/unit_tests/test_chat_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -354,6 +354,23 @@ def test_init_client_with_custom_model_kwargs() -> None:
assert default_params["thinking"] == {"type": "enabled", "budget_tokens": 1024}


def test_profile() -> None:
    """Default model profiles are attached based on the model name."""
    expected_reasoning = {"gemini-2.0-flash": False, "gemini-2.5-flash": True}
    for model_name, has_reasoning in expected_reasoning.items():
        llm = ChatVertexAI(
            model=model_name, project="test-project", location="moon-dark1"
        )
        assert llm.profile
        assert bool(llm.profile["reasoning_output"]) is has_reasoning

    # Unknown models fall back to an empty profile rather than raising.
    unknown = ChatVertexAI(model="foo", project="test-project", location="moon-dark1")
    assert unknown.profile == {}


@pytest.mark.parametrize(
("model", "location"),
[
Expand Down
Loading