47 changes: 39 additions & 8 deletions libs/core/langchain_core/language_models/chat_models.py
@@ -1,70 +1,70 @@
"""Chat models for conversational AI."""

from __future__ import annotations

import asyncio
import inspect
import json
import typing
from abc import ABC, abstractmethod
from collections.abc import AsyncIterator, Callable, Iterator, Sequence
from functools import cached_property
from operator import itemgetter
from typing import TYPE_CHECKING, Any, Literal, cast

from pydantic import BaseModel, ConfigDict, Field
from typing_extensions import override

from langchain_core._api.beta_decorator import beta
from langchain_core.caches import BaseCache
from langchain_core.callbacks import (
AsyncCallbackManager,
AsyncCallbackManagerForLLMRun,
CallbackManager,
CallbackManagerForLLMRun,
Callbacks,
)
from langchain_core.globals import get_llm_cache
from langchain_core.language_models._utils import (
_normalize_messages,
_update_message_content_to_blocks,
)
from langchain_core.language_models.base import (
BaseLanguageModel,
LangSmithParams,
LanguageModelInput,
)
from langchain_core.load import dumpd, dumps
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
AnyMessage,
BaseMessage,
convert_to_messages,
is_data_content_block,
message_chunk_to_message,
)
from langchain_core.messages import content as types
from langchain_core.messages.block_translators.openai import (
convert_to_openai_image_block,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
)
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
Generation,
LLMResult,
RunInfo,
)
from langchain_core.outputs.chat_generation import merge_chat_generation_chunks
from langchain_core.prompt_values import ChatPromptValue, PromptValue, StringPromptValue
from langchain_core.rate_limiters import BaseRateLimiter
from langchain_core.runnables import RunnableMap, RunnablePassthrough
from langchain_core.runnables.config import ensure_config, run_in_executor
from langchain_core.runnables.config import ensure_config, run_in_executor, get_executor_for_config

Check failure on line 67 in libs/core/langchain_core/language_models/chat_models.py
GitHub Actions / lint (libs/core), Python 3.10 through 3.14
Ruff (E501): langchain_core/language_models/chat_models.py:67:89 Line too long (99 > 88)
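The E501 failure points at the widened import line, which runs to 99 characters. One way to clear it, shown here as a sketch rather than what the PR commits, is to break the import into a parenthesized block:

    from langchain_core.runnables.config import (
        ensure_config,
        get_executor_for_config,
        run_in_executor,
    )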
from langchain_core.tracers._streaming import _StreamingCallbackHandler
from langchain_core.utils.function_calling import (
    convert_to_json_schema,
@@ -904,30 +904,61 @@
            run_id=run_id,
            batch_size=len(messages),
        )
-        results = []
        input_messages = [
            _normalize_messages(message_list) for message_list in messages
        ]
-        for i, m in enumerate(input_messages):
+        if len(input_messages) == 1:
            try:
-                results.append(
+                results = [
                    self._generate_with_cache(
-                        m,
+                        input_messages[0],
                        stop=stop,
-                        run_manager=run_managers[i] if run_managers else None,
+                        run_manager=run_managers[0] if run_managers else None,
                        **kwargs,
                    )
-                )
+                ]
            except BaseException as e:
                if run_managers:
                    generations_with_error_metadata = _generate_response_from_error(e)
-                    run_managers[i].on_llm_error(
+                    run_managers[0].on_llm_error(
                        e,
                        response=LLMResult(
                            generations=[generations_with_error_metadata]
                        ),
                    )
                raise
+        else:
+            def _invoke(index_and_message: tuple[int, list[BaseMessage]]):

Check failure on line 931 in libs/core/langchain_core/language_models/chat_models.py
GitHub Actions / lint (libs/core), Python 3.10 through 3.14
Ruff (ANN202): langchain_core/language_models/chat_models.py:931:17 Missing return type annotation for private function `_invoke`
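ANN202 asks for a return type on `_invoke`. Judging from the body below, which returns either the result of `self._generate_with_cache` (a `ChatResult`) or an `(index, exception)` tuple, a plausible annotation, offered as an assumption rather than the author's fix, would be:

    def _invoke(
        index_and_message: tuple[int, list[BaseMessage]],
    ) -> ChatResult | tuple[int, BaseException]: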
+                i, m = index_and_message
+                try:
+                    return self._generate_with_cache(
+                        m,
+                        stop=stop,
+                        run_manager=run_managers[i] if run_managers else None,
+                        **kwargs,
+                    )
+                except BaseException as e:
+                    return (i, e)
+
+            with get_executor_for_config(None) as executor:
+                mapped = list(
+                    executor.map(_invoke, list(enumerate(input_messages)))
+                )
+            results = []
+            for i, res in enumerate(mapped):
+                if isinstance(res, tuple) and isinstance(res[1], BaseException):
+                    if run_managers:
+                        generations_with_error_metadata = _generate_response_from_error(
+                            res[1]
+                        )
+                        run_managers[i].on_llm_error(
+                            res[1],
+                            response=LLMResult(
+                                generations=[generations_with_error_metadata]
+                            ),
+                        )
+                    raise res[1]
+                results.append(res)
        flattened_outputs = [
            LLMResult(generations=[res.generations], llm_output=res.llm_output)
            for res in results
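The new `else` branch fans the message lists out across a thread pool, then replays any failure on the callback manager for the matching input. Because `executor.map` yields results in input order, index `i` in the collection loop still lines up with `run_managers[i]`. A minimal self-contained sketch of the same capture-then-reraise pattern, using plain `concurrent.futures` and a hypothetical `do_work` stand-in for `self._generate_with_cache`:

    from concurrent.futures import ThreadPoolExecutor

    def do_work(item: int) -> int:
        # Hypothetical stand-in for the real per-message generation call.
        if item < 0:
            raise ValueError(f"bad item: {item}")
        return item * 2

    def _invoke(indexed: tuple[int, int]) -> int | tuple[int, BaseException]:
        # Workers never raise; they hand the exception back with its index
        # so the caller can attribute it to the right run before re-raising.
        i, item = indexed
        try:
            return do_work(item)
        except BaseException as e:
            return (i, e)

    items = [1, 2, 3]
    with ThreadPoolExecutor() as executor:
        mapped = list(executor.map(_invoke, enumerate(items)))

    results = []
    for i, res in enumerate(mapped):
        if isinstance(res, tuple) and isinstance(res[1], BaseException):
            # In the PR, run_managers[i].on_llm_error(...) fires here.
            raise res[1]
        results.append(res)

    print(results)  # [2, 4, 6]

Returning `(i, e)` instead of raising inside the worker is what lets all submitted tasks finish and keeps the failure paired with its index; raising directly inside `executor.map` would surface the exception only when the results iterator reaches it, with no record of which input produced it.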