Skip to content

Commit 5a016de

Browse files
mdrxy, keenborder786, casparb, ccurme, cbornet
authored
chore: delete deprecated items (#33192)
Removed: - `libs/core/langchain_core/chat_history.py`: `add_user_message` and `add_ai_message` in favor of `add_messages` and `aadd_messages` - `libs/core/langchain_core/language_models/base.py`: `predict`, `predict_messages`, and async versions in favor of `invoke`. removed `_all_required_field_names` since it was a wrapper on `get_pydantic_field_names` - `libs/core/langchain_core/language_models/chat_models.py`: `callback_manager` param in favor of `callbacks`. `__call__` and `call_as_llm` method in favor of `invoke` - `libs/core/langchain_core/language_models/llms.py`: `callback_manager` param in favor of `callbacks`. `__call__`, `predict`, `apredict`, and `apredict_messages` methods in favor of `invoke` - `libs/core/langchain_core/prompts/chat.py`: `from_role_strings` and `from_strings` in favor of `from_messages` - `libs/core/langchain_core/prompts/pipeline.py`: removed `PipelinePromptTemplate` - `libs/core/langchain_core/prompts/prompt.py`: `input_variables` param on `from_file` as it wasn't used - `libs/core/langchain_core/tools/base.py`: `callback_manager` param in favor of `callbacks` - `libs/core/langchain_core/tracers/context.py`: `tracing_enabled` in favor of `tracing_enabled_v2` - `libs/core/langchain_core/tracers/langchain_v1.py`: entire module - `libs/core/langchain_core/utils/loading.py`: entire module, `try_load_from_hub` - `libs/core/langchain_core/vectorstores/in_memory.py`: `upsert` in favor of `add_documents` - `libs/standard-tests/langchain_tests/integration_tests/chat_models.py` and `libs/standard-tests/langchain_tests/unit_tests/chat_models.py`: `tool_choice_value` as models should accept `tool_choice="any"` - `langchain` will consequently no longer expose these items if it was previously --------- Co-authored-by: Mohammad Mohtashim <[email protected]> Co-authored-by: Caspar Broekhuizen <[email protected]> Co-authored-by: ccurme <[email protected]> Co-authored-by: Christophe Bornet <[email protected]> Co-authored-by: Eugene Yurtsev <[email 
protected]> Co-authored-by: Sadra Barikbin <[email protected]> Co-authored-by: Vadym Barda <[email protected]>
1 parent b541a56 commit 5a016de

File tree

44 files changed

+402
-1088
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

44 files changed

+402
-1088
lines changed

libs/cli/langchain_cli/namespaces/migrate/.grit/patterns/langchain_to_core.json

Lines changed: 380 additions & 99 deletions
Large diffs are not rendered by default.

libs/core/langchain_core/chat_history.py

Lines changed: 1 addition & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -17,14 +17,12 @@
1717
from __future__ import annotations
1818

1919
from abc import ABC, abstractmethod
20-
from typing import TYPE_CHECKING, Union
20+
from typing import TYPE_CHECKING
2121

2222
from pydantic import BaseModel, Field
2323

2424
from langchain_core.messages import (
25-
AIMessage,
2625
BaseMessage,
27-
HumanMessage,
2826
get_buffer_string,
2927
)
3028
from langchain_core.runnables.config import run_in_executor
@@ -126,40 +124,6 @@ async def aget_messages(self) -> list[BaseMessage]:
126124
"""
127125
return await run_in_executor(None, lambda: self.messages)
128126

129-
def add_user_message(self, message: Union[HumanMessage, str]) -> None:
130-
"""Convenience method for adding a human message string to the store.
131-
132-
!!! note
133-
This is a convenience method. Code should favor the bulk ``add_messages``
134-
interface instead to save on round-trips to the persistence layer.
135-
136-
This method may be deprecated in a future release.
137-
138-
Args:
139-
message: The human message to add to the store.
140-
"""
141-
if isinstance(message, HumanMessage):
142-
self.add_message(message)
143-
else:
144-
self.add_message(HumanMessage(content=message))
145-
146-
def add_ai_message(self, message: Union[AIMessage, str]) -> None:
147-
"""Convenience method for adding an AI message string to the store.
148-
149-
!!! note
150-
This is a convenience method. Code should favor the bulk ``add_messages``
151-
interface instead to save on round-trips to the persistence layer.
152-
153-
This method may be deprecated in a future release.
154-
155-
Args:
156-
message: The AI message to add.
157-
"""
158-
if isinstance(message, AIMessage):
159-
self.add_message(message)
160-
else:
161-
self.add_message(AIMessage(content=message))
162-
163127
def add_message(self, message: BaseMessage) -> None:
164128
"""Add a Message object to the store.
165129

libs/core/langchain_core/language_models/base.py

Lines changed: 0 additions & 107 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@
2020
from pydantic import BaseModel, ConfigDict, Field, field_validator
2121
from typing_extensions import TypedDict, override
2222

23-
from langchain_core._api import deprecated
2423
from langchain_core.caches import BaseCache
2524
from langchain_core.callbacks import Callbacks
2625
from langchain_core.globals import get_verbose
@@ -37,7 +36,6 @@
3736
StringPromptValue,
3837
)
3938
from langchain_core.runnables import Runnable, RunnableSerializable
40-
from langchain_core.utils import get_pydantic_field_names
4139

4240
if TYPE_CHECKING:
4341
from langchain_core.outputs import LLMResult
@@ -259,102 +257,6 @@ def with_structured_output(
259257
# generate responses that match a given schema.
260258
raise NotImplementedError
261259

262-
@deprecated("0.1.7", alternative="invoke", removal="1.0")
263-
@abstractmethod
264-
def predict(
265-
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
266-
) -> str:
267-
"""Pass a single string input to the model and return a string.
268-
269-
Use this method when passing in raw text. If you want to pass in specific types
270-
of chat messages, use predict_messages.
271-
272-
Args:
273-
text: String input to pass to the model.
274-
stop: Stop words to use when generating. Model output is cut off at the
275-
first occurrence of any of these substrings.
276-
**kwargs: Arbitrary additional keyword arguments. These are usually passed
277-
to the model provider API call.
278-
279-
Returns:
280-
Top model prediction as a string.
281-
282-
"""
283-
284-
@deprecated("0.1.7", alternative="invoke", removal="1.0")
285-
@abstractmethod
286-
def predict_messages(
287-
self,
288-
messages: list[BaseMessage],
289-
*,
290-
stop: Optional[Sequence[str]] = None,
291-
**kwargs: Any,
292-
) -> BaseMessage:
293-
"""Pass a message sequence to the model and return a message.
294-
295-
Use this method when passing in chat messages. If you want to pass in raw text,
296-
use predict.
297-
298-
Args:
299-
messages: A sequence of chat messages corresponding to a single model input.
300-
stop: Stop words to use when generating. Model output is cut off at the
301-
first occurrence of any of these substrings.
302-
**kwargs: Arbitrary additional keyword arguments. These are usually passed
303-
to the model provider API call.
304-
305-
Returns:
306-
Top model prediction as a message.
307-
308-
"""
309-
310-
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
311-
@abstractmethod
312-
async def apredict(
313-
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
314-
) -> str:
315-
"""Asynchronously pass a string to the model and return a string.
316-
317-
Use this method when calling pure text generation models and only the top
318-
candidate generation is needed.
319-
320-
Args:
321-
text: String input to pass to the model.
322-
stop: Stop words to use when generating. Model output is cut off at the
323-
first occurrence of any of these substrings.
324-
**kwargs: Arbitrary additional keyword arguments. These are usually passed
325-
to the model provider API call.
326-
327-
Returns:
328-
Top model prediction as a string.
329-
330-
"""
331-
332-
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
333-
@abstractmethod
334-
async def apredict_messages(
335-
self,
336-
messages: list[BaseMessage],
337-
*,
338-
stop: Optional[Sequence[str]] = None,
339-
**kwargs: Any,
340-
) -> BaseMessage:
341-
"""Asynchronously pass messages to the model and return a message.
342-
343-
Use this method when calling chat models and only the top candidate generation
344-
is needed.
345-
346-
Args:
347-
messages: A sequence of chat messages corresponding to a single model input.
348-
stop: Stop words to use when generating. Model output is cut off at the
349-
first occurrence of any of these substrings.
350-
**kwargs: Arbitrary additional keyword arguments. These are usually passed
351-
to the model provider API call.
352-
353-
Returns:
354-
Top model prediction as a message.
355-
356-
"""
357-
358260
@property
359261
def _identifying_params(self) -> Mapping[str, Any]:
360262
"""Get the identifying parameters."""
@@ -417,12 +319,3 @@ def get_num_tokens_from_messages(
417319
stacklevel=2,
418320
)
419321
return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages)
420-
421-
@classmethod
422-
def _all_required_field_names(cls) -> set:
423-
"""DEPRECATED: Kept for backwards compatibility.
424-
425-
Use ``get_pydantic_field_names``.
426-
427-
"""
428-
return get_pydantic_field_names(cls)

libs/core/langchain_core/language_models/chat_models.py

Lines changed: 1 addition & 155 deletions
Original file line numberDiff line numberDiff line change
@@ -6,22 +6,19 @@
66
import inspect
77
import json
88
import typing
9-
import warnings
109
from abc import ABC, abstractmethod
1110
from collections.abc import AsyncIterator, Iterator, Sequence
1211
from functools import cached_property
1312
from operator import itemgetter
1413
from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Union, cast
1514

16-
from pydantic import BaseModel, ConfigDict, Field, model_validator
15+
from pydantic import BaseModel, ConfigDict, Field
1716
from typing_extensions import override
1817

19-
from langchain_core._api import deprecated
2018
from langchain_core.caches import BaseCache
2119
from langchain_core.callbacks import (
2220
AsyncCallbackManager,
2321
AsyncCallbackManagerForLLMRun,
24-
BaseCallbackManager,
2522
CallbackManager,
2623
CallbackManagerForLLMRun,
2724
Callbacks,
@@ -42,7 +39,6 @@
4239
AIMessageChunk,
4340
AnyMessage,
4441
BaseMessage,
45-
HumanMessage,
4642
convert_to_messages,
4743
is_data_content_block,
4844
message_chunk_to_message,
@@ -319,16 +315,6 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
319315
320316
""" # noqa: E501
321317

322-
callback_manager: Optional[BaseCallbackManager] = deprecated(
323-
name="callback_manager", since="0.1.7", removal="1.0", alternative="callbacks"
324-
)(
325-
Field(
326-
default=None,
327-
exclude=True,
328-
description="Callback manager to add to the run trace.",
329-
)
330-
)
331-
332318
rate_limiter: Optional[BaseRateLimiter] = Field(default=None, exclude=True)
333319
"An optional rate limiter to use for limiting the number of requests."
334320

@@ -373,27 +359,6 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
373359
374360
"""
375361

376-
@model_validator(mode="before")
377-
@classmethod
378-
def raise_deprecation(cls, values: dict) -> Any:
379-
"""Emit deprecation warning if ``callback_manager`` is used.
380-
381-
Args:
382-
values (Dict): Values to validate.
383-
384-
Returns:
385-
Dict: Validated values.
386-
387-
"""
388-
if values.get("callback_manager") is not None:
389-
warnings.warn(
390-
"callback_manager is deprecated. Please use callbacks instead.",
391-
DeprecationWarning,
392-
stacklevel=5,
393-
)
394-
values["callbacks"] = values.pop("callback_manager", None)
395-
return values
396-
397362
model_config = ConfigDict(
398363
arbitrary_types_allowed=True,
399364
)
@@ -1455,40 +1420,6 @@ async def _astream(
14551420
break
14561421
yield item # type: ignore[misc]
14571422

1458-
@deprecated("0.1.7", alternative="invoke", removal="1.0")
1459-
def __call__(
1460-
self,
1461-
messages: list[BaseMessage],
1462-
stop: Optional[list[str]] = None,
1463-
callbacks: Callbacks = None,
1464-
**kwargs: Any,
1465-
) -> BaseMessage:
1466-
"""Call the model.
1467-
1468-
Args:
1469-
messages: List of messages.
1470-
stop: Stop words to use when generating. Model output is cut off at the
1471-
first occurrence of any of these substrings.
1472-
callbacks: Callbacks to pass through. Used for executing additional
1473-
functionality, such as logging or streaming, throughout generation.
1474-
**kwargs: Arbitrary additional keyword arguments. These are usually passed
1475-
to the model provider API call.
1476-
1477-
Raises:
1478-
ValueError: If the generation is not a chat generation.
1479-
1480-
Returns:
1481-
The model output message.
1482-
1483-
"""
1484-
generation = self.generate(
1485-
[messages], stop=stop, callbacks=callbacks, **kwargs
1486-
).generations[0][0]
1487-
if isinstance(generation, ChatGeneration):
1488-
return generation.message
1489-
msg = "Unexpected generation type"
1490-
raise ValueError(msg)
1491-
14921423
async def _call_async(
14931424
self,
14941425
messages: list[BaseMessage],
@@ -1505,91 +1436,6 @@ async def _call_async(
15051436
msg = "Unexpected generation type"
15061437
raise ValueError(msg)
15071438

1508-
@deprecated("0.1.7", alternative="invoke", removal="1.0")
1509-
def call_as_llm(
1510-
self, message: str, stop: Optional[list[str]] = None, **kwargs: Any
1511-
) -> str:
1512-
"""Call the model.
1513-
1514-
Args:
1515-
message: The input message.
1516-
stop: Stop words to use when generating. Model output is cut off at the
1517-
first occurrence of any of these substrings.
1518-
**kwargs: Arbitrary additional keyword arguments. These are usually passed
1519-
to the model provider API call.
1520-
1521-
Returns:
1522-
The model output string.
1523-
1524-
"""
1525-
return self.predict(message, stop=stop, **kwargs)
1526-
1527-
@deprecated("0.1.7", alternative="invoke", removal="1.0")
1528-
@override
1529-
def predict(
1530-
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
1531-
) -> str:
1532-
"""Predict the next message.
1533-
1534-
Args:
1535-
text: The input message.
1536-
stop: Stop words to use when generating. Model output is cut off at the
1537-
first occurrence of any of these substrings.
1538-
**kwargs: Arbitrary additional keyword arguments. These are usually passed
1539-
to the model provider API call.
1540-
1541-
Raises:
1542-
ValueError: If the output is not a string.
1543-
1544-
Returns:
1545-
The predicted output string.
1546-
1547-
"""
1548-
stop_ = None if stop is None else list(stop)
1549-
result = self([HumanMessage(content=text)], stop=stop_, **kwargs)
1550-
if isinstance(result.content, str):
1551-
return result.content
1552-
msg = "Cannot use predict when output is not a string."
1553-
raise ValueError(msg)
1554-
1555-
@deprecated("0.1.7", alternative="invoke", removal="1.0")
1556-
@override
1557-
def predict_messages(
1558-
self,
1559-
messages: list[BaseMessage],
1560-
*,
1561-
stop: Optional[Sequence[str]] = None,
1562-
**kwargs: Any,
1563-
) -> BaseMessage:
1564-
stop_ = None if stop is None else list(stop)
1565-
return self(messages, stop=stop_, **kwargs)
1566-
1567-
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
1568-
@override
1569-
async def apredict(
1570-
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
1571-
) -> str:
1572-
stop_ = None if stop is None else list(stop)
1573-
result = await self._call_async(
1574-
[HumanMessage(content=text)], stop=stop_, **kwargs
1575-
)
1576-
if isinstance(result.content, str):
1577-
return result.content
1578-
msg = "Cannot use predict when output is not a string."
1579-
raise ValueError(msg)
1580-
1581-
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
1582-
@override
1583-
async def apredict_messages(
1584-
self,
1585-
messages: list[BaseMessage],
1586-
*,
1587-
stop: Optional[Sequence[str]] = None,
1588-
**kwargs: Any,
1589-
) -> BaseMessage:
1590-
stop_ = None if stop is None else list(stop)
1591-
return await self._call_async(messages, stop=stop_, **kwargs)
1592-
15931439
@property
15941440
@abstractmethod
15951441
def _llm_type(self) -> str:

0 commit comments

Comments
 (0)