Skip to content

Commit 095f4a7

Browse files
keenborder786 and mdrxy
authored
fix(core): fix parse_result in case of self.first_tool_only with multiple keys matching for JsonOutputKeyToolsParser (#32106)
* **Description:** Updated `parse_result` logic to handle cases where `self.first_tool_only` is `True` and multiple matching keys share the same function name. Instead of returning the first match prematurely, the method now prioritizes filtering results by the specified key to ensure correct selection. * **Issue:** #32100 --------- Co-authored-by: Mason Daugherty <[email protected]> Co-authored-by: Mason Daugherty <[email protected]>
1 parent ddaba21 commit 095f4a7

File tree

6 files changed

+336
-33
lines changed

6 files changed

+336
-33
lines changed

libs/core/langchain_core/output_parsers/openai_tools.py

Lines changed: 38 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -234,23 +234,53 @@ def parse_result(self, result: list[Generation], *, partial: bool = False) -> An
234234
Returns:
235235
The parsed tool calls.
236236
"""
237-
parsed_result = super().parse_result(result, partial=partial)
238-
237+
generation = result[0]
238+
if not isinstance(generation, ChatGeneration):
239+
msg = "This output parser can only be used with a chat generation."
240+
raise OutputParserException(msg)
241+
message = generation.message
242+
if isinstance(message, AIMessage) and message.tool_calls:
243+
parsed_tool_calls = [dict(tc) for tc in message.tool_calls]
244+
for tool_call in parsed_tool_calls:
245+
if not self.return_id:
246+
_ = tool_call.pop("id")
247+
else:
248+
try:
249+
raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"])
250+
except KeyError:
251+
if self.first_tool_only:
252+
return None
253+
return []
254+
parsed_tool_calls = parse_tool_calls(
255+
raw_tool_calls,
256+
partial=partial,
257+
strict=self.strict,
258+
return_id=self.return_id,
259+
)
260+
# For backwards compatibility
261+
for tc in parsed_tool_calls:
262+
tc["type"] = tc.pop("name")
239263
if self.first_tool_only:
264+
parsed_result = list(
265+
filter(lambda x: x["type"] == self.key_name, parsed_tool_calls)
266+
)
240267
single_result = (
241-
parsed_result
242-
if parsed_result and parsed_result["type"] == self.key_name
268+
parsed_result[0]
269+
if parsed_result and parsed_result[0]["type"] == self.key_name
243270
else None
244271
)
245272
if self.return_id:
246273
return single_result
247274
if single_result:
248275
return single_result["args"]
249276
return None
250-
parsed_result = [res for res in parsed_result if res["type"] == self.key_name]
251-
if not self.return_id:
252-
parsed_result = [res["args"] for res in parsed_result]
253-
return parsed_result
277+
return (
278+
[res for res in parsed_tool_calls if res["type"] == self.key_name]
279+
if self.return_id
280+
else [
281+
res["args"] for res in parsed_tool_calls if res["type"] == self.key_name
282+
]
283+
)
254284

255285

256286
# Common cause of ValidationError is truncated output due to max_tokens.

libs/core/langchain_core/outputs/__init__.py

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,24 +1,23 @@
11
"""Output classes.
22
3-
**Output** classes are used to represent the output of a language model call
4-
and the output of a chat.
3+
Used to represent the output of a language model call and the output of a chat.
54
6-
The top container for information is the `LLMResult` object. `LLMResult` is used by
7-
both chat models and LLMs. This object contains the output of the language
8-
model and any additional information that the model provider wants to return.
5+
The top container for information is the `LLMResult` object. `LLMResult` is used by both
6+
chat models and LLMs. This object contains the output of the language model and any
7+
additional information that the model provider wants to return.
98
109
When invoking models via the standard runnable methods (e.g. invoke, batch, etc.):
10+
1111
- Chat models will return `AIMessage` objects.
1212
- LLMs will return regular text strings.
1313
1414
In addition, users can access the raw output of either LLMs or chat models via
15-
callbacks. The on_chat_model_end and on_llm_end callbacks will return an
15+
callbacks. The ``on_chat_model_end`` and ``on_llm_end`` callbacks will return an
1616
LLMResult object containing the generated outputs and any additional information
1717
returned by the model provider.
1818
19-
In general, if information is already available
20-
in the AIMessage object, it is recommended to access it from there rather than
21-
from the `LLMResult` object.
19+
In general, if information is already available in the AIMessage object, it is
20+
recommended to access it from there rather than from the `LLMResult` object.
2221
"""
2322

2423
from typing import TYPE_CHECKING

libs/core/langchain_core/outputs/chat_generation.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,11 @@ class ChatGeneration(Generation):
2727
"""
2828

2929
text: str = ""
30-
"""*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""
30+
"""The text contents of the output message.
31+
32+
.. warning::
33+
SHOULD NOT BE SET DIRECTLY!
34+
"""
3135
message: BaseMessage
3236
"""The message output by the chat model."""
3337
# Override type to be ChatGeneration, ignore mypy error as this is intentional

libs/core/langchain_core/outputs/generation.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,8 @@
1111
class Generation(Serializable):
1212
"""A single text generation output.
1313
14-
Generation represents the response from an "old-fashioned" LLM that
14+
Generation represents the response from an
15+
`"old-fashioned" LLM <https://python.langchain.com/docs/concepts/text_llms/>__` that
1516
generates regular text (not chat messages).
1617
1718
This model is used internally by chat model and will eventually

libs/core/langchain_core/outputs/llm_result.py

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -15,37 +15,35 @@
1515
class LLMResult(BaseModel):
1616
"""A container for results of an LLM call.
1717
18-
Both chat models and LLMs generate an LLMResult object. This object contains
19-
the generated outputs and any additional information that the model provider
20-
wants to return.
18+
Both chat models and LLMs generate an LLMResult object. This object contains the
19+
generated outputs and any additional information that the model provider wants to
20+
return.
2121
"""
2222

2323
generations: list[
2424
list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]]
2525
]
2626
"""Generated outputs.
2727
28-
The first dimension of the list represents completions for different input
29-
prompts.
28+
The first dimension of the list represents completions for different input prompts.
3029
31-
The second dimension of the list represents different candidate generations
32-
for a given prompt.
30+
The second dimension of the list represents different candidate generations for a
31+
given prompt.
3332
34-
When returned from an LLM the type is list[list[Generation]].
35-
When returned from a chat model the type is list[list[ChatGeneration]].
33+
- When returned from **an LLM**, the type is ``list[list[Generation]]``.
34+
- When returned from a **chat model**, the type is ``list[list[ChatGeneration]]``.
3635
37-
ChatGeneration is a subclass of Generation that has a field for a structured
38-
chat message.
36+
ChatGeneration is a subclass of Generation that has a field for a structured chat
37+
message.
3938
"""
4039
llm_output: Optional[dict] = None
4140
"""For arbitrary LLM provider specific output.
4241
4342
This dictionary is a free-form dictionary that can contain any information that the
4443
provider wants to return. It is not standardized and is provider-specific.
4544
46-
Users should generally avoid relying on this field and instead rely on
47-
accessing relevant information from standardized fields present in
48-
AIMessage.
45+
Users should generally avoid relying on this field and instead rely on accessing
46+
relevant information from standardized fields present in AIMessage.
4947
"""
5048
run: Optional[list[RunInfo]] = None
5149
"""List of metadata info for model call for each input."""

0 commit comments

Comments
 (0)