 """Ollama chat models."""
 
+from __future__ import annotations
+
 import json
 from collections.abc import AsyncIterator, Iterator, Mapping, Sequence
 from operator import itemgetter
@@ -74,7 +76,9 @@ def _get_usage_metadata_from_generation_info(
 
 
 def _parse_json_string(
-    json_string: str, raw_tool_call: dict[str, Any], skip: bool
+    json_string: str,
+    raw_tool_call: dict[str, Any],
+    skip: bool,  # noqa: FBT001
 ) -> Any:
     """Attempt to parse a JSON string for tool calling.
 
@@ -148,16 +152,19 @@ def _get_tool_calls_from_response(
 ) -> list[ToolCall]:
     """Get tool calls from ollama response."""
     tool_calls = []
-    if "message" in response:
-        if raw_tool_calls := response["message"].get("tool_calls"):
-            for tc in raw_tool_calls:
-                tool_calls.append(
-                    tool_call(
-                        id=str(uuid4()),
-                        name=tc["function"]["name"],
-                        args=_parse_arguments_from_tool_call(tc) or {},
-                    )
+    if "message" in response and (
+        raw_tool_calls := response["message"].get("tool_calls")
+    ):
+        tool_calls.extend(
+            [
+                tool_call(
+                    id=str(uuid4()),
+                    name=tc["function"]["name"],
+                    args=_parse_arguments_from_tool_call(tc) or {},
                 )
+                for tc in raw_tool_calls
+            ]
+        )
     return tool_calls
 
 
@@ -178,14 +185,12 @@ def _get_image_from_data_content_block(block: dict) -> str:
     if block["type"] == "image":
         if block["source_type"] == "base64":
             return block["data"]
-        else:
-            error_message = "Image data only supported through in-line base64 format."
-            raise ValueError(error_message)
-
-    else:
-        error_message = f"Blocks of type {block['type']} not supported."
+        error_message = "Image data only supported through in-line base64 format."
         raise ValueError(error_message)
 
+    error_message = f"Blocks of type {block['type']} not supported."
+    raise ValueError(error_message)
+
 
 def _is_pydantic_class(obj: Any) -> bool:
     return isinstance(obj, type) and is_basemodel_subclass(obj)
@@ -459,7 +464,7 @@ class Multiply(BaseModel):
     """Base url the model is hosted under."""
 
     client_kwargs: Optional[dict] = {}
-    """Additional kwargs to pass to the httpx clients.
+    """Additional kwargs to pass to the httpx clients.
     These arguments are passed to both synchronous and async clients.
     Use sync_client_kwargs and async_client_kwargs to pass different arguments
     to synchronous and asynchronous clients.
@@ -496,7 +501,8 @@ def _chat_params(
         ollama_messages = self._convert_messages_to_ollama_messages(messages)
 
         if self.stop is not None and stop is not None:
-            raise ValueError("`stop` found in both the input and default params.")
+            msg = "`stop` found in both the input and default params."
+            raise ValueError(msg)
         if self.stop is not None:
             stop = self.stop
 
@@ -584,7 +590,8 @@ def _convert_messages_to_ollama_messages(
                 role = "tool"
                 tool_call_id = message.tool_call_id
             else:
-                raise ValueError("Received unsupported message type for Ollama.")
+                msg = "Received unsupported message type for Ollama."
+                raise ValueError(msg)
 
             content = ""
             images = []
@@ -608,10 +615,11 @@ def _convert_messages_to_ollama_messages(
                         ):
                             image_url = temp_image_url["url"]
                         else:
-                            raise ValueError(
+                            msg = (
                                 "Only string image_url or dict with string 'url' "
                                 "inside content parts are supported."
                             )
+                            raise ValueError(msg)
 
                         image_url_components = image_url.split(",")
                         # Support data:image/jpeg;base64,<image> format
@@ -624,22 +632,24 @@ def _convert_messages_to_ollama_messages(
                         image = _get_image_from_data_content_block(content_part)
                         images.append(image)
                     else:
-                        raise ValueError(
+                        msg = (
                             "Unsupported message content type. "
                             "Must either have type 'text' or type 'image_url' "
                             "with a string 'image_url' field."
                         )
-            # Should convert to ollama.Message once role includes tool, and tool_call_id is in Message  # noqa: E501
-            msg: dict = {
+                        raise ValueError(msg)
+            # Should convert to ollama.Message once role includes tool,
+            # and tool_call_id is in Message
+            msg_: dict = {
                 "role": role,
                 "content": content,
                 "images": images,
             }
             if tool_calls:
-                msg["tool_calls"] = tool_calls
+                msg_["tool_calls"] = tool_calls
             if tool_call_id:
-                msg["tool_call_id"] = tool_call_id
-            ollama_messages.append(msg)
+                msg_["tool_call_id"] = tool_call_id
+            ollama_messages.append(msg_)
 
         return ollama_messages
 
@@ -677,7 +687,7 @@ def _chat_stream_with_aggregation(
         messages: list[BaseMessage],
         stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
-        verbose: bool = False,
+        verbose: bool = False,  # noqa: FBT001, FBT002
         **kwargs: Any,
     ) -> ChatGenerationChunk:
         final_chunk = None
@@ -693,7 +703,8 @@ def _chat_stream_with_aggregation(
                         verbose=verbose,
                     )
         if final_chunk is None:
-            raise ValueError("No data received from Ollama stream.")
+            msg = "No data received from Ollama stream."
+            raise ValueError(msg)
 
         return final_chunk
 
@@ -702,7 +713,7 @@ async def _achat_stream_with_aggregation(
         messages: list[BaseMessage],
         stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
-        verbose: bool = False,
+        verbose: bool = False,  # noqa: FBT001, FBT002
         **kwargs: Any,
     ) -> ChatGenerationChunk:
         final_chunk = None
@@ -718,7 +729,8 @@ async def _achat_stream_with_aggregation(
                         verbose=verbose,
                     )
         if final_chunk is None:
-            raise ValueError("No data received from Ollama stream.")
+            msg = "No data received from Ollama stream."
+            raise ValueError(msg)
 
         return final_chunk
 
@@ -908,7 +920,7 @@ def bind_tools(
         self,
         tools: Sequence[Union[dict[str, Any], type, Callable, BaseTool]],
         *,
-        tool_choice: Optional[Union[dict, str, Literal["auto", "any"], bool]] = None,
+        tool_choice: Optional[Union[dict, str, Literal["auto", "any"], bool]] = None,  # noqa: PYI051
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, BaseMessage]:
         """Bind tool-like objects to this chat model.
@@ -923,7 +935,7 @@ def bind_tools(
                 is currently ignored as it is not supported by Ollama.**
             kwargs: Any additional parameters are passed directly to
                 ``self.bind(**kwargs)``.
-        """  # noqa: E501
+        """
         formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
         return super().bind(tools=formatted_tools, **kwargs)
 
@@ -1180,14 +1192,16 @@ class AnswerWithJustification(BaseModel):
         """  # noqa: E501, D301
        _ = kwargs.pop("strict", None)
         if kwargs:
-            raise ValueError(f"Received unsupported arguments {kwargs}")
+            msg = f"Received unsupported arguments {kwargs}"
+            raise ValueError(msg)
         is_pydantic_schema = _is_pydantic_class(schema)
         if method == "function_calling":
             if schema is None:
-                raise ValueError(
+                msg = (
                     "schema must be specified when method is not 'json_mode'. "
                     "Received None."
                 )
+                raise ValueError(msg)
             formatted_tool = convert_to_openai_tool(schema)
             tool_name = formatted_tool["function"]["name"]
             llm = self.bind_tools(
@@ -1222,10 +1236,11 @@ class AnswerWithJustification(BaseModel):
             )
         elif method == "json_schema":
             if schema is None:
-                raise ValueError(
+                msg = (
                     "schema must be specified when method is not 'json_mode'. "
                     "Received None."
                 )
+                raise ValueError(msg)
             if is_pydantic_schema:
                 schema = cast(TypeBaseModel, schema)
                 if issubclass(schema, BaseModelV1):
@@ -1259,10 +1274,11 @@ class AnswerWithJustification(BaseModel):
             )
             output_parser = JsonOutputParser()
         else:
-            raise ValueError(
+            msg = (
                 f"Unrecognized method argument. Expected one of 'function_calling', "
                 f"'json_schema', or 'json_mode'. Received: '{method}'"
             )
+            raise ValueError(msg)
 
         if include_raw:
             parser_assign = RunnablePassthrough.assign(