@@ -159,8 +159,7 @@ class ChatGroq(BaseChatModel):
159
159
.. code-block:: python
160
160
161
161
messages = [
162
- ("system", "You are a helpful translator. Translate the user
163
- sentence to French."),
162
+ ("system", "You are a helpful translator. Translate the user sentence to French."),
164
163
("human", "I love programming."),
165
164
]
166
165
llm.invoke(messages)
@@ -244,14 +243,12 @@ class ChatGroq(BaseChatModel):
244
243
class GetWeather(BaseModel):
245
244
'''Get the current weather in a given location'''
246
245
247
- location: str = Field(..., description="The city and state,
248
- e.g. San Francisco, CA")
246
+ location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
249
247
250
248
class GetPopulation(BaseModel):
251
249
'''Get the current population in a given location'''
252
250
253
- location: str = Field(..., description="The city and state,
254
- e.g. San Francisco, CA")
251
+ location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
255
252
256
253
model_with_tools = llm.bind_tools([GetWeather, GetPopulation])
257
254
ai_msg = model_with_tools.invoke("What is the population of NY?")
@@ -277,16 +274,14 @@ class Joke(BaseModel):
277
274
278
275
setup: str = Field(description="The setup of the joke")
279
276
punchline: str = Field(description="The punchline to the joke")
280
- rating: Optional[int] = Field(description="How funny the joke
281
- is, from 1 to 10")
277
+ rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
282
278
283
279
structured_model = llm.with_structured_output(Joke)
284
280
structured_model.invoke("Tell me a joke about cats")
285
281
286
282
.. code-block:: python
287
283
288
- Joke(setup="Why don't cats play poker in the jungle?",
289
- punchline='Too many cheetahs!', rating=None)
284
+ Joke(setup="Why don't cats play poker in the jungle?", punchline='Too many cheetahs!', rating=None)
290
285
291
286
See ``ChatGroq.with_structured_output()`` for more.
292
287
@@ -309,7 +304,7 @@ class Joke(BaseModel):
309
304
'system_fingerprint': 'fp_c5f20b5bb1',
310
305
'finish_reason': 'stop',
311
306
'logprobs': None}
312
- """
307
+ """  # noqa: E501
313
308
314
309
client: Any = Field(default=None, exclude=True)  #: :meta private:
315
310
async_client: Any = Field(default=None, exclude=True)  #: :meta private:
@@ -834,7 +829,7 @@ def bind_tools(
834
829
"auto" to automatically determine which function to call
835
830
with the option to not call any function, "any" to enforce that some
836
831
function is called, or a dict of the form:
837
- {"type": "function", "function": {"name": <<tool_name>>}}.
832
+ ``{"type": "function", "function": {"name": <<tool_name>>}}``.
838
833
**kwargs: Any additional parameters to pass to the
839
834
:class:`~langchain.runnable.Runnable` constructor.
840
835
@@ -876,10 +871,12 @@ def with_structured_output(
876
871
Args:
877
872
schema:
878
873
The output schema. Can be passed in as:
879
- - an OpenAI function/tool schema,
880
- - a JSON Schema,
881
- - a TypedDict class (supported added in 0.1.9),
882
- - or a Pydantic class.
874
+
875
+ - an OpenAI function/tool schema,
876
+ - a JSON Schema,
877
+ - a TypedDict class (supported added in 0.1.9),
878
+ - or a Pydantic class.
879
+
883
880
If ``schema`` is a Pydantic class then the model output will be a
884
881
Pydantic instance of that class, and the model-generated fields will be
885
882
validated by the Pydantic class. Otherwise the model output will be a
@@ -891,19 +888,27 @@ def with_structured_output(
891
888
892
889
Added support for TypedDict class.
893
890
method:
894
- The method for steering model generation, either "function_calling"
895
- or "json_mode". If "function_calling" then the schema will be converted
891
+ The method for steering model generation, either ``'function_calling'``
892
+ or ``'json_mode'``. If ``'function_calling'`` then the schema will be converted
896
893
to an OpenAI function and the returned model will make use of the
897
- function-calling API. If "json_mode" then OpenAI's JSON mode will be
898
- used. Note that if using "json_mode" then you must include instructions
899
- for formatting the output into the desired schema into the model call.
894
+ function-calling API. If ``'json_mode'`` then OpenAI's JSON mode will be
895
+ used.
896
+
897
+ .. note::
898
+ If using ``'json_mode'`` then you must include instructions for formatting
899
+ the output into the desired schema into the model call. (either via the
900
+ prompt itself or in the system message/prompt/instructions).
901
+
902
+ .. warning::
903
+ ``'json_mode'`` does not support streaming responses stop sequences.
904
+
900
905
include_raw:
901
906
If False then only the parsed structured output is returned. If
902
907
an error occurs during model output parsing it will be raised. If True
903
908
then both the raw model response (a BaseMessage) and the parsed model
904
909
response will be returned. If an error occurs during output parsing it
905
910
will be caught and returned as well. The final output is always a dict
906
- with keys "raw", "parsed", and "parsing_error".
911
+ with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.
907
912
kwargs:
908
913
Any additional parameters to pass to the
909
914
:class:`~langchain.runnable.Runnable` constructor.
@@ -917,9 +922,10 @@ def with_structured_output(
917
922
Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
918
923
919
924
If ``include_raw`` is True, then Runnable outputs a dict with keys:
920
- - ``"raw"``: BaseMessage
921
- - ``"parsed"``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
922
- - ``"parsing_error"``: Optional[BaseException]
925
+
926
+ - ``"raw"``: BaseMessage
927
+ - ``"parsed"``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
928
+ - ``"parsing_error"``: Optional[BaseException]
923
929
924
930
Example: schema=Pydantic class, method="function_calling", include_raw=False:
925
931
.. code-block:: python
0 commit comments