@@ -107,6 +107,12 @@ class ChatGroq(BaseChatModel):
             Sampling temperature. Ranges from 0.0 to 1.0.
         max_tokens: Optional[int]
             Max number of tokens to generate.
+        reasoning_format: Optional[Literal["parsed", "raw", "hidden"]]
+            The format for reasoning output.
+
+            - ``parsed``: Separates reasoning into a dedicated field while keeping the response concise.
+            - ``raw``: Includes reasoning within think tags in the content.
+            - ``hidden``: Returns only the final answer.
         model_kwargs: Dict[str, Any]
             Holds any model parameters valid for create call not
             explicitly specified.
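For context, a hedged usage sketch of the new parameter (the model name and prompt here are assumptions for illustration, not part of this diff; any reasoning-capable Groq model would do):

```python
from langchain_groq import ChatGroq

llm = ChatGroq(
    model="deepseek-r1-distill-llama-70b",  # assumed reasoning-capable model
    temperature=0.0,
    reasoning_format="parsed",  # new parameter introduced in this diff
)

msg = llm.invoke("What is 6 * 7?")
print(msg.content)  # final answer only
# With "parsed", reasoning lands in additional_kwargs under the key
# set by the conversion hunks further down in this diff.
print(msg.additional_kwargs.get("reasoning_content"))
```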
@@ -292,7 +298,7 @@ class Joke(BaseModel):
          'system_fingerprint': 'fp_c5f20b5bb1',
          'finish_reason': 'stop',
          'logprobs': None}
-    """
+    """  # noqa: E501

     client: Any = Field(default=None, exclude=True)  #: :meta private:
     async_client: Any = Field(default=None, exclude=True)  #: :meta private:
@@ -302,6 +308,13 @@ class Joke(BaseModel):
     """What sampling temperature to use."""
     stop: Optional[Union[list[str], str]] = Field(default=None, alias="stop_sequences")
     """Default stop sequences."""
+    reasoning_format: Optional[Literal["parsed", "raw", "hidden"]] = None
+    """The format for reasoning output.
+
+    - ``parsed``: Separates reasoning into a dedicated field while keeping the response concise.
+    - ``raw``: Includes reasoning within think tags in the content.
+    - ``hidden``: Returns only the final answer.
+    """  # noqa: E501
     model_kwargs: dict[str, Any] = Field(default_factory=dict)
     """Holds any model parameters valid for `create` call not explicitly specified."""
     groq_api_key: Optional[SecretStr] = Field(
@@ -606,6 +619,7 @@ def _default_params(self) -> dict[str, Any]:
             "n": self.n,
             "temperature": self.temperature,
             "stop": self.stop,
+            "reasoning_format": self.reasoning_format,
             **self.model_kwargs,
         }
         if self.max_tokens is not None:
@@ -1153,6 +1167,8 @@ def _convert_chunk_to_message_chunk(
     if role == "user" or default_class == HumanMessageChunk:
         return HumanMessageChunk(content=content)
     elif role == "assistant" or default_class == AIMessageChunk:
+        if reasoning := _dict.get("reasoning"):
+            additional_kwargs["reasoning_content"] = reasoning
         if usage := (chunk.get("x_groq") or {}).get("usage"):
             input_tokens = usage.get("prompt_tokens", 0)
             output_tokens = usage.get("completion_tokens", 0)
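To illustrate the streaming path this hunk touches, a minimal sketch reusing the `llm` from the earlier example (chunk contents vary by model, so treat the output shape as an assumption):

```python
for chunk in llm.stream("Why is the sky blue?"):
    # Per this hunk, reasoning deltas surface via additional_kwargs
    # while chunk.content carries the final-answer tokens.
    if reasoning := chunk.additional_kwargs.get("reasoning_content"):
        print("reasoning:", reasoning, flush=True)
    if chunk.content:
        print("answer:", chunk.content, flush=True)
```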
@@ -1196,6 +1212,8 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
     elif role == "assistant":
         content = _dict.get("content", "") or ""
         additional_kwargs: dict = {}
+        if reasoning := _dict.get("reasoning"):
+            additional_kwargs["reasoning_content"] = reasoning
         if function_call := _dict.get("function_call"):
             additional_kwargs["function_call"] = dict(function_call)
         tool_calls = []
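And a quick check of the non-streaming conversion above. This is only a sketch: `_convert_dict_to_message` is a private helper, and the `reasoning` field shown is what Groq is assumed to return when `reasoning_format="parsed"`:

```python
from langchain_groq.chat_models import _convert_dict_to_message

raw = {
    "role": "assistant",
    "content": "The answer is 42.",
    "reasoning": "6 * 7 = 42",  # assumed API field when reasoning_format="parsed"
}
msg = _convert_dict_to_message(raw)
assert msg.additional_kwargs["reasoning_content"] == "6 * 7 = 42"
```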