File tree Expand file tree Collapse file tree 1 file changed +6
-6
lines changed
articles/ai-foundry/model-inference/includes/use-chat-reasoning Expand file tree Collapse file tree 1 file changed +6
-6
lines changed Original file line number Diff line number Diff line change @@ -108,8 +108,6 @@ response = client.chat.completions.create(
108
108
{"role": "user", "content": "How many languages are in the world?"}
109
109
]
110
110
)
111
-
112
- print(response.model_dump_json(indent=2)
113
111
```
114
112
115
113
# [ Model Inference API (preview)] ( #tab/inference )
@@ -255,15 +253,17 @@ def print_stream(completion):
255
253
for event in completion:
256
254
if event.choices:
257
255
content = event.choices[0].delta.content
258
- reasoning_content = event.choices[0].delta.reasoning_content
259
- if reasoning_content:
256
+ reasoning_content = event.choices[0].delta.reasoning_content if hasattr(event.choices[0].delta, "reasoning_content") else None
257
+ if reasoning_content and not is_thinking:
260
258
is_thinking = True
261
259
print("🧠 Thinking...", end="", flush=True)
262
260
elif content:
263
261
if is_thinking:
264
262
is_thinking = False
265
263
print("🛑\n\n")
266
- print(content, end="", flush=True)
264
+ print(content or reasoning_content, end="", flush=True)
265
+
266
+ print_stream(response)
267
267
```
268
268
269
269
# [ Model Inference API (preview)] ( #tab/inference )
@@ -294,7 +294,7 @@ You can visualize how streaming generates content:
294
294
295
295
296
296
``` python
297
- print_stream(result)
297
+ print_stream(response)
298
298
```
299
299
300
300
### Parameters
You can’t perform that action at this time.
0 commit comments