Skip to content

Commit e3b8739

Browse files
Merge pull request #5249 from santiagxf/santiagxf-patch-1
Update python.md
2 parents 3296107 + e05a831 commit e3b8739

File tree

1 file changed

+8
-8
lines changed
  • articles/ai-foundry/model-inference/includes/use-chat-reasoning

1 file changed

+8
-8
lines changed

articles/ai-foundry/model-inference/includes/use-chat-reasoning/python.md

Lines changed: 8 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -36,7 +36,7 @@ import os
3636
from openai import AzureOpenAI
3737

3838
client = AzureOpenAI(
39-
azure_endpoint = "https://<resource>.services.ai.azure.com"
39+
azure_endpoint = "https://<resource>.services.ai.azure.com",
4040
api_key=os.getenv("AZURE_INFERENCE_CREDENTIAL"),
4141
api_version="2024-10-21",
4242
)
@@ -71,7 +71,7 @@ token_provider = get_bearer_token_provider(
7171
)
7272

7373
client = AzureOpenAI(
74-
azure_endpoint = "https://<resource>.services.ai.azure.com"
74+
azure_endpoint = "https://<resource>.services.ai.azure.com",
7575
azure_ad_token_provider=token_provider,
7676
api_version="2024-10-21",
7777
)
@@ -108,8 +108,6 @@ response = client.chat.completions.create(
108108
{"role": "user", "content": "How many languages are in the world?"}
109109
]
110110
)
111-
112-
print(response.model_dump_json(indent=2)
113111
```
114112

115113
# [Model Inference API (preview)](#tab/inference)
@@ -255,15 +253,17 @@ def print_stream(completion):
255253
for event in completion:
256254
if event.choices:
257255
content = event.choices[0].delta.content
258-
reasoning_content = event.choices[0].delta.reasoning_content
259-
if reasoning_content:
256+
reasoning_content = event.choices[0].delta.reasoning_content if hasattr(event.choices[0].delta, "reasoning_content") else None
257+
if reasoning_content and not is_thinking:
260258
is_thinking = True
261259
print("🧠 Thinking...", end="", flush=True)
262260
elif content:
263261
if is_thinking:
264262
is_thinking = False
265263
print("🛑\n\n")
266-
print(content, end="", flush=True)
264+
print(content or reasoning_content, end="", flush=True)
265+
266+
print_stream(response)
267267
```
268268

269269
# [Model Inference API (preview)](#tab/inference)
@@ -294,7 +294,7 @@ You can visualize how streaming generates content:
294294

295295

296296
```python
297-
print_stream(result)
297+
print_stream(response)
298298
```
299299

300300
### Parameters

0 commit comments

Comments (0)