Commit 71e6005

Add basic Agent streaming sample (#44251)
1 parent abb8123 commit 71e6005

6 files changed: +158 -117 lines

6 files changed

+158
-117
lines changed

sample_agent_stream_events.py

Lines changed: 81 additions & 0 deletions
@@ -0,0 +1,81 @@
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------

"""
DESCRIPTION:
    This sample demonstrates how to run basic Prompt Agent operations
    using the synchronous AIProjectClient and OpenAI clients. The response
    is streamed by setting `stream=True` in the `.responses.create` call.

    The OpenAI compatible Responses and Conversation calls in this sample are made using
    the OpenAI client from the `openai` package. See https://platform.openai.com/docs/api-reference
    for more information.

USAGE:
    python sample_agent_stream_events.py

    Before running the sample:

    pip install "azure-ai-projects>=2.0.0b1" python-dotenv

    Set these environment variables with your own values:
    1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
       page of your Microsoft Foundry portal.
    2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
       the "Models + endpoints" tab in your Microsoft Foundry project.
"""

import os
from dotenv import load_dotenv
from azure.identity import DefaultAzureCredential
from azure.ai.projects import AIProjectClient
from azure.ai.projects.models import PromptAgentDefinition

load_dotenv()

endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]

with (
    DefaultAzureCredential() as credential,
    AIProjectClient(endpoint=endpoint, credential=credential) as project_client,
    project_client.get_openai_client() as openai_client,
):

    agent = project_client.agents.create_version(
        agent_name="MyAgent",
        definition=PromptAgentDefinition(
            model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
            instructions="You are a helpful assistant that answers general questions",
        ),
    )
    print(f"Agent created (id: {agent.id}, name: {agent.name}, version: {agent.version})")

    conversation = openai_client.conversations.create(
        items=[{"type": "message", "role": "user", "content": "Tell me about the capital city of France"}],
    )
    print(f"Created conversation with initial user message (id: {conversation.id})")

    with openai_client.responses.create(
        conversation=conversation.id,
        extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        input="",
        stream=True,
    ) as response_stream_events:

        for event in response_stream_events:
            if event.type == "response.created":
                print(f"Stream response created with ID: {event.response.id}\n")
            elif event.type == "response.output_text.delta":
                print(event.delta, end="", flush=True)
            elif event.type == "response.text.done":
                print(f"\n\nResponse text done. Access final text in 'event.text'")
            elif event.type == "response.completed":
                print(f"\n\nResponse completed. Access final text in 'event.response.output_text'")

    openai_client.conversations.delete(conversation_id=conversation.id)
    print("Conversation deleted")

    project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version)
    print("Agent deleted")

sample_responses_stream_events.py

Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------

"""
DESCRIPTION:
    This sample demonstrates how to run a basic streaming responses operation
    using OpenAI client `.responses.create()` method with `stream=True`.

    See also https://platform.openai.com/docs/guides/streaming-responses?api-mode=responses&lang=python

    Note also the alternative streaming approach shown in sample_responses_stream_manager.py.

USAGE:
    python sample_responses_stream_events.py

    Before running the sample:

    pip install "azure-ai-projects>=2.0.0b1" python-dotenv

    Set these environment variables with your own values:
    1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
       page of your Microsoft Foundry portal.
    2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
       the "Models + endpoints" tab in your Microsoft Foundry project.
"""

import os
from dotenv import load_dotenv

from azure.identity import DefaultAzureCredential
from azure.ai.projects import AIProjectClient

load_dotenv()

endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]

with (
    DefaultAzureCredential() as credential,
    AIProjectClient(endpoint=endpoint, credential=credential) as project_client,
    project_client.get_openai_client() as openai_client,
):

    with openai_client.responses.create(
        model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
        input=[
            {"role": "user", "content": "Tell me about the capital city of France"},
        ],
        stream=True,
    ) as response_stream_events:

        for event in response_stream_events:
            if event.type == "response.created":
                print(f"Stream response created with ID: {event.response.id}\n")
            elif event.type == "response.output_text.delta":
                print(event.delta, end="", flush=True)
            elif event.type == "response.text.done":
                print(f"\n\nResponse text done. Access final text in 'event.text'")
            elif event.type == "response.completed":
                print(f"\n\nResponse completed. Access final text in 'event.response.output_text'")

sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py

Lines changed: 15 additions & 26 deletions
@@ -5,10 +5,15 @@
 
 """
 DESCRIPTION:
-    This sample demonstrates how to stream through responses.stream that returns a responses stream manager.
+    This sample demonstrates how to run a basic streaming responses operation
+    using OpenAI client `.responses.stream()` method.
+
+    See also https://platform.openai.com/docs/api-reference/responses/create?lang=python
+
+    Note also the alternative streaming approach shown in sample_responses_stream_events.py.
 
 USAGE:
-    python sample_responses_stream_method.py
+    python sample_responses_stream_manager.py
 
     Before running the sample:
 
@@ -23,7 +28,7 @@
 
 import os
 from dotenv import load_dotenv
-from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+from azure.identity import DefaultAzureCredential
 from azure.ai.projects import AIProjectClient
 
 load_dotenv()
@@ -35,36 +40,20 @@
     AIProjectClient(endpoint=endpoint, credential=credential) as project_client,
     project_client.get_openai_client() as openai_client,
 ):
-    # [START response_stream_method]
-    response = openai_client.responses.create(
-        model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
-        input=[
-            {"role": "user", "content": "What is the size of France in square miles?"},
-        ],
-        stream=False,  # Create non-streaming response
-    )
-
-    print(f"Initial response: {response.output_text}")
-    print(f"Response ID: {response.id}")
 
-    # Now create a streaming version using the same input but with stream=True
-    # This demonstrates an alternative approach since response.stream() may not be available
     with openai_client.responses.stream(
         model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
         input=[
-            {"role": "user", "content": "Now tell me about the capital city of France."},
+            {"role": "user", "content": "Tell me about the capital city of France"},
         ],
-        previous_response_id=response.id,  # Continue the conversation
-    ) as responses_stream_manager:
+    ) as response_stream_manager:
 
-        # Process streaming events as they arrive
-        for event in responses_stream_manager:
+        for event in response_stream_manager:
             if event.type == "response.created":
-                print(f"Stream response created with ID: {event.response.id}")
+                print(f"Stream response created with ID: {event.response.id}\n")
             elif event.type == "response.output_text.delta":
-                print(f"Delta: {event.delta}")
+                print(event.delta, end="", flush=True)
             elif event.type == "response.text.done":
-                print(f"Response done with full message: {event.text}")
+                print(f"\n\nResponse text done. Access final text in 'event.text'")
             elif event.type == "response.completed":
-                print(f"Response completed with full message: {event.response.output_text}")
-    # [END response_stream_method]
+                print(f"\n\nResponse completed. Access final text in 'event.response.output_text'")

sdk/ai/azure-ai-projects/samples/responses/sample_responses_streaming.py

Lines changed: 0 additions & 89 deletions
This file was deleted.

sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py

Lines changed: 0 additions & 2 deletions
@@ -384,8 +384,6 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs):
         vector_store = openai_client.vector_stores.create(name="ResearchStore")
         print(f"Vector store created: {vector_store.id}")
 
-
-
         file1 = BytesIO(doc1_content.encode("utf-8"))
         file1.name = "ml_healthcare.txt"
         file2 = BytesIO(doc2_content.encode("utf-8"))

sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py

Lines changed: 1 addition & 0 deletions
@@ -9,6 +9,7 @@
 from devtools_testutils.aio import recorded_by_proxy_async
 from devtools_testutils import RecordedTransport
 
+
 class TestResponsesAsync(TestBase):
 
     # To run this test:
