Commit 57e5a0e

Lucas Alencar Xisto committed
docs(examples): add async streaming demos for Responses and Chat Completions
1 parent 4a8456a commit 57e5a0e

File tree

examples/async_chat_stream.py
examples/async_responses_stream.py

2 files changed: +43 -0 lines changed

examples/async_chat_stream.py

Lines changed: 22 additions & 0 deletions
@@ -0,0 +1,22 @@
+#!/usr/bin/env -S rye run python
+import asyncio
+from openai import AsyncOpenAI
+
+client = AsyncOpenAI()
+
+
+async def main() -> None:
+    # Chat Completions is still supported indefinitely, but it's no longer the primary API.
+    # This example remains useful for users who still rely on chat.completions.
+    stream = await client.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[{"role": "user", "content": "Say this is a test (streaming)."}],
+        stream=True,
+    )
+    async for chunk in stream:
+        # Some users prefer accessing delta objects; for demo purposes, printing the chunk is enough.
+        print(chunk)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
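
As the comment in the loop notes, many users read the incremental text out of each chunk's delta instead of printing the whole chunk object. A minimal sketch of that variant, assuming the same client and request as the file above (the name main_delta is illustrative only; chunks without choices or without delta content are skipped):

    async def main_delta() -> None:
        stream = await client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "Say this is a test (streaming)."}],
            stream=True,
        )
        async for chunk in stream:
            # chunk.choices can be empty and delta.content can be None (e.g. on role or
            # finish chunks), so guard before printing the incremental text.
            if chunk.choices and chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="", flush=True)
        print()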

examples/async_responses_stream.py

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+#!/usr/bin/env -S rye run python
+import asyncio
+from openai import AsyncOpenAI
+
+client = AsyncOpenAI()
+
+
+async def main() -> None:
+    # Async streaming with the Responses API (the recommended primary API).
+    stream = await client.responses.create(
+        model="gpt-4o-mini",
+        input="Write a one-sentence bedtime story about a unicorn.",
+        stream=True,
+    )
+    async for event in stream:
+        # Each event may contain deltas and final results; printing directly is sufficient for demo purposes.
+        print(event)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
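
The comment in this loop notes that a Responses stream mixes deltas with final results. A minimal sketch of filtering by event type, assuming the same client and input as above and that incremental text arrives as response.output_text.delta events (the name main_text_only is illustrative only):

    async def main_text_only() -> None:
        stream = await client.responses.create(
            model="gpt-4o-mini",
            input="Write a one-sentence bedtime story about a unicorn.",
            stream=True,
        )
        async for event in stream:
            # Print only the incremental text deltas; ignore lifecycle events
            # such as response.created and response.completed.
            if event.type == "response.output_text.delta":
                print(event.delta, end="", flush=True)
        print()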

0 commit comments
