-
Notifications
You must be signed in to change notification settings - Fork 2.7k
Fix #1564 Add conversations API support #1587
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 13 commits
e4b7816
f54bdb0
32e7c57
c919f6e
be053ff
6b5277f
ab847ff
8577c17
038ea00
1f24ff5
e9fcdd0
0f1c99d
d9cd39b
c225c55
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,78 @@ | ||
""" | ||
Example demonstrating session memory functionality. | ||
|
||
This example shows how to use session memory to maintain conversation history | ||
across multiple agent runs without manually handling .to_input_list(). | ||
""" | ||
|
||
import asyncio | ||
|
||
from agents import Agent, OpenAIConversationsSession, Runner | ||
|
||
|
||
async def main():
    """Run a short multi-turn conversation that relies on session memory.

    Every turn passes the same ``session`` object to ``Runner.run``, so the
    agent sees all prior turns automatically — no manual
    ``.to_input_list()`` bookkeeping is needed.
    """
    # Create an agent
    agent = Agent(
        name="Assistant",
        instructions="Reply very concisely.",
    )

    # Create a session instance that will persist across runs
    session = OpenAIConversationsSession()

    print("=== Session Example ===")
    print("The agent will remember previous messages automatically.\n")

    # First turn
    print("First turn:")
    print("User: What city is the Golden Gate Bridge in?")
    result = await Runner.run(
        agent,
        "What city is the Golden Gate Bridge in?",
        session=session,
    )
    print(f"Assistant: {result.final_output}")
    print()

    # Second turn - the agent will remember the previous conversation
    print("Second turn:")
    print("User: What state is it in?")
    result = await Runner.run(agent, "What state is it in?", session=session)
    print(f"Assistant: {result.final_output}")
    print()

    # Third turn - continuing the conversation
    print("Third turn:")
    print("User: What's the population of that state?")
    result = await Runner.run(
        agent,
        "What's the population of that state?",
        session=session,
    )
    print(f"Assistant: {result.final_output}")
    print()

    print("=== Conversation Complete ===")
    print("Notice how the agent remembered the context from previous turns!")
    # Fixed subject-verb agreement in the user-facing message
    # ("Sessions ... handles" -> "Sessions ... handle").
    print("Sessions automatically handle conversation history.")

    # Demonstrate the limit parameter - get only the latest 2 items
    print("\n=== Latest Items Demo ===")
    latest_items = await session.get_items(limit=2)
    print("Latest 2 items:")
    for i, msg in enumerate(latest_items, 1):
        role = msg.get("role", "unknown")
        content = msg.get("content", "")
        print(f"  {i}. {role}: {content}")

    print(f"\nFetched {len(latest_items)} out of total conversation history.")

    # Get all items to show the difference
    all_items = await session.get_items()
    print(f"Total items in session: {len(all_items)}")
|
||
# Standard script entry point: drive the async example with asyncio.run().
if __name__ == "__main__":
    asyncio.run(main())
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,3 +1,10 @@ | ||
from .openai_conversations_session import OpenAIConversationsSession
from .session import Session, SessionABC
from .sqlite_session import SQLiteSession

# Public API of the memory package.
__all__ = [
    "Session",
    "SessionABC",
    "SQLiteSession",
    "OpenAIConversationsSession",
]
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,92 @@ | ||
from __future__ import annotations | ||
|
||
from openai import AsyncOpenAI | ||
|
||
from agents.models._openai_shared import get_default_openai_client | ||
|
||
from ..items import TResponseInputItem | ||
from .session import SessionABC | ||
|
||
|
||
async def start_openai_conversations_session(openai_client: AsyncOpenAI | None = None) -> str:
    """Create a new, empty OpenAI conversation and return its id.

    Args:
        openai_client: Client to use. When None, falls back to the
            configured default client, or a fresh ``AsyncOpenAI()``.

    Returns:
        The id of the newly created conversation.
    """
    # Resolve the client directly; this avoids the temporary-variable dance
    # (and the ``# type: ignore`` it required) of the earlier version.
    if openai_client is None:
        openai_client = get_default_openai_client() or AsyncOpenAI()

    response = await openai_client.conversations.create(items=[])
    return response.id
|
||
|
||
# Sentinel meaning "no server-side conversation yet"; a real id is assigned
# lazily on first use (see OpenAIConversationsSession._ensure_session_id).
_EMPTY_SESSION_ID = ""
|
||
|
||
class OpenAIConversationsSession(SessionABC):
    """Session memory backed by the OpenAI Conversations API.

    Conversation history is stored server-side. The remote conversation is
    created lazily on first use, so constructing this object performs no
    network I/O.
    """

    def __init__(
        self,
        *,
        session_id: str | None = None,
        openai_client: AsyncOpenAI | None = None,
    ):
        """Initialize the session.

        Args:
            session_id: Existing conversation id to attach to. When None,
                a new conversation is created lazily on first use.
            openai_client: Client to use. When None, falls back to the
                configured default client, or a fresh ``AsyncOpenAI()``.
        """
        # Empty string is the "not created yet" sentinel; filled in lazily.
        self.session_id = session_id or _EMPTY_SESSION_ID

        if openai_client is None:
            openai_client = get_default_openai_client() or AsyncOpenAI()
        # Guaranteed non-None here, so no ignore comment is needed.
        self.openai_client: AsyncOpenAI = openai_client

    async def _ensure_session_id(self) -> None:
        # Create the server-side conversation the first time it is needed.
        if self.session_id == _EMPTY_SESSION_ID:
            self.session_id = await start_openai_conversations_session(self.openai_client)

    async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:
        """Return conversation items in chronological (oldest-first) order.

        Args:
            limit: When given, return only the latest ``limit`` items,
                still in chronological order. When None, return everything.
        """
        await self._ensure_session_id()

        all_items: list[TResponseInputItem] = []
        if limit is None:
            async for item in self.openai_client.conversations.items.list(
                conversation_id=self.session_id,
                order="asc",
            ):
                # model_dump() makes the item JSON-serializable.
                all_items.append(item.model_dump())
        else:
            async for item in self.openai_client.conversations.items.list(
                conversation_id=self.session_id,
                limit=limit,
                order="desc",
            ):
                # model_dump() makes the item JSON-serializable.
                all_items.append(item.model_dump())
                # Auto-pagination can yield more than ``limit`` items in
                # total, so stop once we have enough. (``limit`` is known
                # to be non-None in this branch — no re-check needed.)
                if len(all_items) >= limit:
                    break
            # Pages were fetched newest-first; restore chronological order.
            all_items.reverse()

        return all_items  # type: ignore

    async def add_items(self, items: list[TResponseInputItem]) -> None:
        """Append ``items`` to the server-side conversation."""
        await self._ensure_session_id()
        await self.openai_client.conversations.items.create(
            conversation_id=self.session_id,
            items=items,
        )

    async def pop_item(self) -> TResponseInputItem | None:
        """Remove and return the most recent item, or None if the session is empty."""
        await self._ensure_session_id()
        items = await self.get_items(limit=1)
        if not items:
            return None
        item_id: str = str(items[0]["id"])  # type: ignore [typeddict-item]
        await self.openai_client.conversations.items.delete(
            conversation_id=self.session_id, item_id=item_id
        )
        return items[0]

    async def clear_session(self) -> None:
        """Delete the server-side conversation and reset to the lazy initial state."""
        await self._ensure_session_id()
        await self.openai_client.conversations.delete(
            conversation_id=self.session_id,
        )
        # A fresh conversation will be created on the next use.
        self.session_id = _EMPTY_SESSION_ID
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Moved to the memory directory and made the code consistent with the other session implementations.