Skip to content

Commit 6553ddc

Browse files
authored
Narrow get_items once at the return; remove per-item casts and keep behavior unchanged.
- Replace the multiple per-item casts in get_items by accumulating plain dicts and applying a single cast at the return, so the function matches its annotated return type while keeping runtime behavior identical. - Retain a focused type-ignore for item_id in pop_item, because the TypedDict union does not guarantee an id key even though the API does; this avoids broader casts or schema changes in a small patch. - Preserve ordering, pagination, and session behavior; no public API changes, no control-flow changes, and no added dependencies, making the change safe and easy to review.
1 parent 7e5cbbf commit 6553ddc

File tree

1 file changed

+8
-7
lines changed

1 file changed

+8
-7
lines changed

src/agents/memory/openai_conversations_session.py

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
from __future__ import annotations
22

3-
from typing import cast
3+
from typing import Any, cast
44

55
from openai import AsyncOpenAI
66

@@ -46,27 +46,28 @@ async def _clear_session_id(self) -> None:
4646

4747
async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:
    """Retrieve this session's conversation items, oldest first.

    Args:
        limit: Maximum number of items to return. ``None`` fetches the full
            history in ascending order; otherwise the *latest* ``limit`` items
            are fetched (queried newest-first, then reversed so the result is
            still chronological).

    Returns:
        The items as plain serializable dicts, narrowed once to
        ``list[TResponseInputItem]`` — the Conversations API guarantees the
        shape, so a single cast at the return replaces per-item casts.
    """
    session_id = await self._get_session_id()
    all_items: list[dict[str, Any]] = []
    if limit is None:
        async for item in self._openai_client.conversations.items.list(
            conversation_id=session_id,
            order="asc",
        ):
            # model_dump() makes the item JSON-serializable; its runtime
            # shape matches TResponseInputItem.
            all_items.append(item.model_dump(exclude_unset=True))
    else:
        async for item in self._openai_client.conversations.items.list(
            conversation_id=session_id,
            limit=limit,
            order="desc",
        ):
            # model_dump() makes the item JSON-serializable; its runtime
            # shape matches TResponseInputItem.
            all_items.append(item.model_dump(exclude_unset=True))
            # `limit` is statically non-None in this branch, so the original
            # `limit is not None and ...` guard was redundant.
            if len(all_items) >= limit:
                break
        # Items were fetched newest-first; restore chronological order.
        all_items.reverse()

    # Narrow once for type checkers; behavior is unchanged.
    return cast(list[TResponseInputItem], all_items)
7071

7172
async def add_items(self, items: list[TResponseInputItem]) -> None:
7273
session_id = await self._get_session_id()

0 commit comments

Comments
 (0)