Skip to content

Commit 164acb5

Browse files
seratch and rm-openai authored
Fix #1564 Add conversations API support (#1587)
This pull request resolves #1564 --------- Co-authored-by: Rohan Mehta <rm@openai.com>
1 parent d7eeaf2 commit 164acb5

24 files changed

+591
-295
lines changed
Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,78 @@
1+
"""
2+
Example demonstrating session memory functionality.
3+
4+
This example shows how to use session memory to maintain conversation history
5+
across multiple agent runs without manually handling .to_input_list().
6+
"""
7+
8+
import asyncio
9+
10+
from agents import Agent, OpenAIConversationsSession, Runner
11+
12+
13+
async def main():
    """Run a short multi-turn conversation that relies on session memory.

    The ``OpenAIConversationsSession`` object — not the caller — threads the
    conversation history through each ``Runner.run`` call, so there is no
    manual ``.to_input_list()`` bookkeeping.
    """
    # Create an agent
    agent = Agent(
        name="Assistant",
        instructions="Reply very concisely.",
    )

    # Create a session instance that will persist across runs
    session = OpenAIConversationsSession()

    print("=== Session Example ===")
    print("The agent will remember previous messages automatically.\n")

    # First turn
    print("First turn:")
    print("User: What city is the Golden Gate Bridge in?")
    result = await Runner.run(
        agent,
        "What city is the Golden Gate Bridge in?",
        session=session,
    )
    print(f"Assistant: {result.final_output}")
    print()

    # Second turn - the agent will remember the previous conversation
    print("Second turn:")
    print("User: What state is it in?")
    result = await Runner.run(agent, "What state is it in?", session=session)
    print(f"Assistant: {result.final_output}")
    print()

    # Third turn - continuing the conversation
    print("Third turn:")
    print("User: What's the population of that state?")
    result = await Runner.run(
        agent,
        "What's the population of that state?",
        session=session,
    )
    print(f"Assistant: {result.final_output}")
    print()

    print("=== Conversation Complete ===")
    print("Notice how the agent remembered the context from previous turns!")
    print("Sessions automatically handles conversation history.")

    # Demonstrate the limit parameter - get only the latest 2 items
    print("\n=== Latest Items Demo ===")
    latest_items = await session.get_items(limit=2)
    print("Latest 2 items:")
    for i, msg in enumerate(latest_items, 1):
        role = msg.get("role", "unknown")
        content = msg.get("content", "")
        print(f" {i}. {role}: {content}")

    print(f"\nFetched {len(latest_items)} out of total conversation history.")

    # Get all items to show the difference
    all_items = await session.get_items()
    print(f"Total items in session: {len(all_items)}")


if __name__ == "__main__":
    asyncio.run(main())

examples/basic/sqlalchemy_session_example.py renamed to examples/memory/sqlalchemy_session_example.py

Lines changed: 32 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,28 +20,56 @@ async def main():
2020
create_tables=True,
2121
)
2222

23-
print("=== SQLAlchemySession Example ===")
23+
print("=== Session Example ===")
2424
print("The agent will remember previous messages automatically.\n")
2525

2626
# First turn
27+
print("First turn:")
2728
print("User: What city is the Golden Gate Bridge in?")
2829
result = await Runner.run(
2930
agent,
3031
"What city is the Golden Gate Bridge in?",
3132
session=session,
3233
)
33-
print(f"Assistant: {result.final_output}\n")
34+
print(f"Assistant: {result.final_output}")
35+
print()
3436

3537
# Second turn - the agent will remember the previous conversation
38+
print("Second turn:")
3639
print("User: What state is it in?")
40+
result = await Runner.run(agent, "What state is it in?", session=session)
41+
print(f"Assistant: {result.final_output}")
42+
print()
43+
44+
# Third turn - continuing the conversation
45+
print("Third turn:")
46+
print("User: What's the population of that state?")
3747
result = await Runner.run(
3848
agent,
39-
"What state is it in?",
49+
"What's the population of that state?",
4050
session=session,
4151
)
42-
print(f"Assistant: {result.final_output}\n")
52+
print(f"Assistant: {result.final_output}")
53+
print()
4354

4455
print("=== Conversation Complete ===")
56+
print("Notice how the agent remembered the context from previous turns!")
57+
print("Sessions automatically handles conversation history.")
58+
59+
# Demonstrate the limit parameter - get only the latest 2 items
60+
print("\n=== Latest Items Demo ===")
61+
latest_items = await session.get_items(limit=2)
62+
print("Latest 2 items:")
63+
for i, msg in enumerate(latest_items, 1):
64+
role = msg.get("role", "unknown")
65+
content = msg.get("content", "")
66+
print(f" {i}. {role}: {content}")
67+
68+
print(f"\nFetched {len(latest_items)} out of total conversation history.")
69+
70+
# Get all items to show the difference
71+
all_items = await session.get_items()
72+
print(f"Total items in session: {len(all_items)}")
4573

4674

4775
if __name__ == "__main__":

examples/reasoning_content/main.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ async def stream_with_reasoning_content():
4747
handoffs=[],
4848
tracing=ModelTracing.DISABLED,
4949
previous_response_id=None,
50+
conversation_id=None,
5051
prompt=None,
5152
):
5253
if event.type == "response.reasoning_summary_text.delta":
@@ -83,6 +84,7 @@ async def get_response_with_reasoning_content():
8384
handoffs=[],
8485
tracing=ModelTracing.DISABLED,
8586
previous_response_id=None,
87+
conversation_id=None,
8688
prompt=None,
8789
)
8890

src/agents/__init__.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@
4646
TResponseInputItem,
4747
)
4848
from .lifecycle import AgentHooks, RunHooks
49-
from .memory import Session, SQLiteSession
49+
from .memory import OpenAIConversationsSession, Session, SessionABC, SQLiteSession
5050
from .model_settings import ModelSettings
5151
from .models.interface import Model, ModelProvider, ModelTracing
5252
from .models.multi_provider import MultiProvider
@@ -221,7 +221,9 @@ def enable_verbose_stdout_logging():
221221
"RunHooks",
222222
"AgentHooks",
223223
"Session",
224+
"SessionABC",
224225
"SQLiteSession",
226+
"OpenAIConversationsSession",
225227
"RunContextWrapper",
226228
"TContext",
227229
"RunErrorDetails",

src/agents/extensions/models/litellm_model.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,8 @@ async def get_response(
8282
output_schema: AgentOutputSchemaBase | None,
8383
handoffs: list[Handoff],
8484
tracing: ModelTracing,
85-
previous_response_id: str | None,
85+
previous_response_id: str | None = None, # unused
86+
conversation_id: str | None = None, # unused
8687
prompt: Any | None = None,
8788
) -> ModelResponse:
8889
with generation_span(
@@ -171,7 +172,8 @@ async def stream_response(
171172
output_schema: AgentOutputSchemaBase | None,
172173
handoffs: list[Handoff],
173174
tracing: ModelTracing,
174-
previous_response_id: str | None,
175+
previous_response_id: str | None = None, # unused
176+
conversation_id: str | None = None, # unused
175177
prompt: Any | None = None,
176178
) -> AsyncIterator[TResponseStreamEvent]:
177179
with generation_span(

src/agents/memory/__init__.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,10 @@
1-
from .session import Session, SQLiteSession
1+
from .openai_conversations_session import OpenAIConversationsSession
2+
from .session import Session, SessionABC
3+
from .sqlite_session import SQLiteSession
24

3-
__all__ = ["Session", "SQLiteSession"]
5+
__all__ = [
6+
"Session",
7+
"SessionABC",
8+
"SQLiteSession",
9+
"OpenAIConversationsSession",
10+
]
Lines changed: 94 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
1+
from __future__ import annotations
2+
3+
from openai import AsyncOpenAI
4+
5+
from agents.models._openai_shared import get_default_openai_client
6+
7+
from ..items import TResponseInputItem
8+
from .session import SessionABC
9+
10+
11+
async def start_openai_conversations_session(openai_client: AsyncOpenAI | None = None) -> str:
    """Create a new, empty server-side conversation and return its id.

    Args:
        openai_client: Client to use for the API call. When ``None``, the
            SDK-wide default client is used, falling back to a freshly
            constructed ``AsyncOpenAI`` (configured from environment
            variables).

    Returns:
        The id of the newly created conversation.
    """
    client = openai_client
    if client is None:
        # Prefer the globally configured default client before building a new one.
        client = get_default_openai_client() or AsyncOpenAI()

    response = await client.conversations.create(items=[])
    return response.id
20+
21+
22+
_EMPTY_SESSION_ID = ""
23+
24+
25+
class OpenAIConversationsSession(SessionABC):
    """Session memory backed by the OpenAI Conversations API.

    Items are stored server-side; this object only holds the conversation id
    locally and lazily creates the remote conversation on first use.
    """

    def __init__(
        self,
        *,
        conversation_id: str | None = None,
        openai_client: AsyncOpenAI | None = None,
    ):
        """Initialize the session.

        Args:
            conversation_id: Existing conversation to attach to. When
                ``None``, a new conversation is created lazily on first use.
            openai_client: Client to use for API calls. When ``None``, the
                SDK-wide default client is used, falling back to a freshly
                constructed ``AsyncOpenAI``.
        """
        self._session_id: str | None = conversation_id
        if openai_client is None:
            # Prefer the globally configured default client before building a new one.
            openai_client = get_default_openai_client() or AsyncOpenAI()
        self._openai_client: AsyncOpenAI = openai_client

    async def _get_session_id(self) -> str:
        # Lazily create the server-side conversation the first time it is needed.
        if self._session_id is None:
            self._session_id = await start_openai_conversations_session(self._openai_client)
        return self._session_id

    async def _clear_session_id(self) -> None:
        # Forget the conversation id so the next call starts a fresh conversation.
        self._session_id = None

    async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:
        """Return conversation items in chronological (oldest-first) order.

        Args:
            limit: When given, only the latest ``limit`` items are returned
                (still oldest-first); otherwise the full history is returned.
        """
        session_id = await self._get_session_id()
        all_items = []
        if limit is None:
            async for item in self._openai_client.conversations.items.list(
                conversation_id=session_id,
                order="asc",
            ):
                # calling model_dump() to make this serializable
                all_items.append(item.model_dump())
        else:
            async for item in self._openai_client.conversations.items.list(
                conversation_id=session_id,
                limit=limit,
                order="desc",
            ):
                # calling model_dump() to make this serializable
                all_items.append(item.model_dump())
                # The listing may auto-paginate past `limit`; stop once we have enough.
                if len(all_items) >= limit:
                    break
            # Items were fetched newest-first; restore chronological order.
            all_items.reverse()

        return all_items  # type: ignore

    async def add_items(self, items: list[TResponseInputItem]) -> None:
        """Append ``items`` to the server-side conversation."""
        session_id = await self._get_session_id()
        await self._openai_client.conversations.items.create(
            conversation_id=session_id,
            items=items,
        )

    async def pop_item(self) -> TResponseInputItem | None:
        """Remove and return the most recent item, or ``None`` when empty."""
        session_id = await self._get_session_id()
        items = await self.get_items(limit=1)
        if not items:
            return None
        item_id: str = str(items[0]["id"])  # type: ignore [typeddict-item]
        await self._openai_client.conversations.items.delete(
            conversation_id=session_id, item_id=item_id
        )
        return items[0]

    async def clear_session(self) -> None:
        """Delete the server-side conversation and reset the local id."""
        session_id = await self._get_session_id()
        await self._openai_client.conversations.delete(
            conversation_id=session_id,
        )
        await self._clear_session_id()

0 commit comments

Comments
 (0)