7 changes: 7 additions & 0 deletions .changeset/heavy-foxes-sit.md
@@ -0,0 +1,7 @@
---
'@openai/agents-core': patch
---

fix: improve compatibility for conversationId / previousResponseId + tool calls

ref: https://github.com/openai/openai-agents-python/pull/1827
28 changes: 28 additions & 0 deletions docs/src/content/docs/guides/running-agents.mdx
@@ -8,6 +8,8 @@ import helloWorldWithRunnerExample from '../../../../../examples/docs/hello-worl
import helloWorldExample from '../../../../../examples/docs/hello-world.ts?raw';
import runningAgentsExceptionExample from '../../../../../examples/docs/running-agents/exceptions1.ts?raw';
import chatLoopExample from '../../../../../examples/docs/running-agents/chatLoop.ts?raw';
import conversationIdExample from '../../../../../examples/docs/running-agents/conversationId.ts?raw';
import previousResponseIdExample from '../../../../../examples/docs/running-agents/previousResponseId.ts?raw';

Agents do nothing by themselves – you **run** them with the `Runner` class or the `run()` utility.

@@ -95,6 +97,32 @@ Each call to `runner.run()` (or `run()` utility) represents one **turn** in your

See [the chat example](https://github.com/openai/openai-agents-js/tree/main/examples/basic/chat.ts) for an interactive version.

### Server-managed conversations

You can let the OpenAI Responses API persist conversation history on the server instead of sending your entire local transcript on every turn. This is useful for long conversations or when multiple services participate in the same conversation. See the [Conversation state guide](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses) for details.

OpenAI exposes two ways to reuse server-side state:

#### 1. `conversationId` for an entire conversation

You can create a conversation once using the [Conversations API](https://platform.openai.com/docs/api-reference/conversations/create) and then reuse its ID for every turn. The SDK then automatically sends only the items generated since the previous turn.

<Code
  lang="typescript"
  code={conversationIdExample}
  title="Reusing a server conversation"
/>

#### 2. `previousResponseId` to continue from the last turn

If you would rather use the Responses API alone, you can chain each request using the ID returned from the previous response. This keeps context across turns without creating a full conversation resource.

<Code
  lang="typescript"
  code={previousResponseIdExample}
  title="Chaining with previousResponseId"
/>

## Exceptions

The SDK throws a small set of errors you can catch:
25 changes: 25 additions & 0 deletions examples/docs/running-agents/conversationId.ts
@@ -0,0 +1,25 @@
import { Agent, run } from '@openai/agents';
import { OpenAI } from 'openai';

const agent = new Agent({
  name: 'Assistant',
  instructions: 'Reply very concisely.',
});

async function main() {
  // Create a server-managed conversation:
  const client = new OpenAI();
  const { id: conversationId } = await client.conversations.create({});

  const first = await run(agent, 'What city is the Golden Gate Bridge in?', {
    conversationId,
  });
  console.log(first.finalOutput);
  // -> "San Francisco"

  const second = await run(agent, 'What state is it in?', { conversationId });
  console.log(second.finalOutput);
  // -> "California"
}

main().catch(console.error);
21 changes: 21 additions & 0 deletions examples/docs/running-agents/previousResponseId.ts
@@ -0,0 +1,21 @@
import { Agent, run } from '@openai/agents';

const agent = new Agent({
  name: 'Assistant',
  instructions: 'Reply very concisely.',
});

async function main() {
  const first = await run(agent, 'What city is the Golden Gate Bridge in?');
  console.log(first.finalOutput);
  // -> "San Francisco"

  const previousResponseId = first.lastResponseId;
  const second = await run(agent, 'What state is it in?', {
    previousResponseId,
  });
  console.log(second.finalOutput);
  // -> "California"
}

main().catch(console.error);
166 changes: 151 additions & 15 deletions packages/agents-core/src/run.ts
@@ -178,19 +178,114 @@ export function getTracing(
  return 'enabled_without_data';
}

function toAgentInputList(
  originalInput: string | AgentInputItem[],
): AgentInputItem[] {
  if (typeof originalInput === 'string') {
    return [{ type: 'message', role: 'user', content: originalInput }];
  }

  return [...originalInput];
}

/**
 * Internal helper for tracking the items sent in each turn and ensuring that we don't send duplicates.
 * This logic is vital for properly handling the items to send across multiple turns
 * when you use either `conversationId` or `previousResponseId`.
 * Both scenarios expect the agent loop to send only new items with each Responses API call.
 *
 * See also: https://platform.openai.com/docs/guides/conversation-state?api-mode=responses
 */
class ServerConversationTracker {
  // Conversation ID:
  // - https://platform.openai.com/docs/guides/conversation-state?api-mode=responses#using-the-conversations-api
  // - https://platform.openai.com/docs/api-reference/conversations/create
  public conversationId?: string;

  // Previous Response ID:
  // https://platform.openai.com/docs/guides/conversation-state?api-mode=responses#passing-context-from-the-previous-response
  public previousResponseId?: string;

  // Using this flag because WeakSet does not provide a way to check its size
  private sentInitialInput = false;
  // The items already sent to the model; using WeakSet for memory efficiency
  private sentItems = new WeakSet<object>();
  // The items received from the server; using WeakSet for memory efficiency
  private serverItems = new WeakSet<object>();

  constructor({
    conversationId,
    previousResponseId,
  }: {
    conversationId?: string;
    previousResponseId?: string;
  }) {
    this.conversationId = conversationId ?? undefined;
    this.previousResponseId = previousResponseId ?? undefined;
  }

  trackServerItems(modelResponse: ModelResponse | undefined) {
    if (!modelResponse) {
      return;
    }
    for (const item of modelResponse.output) {
      if (item && typeof item === 'object') {
        this.serverItems.add(item);
      }
    }
    if (
      !this.conversationId &&
      this.previousResponseId !== undefined &&
      modelResponse.responseId
    ) {
      this.previousResponseId = modelResponse.responseId;
    }
  }

  prepareInput(
    originalInput: string | AgentInputItem[],
    generatedItems: RunItem[],
  ): AgentInputItem[] {
    const inputItems: AgentInputItem[] = [];

    if (!this.sentInitialInput) {
      const initialItems = toAgentInputList(originalInput);
      for (const item of initialItems) {
        inputItems.push(item);
        if (item && typeof item === 'object') {
          this.sentItems.add(item);
        }
      }
      this.sentInitialInput = true;
    }

    for (const item of generatedItems) {
      if (item.type === 'tool_approval_item') {
        continue;
      }
      const rawItem = item.rawItem;
      if (!rawItem || typeof rawItem !== 'object') {
        continue;
      }
      if (this.sentItems.has(rawItem) || this.serverItems.has(rawItem)) {
        continue;
      }
      inputItems.push(rawItem as AgentInputItem);
      this.sentItems.add(rawItem);
    }

    return inputItems;
  }
}
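
// Illustrative sketch (not part of this change; `firstResponse` and
// `generatedItems` are hypothetical values): the tracker sends the original
// input exactly once, then forwards only items the server has not seen yet:
//
//   const tracker = new ServerConversationTracker({ conversationId: 'conv_123' });
//   const turn1 = tracker.prepareInput('Hello', []); // -> [{ type: 'message', role: 'user', content: 'Hello' }]
//   tracker.trackServerItems(firstResponse);         // server-stored items won't be re-sent
//   const turn2 = tracker.prepareInput('Hello', generatedItems); // -> only new, unseen items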

export function getTurnInput(
  originalInput: string | AgentInputItem[],
  generatedItems: RunItem[],
): AgentInputItem[] {
  const rawItems = generatedItems
    .filter((item) => item.type !== 'tool_approval_item') // don't include approval items to avoid double function calls
    .map((item) => item.rawItem);

-  if (typeof originalInput === 'string') {
-    originalInput = [{ type: 'message', role: 'user', content: originalInput }];
-  }
-
-  return [...originalInput, ...rawItems];
+  return [...toAgentInputList(originalInput), ...rawItems];
}
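
// Example (hypothetical item names): approval items are dropped to avoid
// duplicate function calls; everything else is appended after the normalized
// original input:
//
//   getTurnInput('Hi', [assistantMessageItem, toolApprovalItem]);
//   // -> [{ type: 'message', role: 'user', content: 'Hi' }, assistantMessageItem.rawItem]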

/**
@@ -254,6 +349,14 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
      options.maxTurns ?? DEFAULT_MAX_TURNS,
    );

    const serverConversationTracker =
      options.conversationId || options.previousResponseId
        ? new ServerConversationTracker({
            conversationId: options.conversationId,
            previousResponseId: options.previousResponseId,
          })
        : undefined;

    try {
      while (true) {
        const explictlyModelSet =
@@ -355,10 +458,12 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
          await this.#runInputGuardrails(state);
        }

-        const turnInput = getTurnInput(
-          state._originalInput,
-          state._generatedItems,
-        );
+        const turnInput = serverConversationTracker
+          ? serverConversationTracker.prepareInput(
+              state._originalInput,
+              state._generatedItems,
+            )
+          : getTurnInput(state._originalInput, state._generatedItems);

        if (state._noActiveAgentRun) {
          state._currentAgent.emit(
@@ -385,14 +490,21 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
          state._toolUseTracker,
          modelSettings,
        );
        const previousResponseId =
          serverConversationTracker?.previousResponseId ??
          options.previousResponseId;
        const conversationId =
          serverConversationTracker?.conversationId ??
          options.conversationId;

        state._lastTurnResponse = await model.getResponse({
          systemInstructions: await state._currentAgent.getSystemPrompt(
            state._context,
          ),
          prompt: await state._currentAgent.getPrompt(state._context),
          input: turnInput,
-          previousResponseId: options.previousResponseId,
-          conversationId: options.conversationId,
+          previousResponseId,
+          conversationId,
          modelSettings,
          tools: serializedTools,
          outputType: convertAgentOutputTypeToSerializable(
@@ -409,6 +521,10 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
        state._context.usage.add(state._lastTurnResponse.usage);
        state._noActiveAgentRun = false;

        serverConversationTracker?.trackServerItems(
          state._lastTurnResponse,
        );

        const processedResponse = processModelResponse(
          state._lastTurnResponse,
          state._currentAgent,
@@ -623,6 +739,14 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
    result: StreamedRunResult<TContext, TAgent>,
    options: StreamRunOptions<TContext>,
  ): Promise<void> {
    const serverConversationTracker =
      options.conversationId || options.previousResponseId
        ? new ServerConversationTracker({
            conversationId: options.conversationId,
            previousResponseId: options.previousResponseId,
          })
        : undefined;

    try {
      while (true) {
        const currentAgent = result.state._currentAgent;
@@ -739,7 +863,12 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
          modelSettings,
        );

-        const turnInput = getTurnInput(result.input, result.newItems);
+        const turnInput = serverConversationTracker
+          ? serverConversationTracker.prepareInput(
+              result.input,
+              result.newItems,
+            )
+          : getTurnInput(result.input, result.newItems);

        if (result.state._noActiveAgentRun) {
          currentAgent.emit(
@@ -752,14 +881,20 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {

        let finalResponse: ModelResponse | undefined = undefined;

        const previousResponseId =
          serverConversationTracker?.previousResponseId ??
          options.previousResponseId;
        const conversationId =
          serverConversationTracker?.conversationId ?? options.conversationId;

        for await (const event of model.getStreamedResponse({
          systemInstructions: await currentAgent.getSystemPrompt(
            result.state._context,
          ),
          prompt: await currentAgent.getPrompt(result.state._context),
          input: turnInput,
-          previousResponseId: options.previousResponseId,
-          conversationId: options.conversationId,
+          previousResponseId,
+          conversationId,
          modelSettings,
          tools: serializedTools,
          handoffs: serializedHandoffs,
@@ -798,6 +933,7 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
        }

        result.state._lastTurnResponse = finalResponse;
        serverConversationTracker?.trackServerItems(finalResponse);
        result.state._modelResponses.push(result.state._lastTurnResponse);

        const processedResponse = processModelResponse(