From 52b34fefde6189059c81e4452b8d8bf7dd89c845 Mon Sep 17 00:00:00 2001
From: alekramelaheehridoy
Date: Wed, 3 Sep 2025 19:44:41 -0600
Subject: [PATCH 1/3] fix: resolve duplicate item error when using
 conversationId with tools

When using the OpenAI Agents SDK with conversationId and tools, users
encounter a 'Duplicate item found with id rs_xxx' error. This happens
because the OpenAI Responses API automatically retrieves the
conversation history when conversationId is provided, while the SDK
also sends the full conversation history, causing duplicate items.

This fix modifies the getInputItems function to:

- Accept an optional conversationId parameter
- Filter input to only include the current turn when conversationId is
  present
- Maintain full backward compatibility

The solution identifies the current turn by finding the last user
message and only includes items from that point onwards, preventing
duplicates while preserving conversation context.

Resolves the BadRequestError and enables proper use of conversationId
with tool-enabled agents.
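
For illustration, a minimal repro sketch of the failing pattern (not
taken from the original report; it assumes the public @openai/agents
API with zod tool parameters and the run() conversationId option that
issue #425 describes; the weather tool and the conversation id are
placeholders):

    import { Agent, run, tool } from '@openai/agents';
    import { z } from 'zod';

    // Placeholder tool so the agent emits function-call items.
    const getWeather = tool({
      name: 'get_weather',
      description: 'Return a canned weather report.',
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => `It is sunny in ${city}.`,
    });

    const agent = new Agent({
      name: 'Assistant',
      instructions: 'Answer weather questions with the tool.',
      tools: [getWeather],
    });

    async function main() {
      // Turn 1 stores items (ids like rs_* / msg_*) in the conversation.
      await run(agent, 'Weather in Tokyo?', { conversationId: 'conv_123' });

      // Turn 2 re-sent that stored history alongside the server's copy
      // and failed with:
      //   BadRequestError: Duplicate item found with id rs_xxx
      await run(agent, 'And in Osaka?', { conversationId: 'conv_123' });
    }

    main().catch(console.error);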
---
 .../agents-openai/src/openaiResponsesModel.ts | 31 +++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)

diff --git a/packages/agents-openai/src/openaiResponsesModel.ts b/packages/agents-openai/src/openaiResponsesModel.ts
index a8a3a001..7797c980 100644
--- a/packages/agents-openai/src/openaiResponsesModel.ts
+++ b/packages/agents-openai/src/openaiResponsesModel.ts
@@ -426,6 +426,7 @@ function getPrompt(prompt: ModelRequest['prompt']):
 
 function getInputItems(
   input: ModelRequest['input'],
+  conversationId?: string,
 ): OpenAI.Responses.ResponseInputItem[] {
   if (typeof input === 'string') {
     return [
@@ -436,7 +437,33 @@
     ];
   }
 
-  return input.map((item) => {
+  // When using conversationId, the OpenAI Responses API automatically retrieves
+  // the conversation history. To avoid duplicate items with the same IDs,
+  // we need to filter out items that would already be present in the conversation.
+  // We keep only the items from the current turn (typically the last few items).
+  let filteredInput = input;
+  if (conversationId) {
+    // Find the last user message to identify the start of the current turn
+    let lastUserMessageIndex = -1;
+    for (let i = input.length - 1; i >= 0; i--) {
+      const item = input[i];
+      if (isMessageItem(item) && item.role === 'user') {
+        lastUserMessageIndex = i;
+        break;
+      }
+    }
+
+    // If we found a user message, only include items from that point onwards.
+    // This represents the current turn's conversation.
+    if (lastUserMessageIndex >= 0) {
+      filteredInput = input.slice(lastUserMessageIndex);
+    } else {
+      // If no user message was found, include all items (fallback)
+      filteredInput = input;
+    }
+  }
+
+  return filteredInput.map((item) => {
     if (isMessageItem(item)) {
       return getMessageItem(item);
     }
@@ -847,7 +874,7 @@ export class OpenAIResponsesModel implements Model {
   ): Promise<
     Stream<OpenAI.Responses.ResponseStreamEvent> | OpenAI.Responses.Response
   > {
-    const input = getInputItems(request.input);
+    const input = getInputItems(request.input, request.conversationId);
     const { tools, include } = getTools(request.tools, request.handoffs);
     const toolChoice = getToolChoice(request.modelSettings.toolChoice);
     const { text, ...restOfProviderData } =

From 00a14f2e16e4cf36995351b301affc2484995eb7 Mon Sep 17 00:00:00 2001
From: Kazuhiro Sera
Date: Thu, 4 Sep 2025 20:27:37 +0900
Subject: [PATCH 2/3] Create gold-vans-complain.md

---
 .changeset/gold-vans-complain.md | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100644 .changeset/gold-vans-complain.md

diff --git a/.changeset/gold-vans-complain.md b/.changeset/gold-vans-complain.md
new file mode 100644
index 00000000..e0a90beb
--- /dev/null
+++ b/.changeset/gold-vans-complain.md
@@ -0,0 +1,5 @@
+---
+"@openai/agents-openai": patch
+---
+
+fix: resolve #425 duplicate item error when using conversationId with tools

From 4f00b0597a8f868d7977f136e0a13cc4a9e8ce1d Mon Sep 17 00:00:00 2001
From: Al-Ekram Elahee Hridoy
Date: Tue, 30 Sep 2025 20:03:33 -0600
Subject: [PATCH 3/3] fix(agents-openai): avoid duplicate items when using
 conversationId with tools
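
This replaces the last-user-message heuristic from PATCH 1/3 with
detection of stored-response metadata on each input item. As a rough
standalone sketch of the new heuristic (simplified; the Item shape here
is illustrative, while the real code below operates on
ModelRequest['input'] items):

    // Items carrying stored-conversation metadata (a providerData
    // response_id, or an id with a server-issued prefix) are assumed
    // to already exist server-side, so only items after the last
    // stored one are re-sent.
    type Item = { id?: string; providerData?: { response_id?: string } };

    const STORED_PREFIXES = ['rs_', 'resp_', 'res_', 'msg_'];

    function isStored(item: Item): boolean {
      if (item.providerData?.response_id) return true;
      return STORED_PREFIXES.some((p) => item.id?.startsWith(p) ?? false);
    }

    function currentTurn(items: Item[]): Item[] {
      const last = items.map(isStored).lastIndexOf(true);
      return last >= 0 ? items.slice(last + 1) : items;
    }

    // currentTurn([{ id: 'msg_1' }, { id: 'rs_2' }, {}]) keeps only
    // the final (new) item.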
---
 .../agents-openai/src/openaiResponsesModel.ts | 68 +++++++++++++------
 .../test/openaiResponsesModel.helpers.test.ts | 43 ++++++++++++
 2 files changed, 89 insertions(+), 22 deletions(-)

diff --git a/packages/agents-openai/src/openaiResponsesModel.ts b/packages/agents-openai/src/openaiResponsesModel.ts
index 7797c980..16652d2e 100644
--- a/packages/agents-openai/src/openaiResponsesModel.ts
+++ b/packages/agents-openai/src/openaiResponsesModel.ts
@@ -424,6 +424,48 @@ function getPrompt(prompt: ModelRequest['prompt']):
   };
 }
 
+type InputArray = Exclude<ModelRequest['input'], string>;
+
+const RESPONSE_ITEM_ID_PREFIXES = ['rs_', 'resp_', 'res_', 'msg_'] as const;
+
+function hasStoredConversationMetadata(item: InputArray[number]): boolean {
+  if (!item || typeof item !== 'object') {
+    return false;
+  }
+
+  const providerData = (item as { providerData?: Record<string, unknown> })
+    .providerData;
+  if (providerData && typeof providerData === 'object') {
+    const responseId =
+      (providerData['response_id'] as string | undefined) ??
+      (providerData['responseId'] as string | undefined);
+    if (typeof responseId === 'string' && responseId.length > 0) {
+      return true;
+    }
+  }
+
+  const id = (item as { id?: unknown }).id;
+  if (typeof id === 'string') {
+    for (const prefix of RESPONSE_ITEM_ID_PREFIXES) {
+      if (id.startsWith(prefix)) {
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
+
+function getLastStoredIndex(items: InputArray): number {
+  for (let i = items.length - 1; i >= 0; i--) {
+    if (hasStoredConversationMetadata(items[i])) {
+      return i;
+    }
+  }
+
+  return -1;
+}
+
 function getInputItems(
   input: ModelRequest['input'],
   conversationId?: string,
@@ -437,29 +479,11 @@ function getInputItems(
     ];
   }
 
-  // When using conversationId, the OpenAI Responses API automatically retrieves
-  // the conversation history. To avoid duplicate items with the same IDs,
-  // we need to filter out items that would already be present in the conversation.
-  // We keep only the items from the current turn (typically the last few items).
-  let filteredInput = input;
+  let filteredInput: InputArray = input;
   if (conversationId) {
-    // Find the last user message to identify the start of the current turn
-    let lastUserMessageIndex = -1;
-    for (let i = input.length - 1; i >= 0; i--) {
-      const item = input[i];
-      if (isMessageItem(item) && item.role === 'user') {
-        lastUserMessageIndex = i;
-        break;
-      }
-    }
-
-    // If we found a user message, only include items from that point onwards.
-    // This represents the current turn's conversation.
-    if (lastUserMessageIndex >= 0) {
-      filteredInput = input.slice(lastUserMessageIndex);
-    } else {
-      // If no user message was found, include all items (fallback)
-      filteredInput = input;
+    const lastStoredIndex = getLastStoredIndex(input);
+    if (lastStoredIndex >= 0) {
+      filteredInput = input.slice(lastStoredIndex + 1);
     }
   }
 
diff --git a/packages/agents-openai/test/openaiResponsesModel.helpers.test.ts b/packages/agents-openai/test/openaiResponsesModel.helpers.test.ts
index 012eaafd..ea9d703d 100644
--- a/packages/agents-openai/test/openaiResponsesModel.helpers.test.ts
+++ b/packages/agents-openai/test/openaiResponsesModel.helpers.test.ts
@@ -279,6 +279,49 @@ describe('getInputItems', () => {
       ] as any),
     ).toThrow(UserError);
   });
+
+  it('excludes stored items when conversationId is provided', () => {
+    const items = getInputItems(
+      [
+        { role: 'user', content: 'hi' },
+        {
+          type: 'function_call',
+          id: 'rs_123',
+          name: 'tool',
+          callId: 'call_1',
+          arguments: '{}',
+          status: 'in_progress',
+          providerData: { response_id: 'resp_1' },
+        },
+        {
+          type: 'function_call_result',
+          callId: 'call_1',
+          status: 'completed',
+          output: { type: 'text', text: 'done' },
+        },
+      ] as any,
+      'conv_123',
+    );
+
+    expect(items).toHaveLength(1);
+    expect(items[0]).toMatchObject({
+      type: 'function_call_output',
+      output: 'done',
+    });
+  });
+
+  it('retains new items when no stored metadata exists', () => {
+    const items = getInputItems(
+      [{ role: 'user', content: 'hello' }] as any,
+      'conv_123',
+    );
+
+    expect(items).toHaveLength(1);
+    expect(items[0]).toMatchObject({
+      role: 'user',
+      content: 'hello',
+    });
+  });
 });
 
 describe('convertToOutputItem', () => {