
Commit 270dda4

fix: invalid tool_calls request due to improper cancellation (QwenLM#790)
1 parent d4fa15d · commit 270dda4

File tree

packages/core/src/core/openaiContentGenerator/pipeline.test.ts
packages/core/src/core/openaiContentGenerator/pipeline.ts

2 files changed: +84, -0 lines


packages/core/src/core/openaiContentGenerator/pipeline.test.ts

Lines changed: 78 additions & 0 deletions
@@ -161,6 +161,9 @@ describe('ContentGenerationPipeline', () => {
           top_p: 0.9,
           max_tokens: 1000,
         }),
+        expect.objectContaining({
+          signal: undefined,
+        }),
       );
       expect(mockConverter.convertOpenAIResponseToGemini).toHaveBeenCalledWith(
         mockOpenAIResponse,
@@ -238,6 +241,9 @@ describe('ContentGenerationPipeline', () => {
         expect.objectContaining({
           tools: mockTools,
         }),
+        expect.objectContaining({
+          signal: undefined,
+        }),
       );
     });

@@ -274,6 +280,30 @@ describe('ContentGenerationPipeline', () => {
         request,
       );
     });
+
+    it('should pass abort signal to OpenAI client when provided', async () => {
+      const abortController = new AbortController();
+      const request: GenerateContentParameters = {
+        model: 'test-model',
+        contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
+        config: { abortSignal: abortController.signal },
+      };
+
+      (mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
+      (mockConverter.convertOpenAIResponseToGemini as Mock).mockReturnValue(
+        new GenerateContentResponse(),
+      );
+      (mockClient.chat.completions.create as Mock).mockResolvedValue({
+        choices: [{ message: { content: 'response' } }],
+      });
+
+      await pipeline.execute(request, 'test-id');
+
+      expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.any(Object),
+        expect.objectContaining({ signal: abortController.signal }),
+      );
+    });
   });

   describe('executeStream', () => {
@@ -338,6 +368,9 @@ describe('ContentGenerationPipeline', () => {
           stream: true,
           stream_options: { include_usage: true },
         }),
+        expect.objectContaining({
+          signal: undefined,
+        }),
       );
       expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
         expect.objectContaining({
@@ -470,6 +503,42 @@ describe('ContentGenerationPipeline', () => {
       );
     });

+    it('should pass abort signal to OpenAI client for streaming requests', async () => {
+      const abortController = new AbortController();
+      const request: GenerateContentParameters = {
+        model: 'test-model',
+        contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
+        config: { abortSignal: abortController.signal },
+      };
+
+      const mockStream = {
+        async *[Symbol.asyncIterator]() {
+          yield {
+            id: 'chunk-1',
+            choices: [{ delta: { content: 'Hello' }, finish_reason: 'stop' }],
+          };
+        },
+      };
+
+      (mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
+      (mockConverter.convertOpenAIChunkToGemini as Mock).mockReturnValue(
+        new GenerateContentResponse(),
+      );
+      (mockClient.chat.completions.create as Mock).mockResolvedValue(
+        mockStream,
+      );
+
+      const resultGenerator = await pipeline.executeStream(request, 'test-id');
+      for await (const _result of resultGenerator) {
+        // Consume stream
+      }
+
+      expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.any(Object),
+        expect.objectContaining({ signal: abortController.signal }),
+      );
+    });
+
     it('should merge finishReason and usageMetadata from separate chunks', async () => {
       // Arrange
       const request: GenerateContentParameters = {
@@ -924,6 +993,9 @@ describe('ContentGenerationPipeline', () => {
           top_p: 0.9, // Config parameter used since request overrides are not being applied in current implementation
           max_tokens: 1000, // Config parameter used since request overrides are not being applied in current implementation
         }),
+        expect.objectContaining({
+          signal: undefined,
+        }),
       );
     });

@@ -960,6 +1032,9 @@ describe('ContentGenerationPipeline', () => {
           top_p: 0.9, // From config
           max_tokens: 1000, // From config
         }),
+        expect.objectContaining({
+          signal: undefined,
+        }),
       );
     });

@@ -1009,6 +1084,9 @@ describe('ContentGenerationPipeline', () => {
         expect.objectContaining({
           metadata: { promptId: userPromptId },
         }),
+        expect.objectContaining({
+          signal: undefined,
+        }),
       );
     });
   });
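A note on the `signal: undefined` expectations added to the pre-existing tests above: because the pipeline now always passes a second options argument to `create`, tests for requests without an `abortSignal` must assert `signal: undefined` rather than the absence of a second argument. A minimal vitest sketch of the updated assertion shape (`mockClient` is the mocked OpenAI client from the test file above; the `model` value is illustrative):

// Assertion shape after the fix: request params first,
// per-request options second.
expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
  expect.objectContaining({ model: 'test-model' }),
  expect.objectContaining({ signal: undefined }),
);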

packages/core/src/core/openaiContentGenerator/pipeline.ts

Lines changed: 6 additions & 0 deletions
@@ -48,6 +48,9 @@ export class ContentGenerationPipeline {
       async (openaiRequest, context) => {
         const openaiResponse = (await this.client.chat.completions.create(
           openaiRequest,
+          {
+            signal: request.config?.abortSignal,
+          },
         )) as OpenAI.Chat.ChatCompletion;

         const geminiResponse =
@@ -78,6 +81,9 @@ export class ContentGenerationPipeline {
     // Stage 1: Create OpenAI stream
     const stream = (await this.client.chat.completions.create(
       openaiRequest,
+      {
+        signal: request.config?.abortSignal,
+      },
     )) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;

     // Stage 2: Process stream with conversion and logging
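For context: the OpenAI Node SDK accepts per-request options, including an `AbortSignal`, as the second argument of `chat.completions.create`. The fix threads the caller's `config.abortSignal` through to that argument, so aborting actually cancels the in-flight HTTP request instead of leaving it running (which could then return a stale tool_calls response). A minimal sketch of how a caller might cancel a streaming request, assuming a constructed `pipeline` and the request shape used in the tests above:

// Sketch only: `pipeline` is assumed to be a constructed
// ContentGenerationPipeline, as in the tests above.
const controller = new AbortController();

const request = {
  model: 'test-model',
  contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
  // Before this fix, this signal never reached the OpenAI client,
  // so aborting did not cancel the in-flight request.
  config: { abortSignal: controller.signal },
};

// Cancel after five seconds, e.g. when the user interrupts a turn.
const timer = setTimeout(() => controller.abort(), 5000);

try {
  const stream = await pipeline.executeStream(request, 'prompt-id');
  for await (const chunk of stream) {
    // Handle incremental Gemini-format responses here.
  }
} catch (err) {
  if (controller.signal.aborted) {
    // With the signal wired through, an abort surfaces here
    // (the OpenAI SDK raises APIUserAbortError for user aborts).
    console.log('request cancelled');
  } else {
    throw err;
  }
} finally {
  clearTimeout(timer);
}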
