@@ -161,6 +161,9 @@ describe('ContentGenerationPipeline', () => {
161161 top_p : 0.9 ,
162162 max_tokens : 1000 ,
163163 } ) ,
164+        expect.objectContaining({
165+          signal: undefined,
166+        }),
164167 ) ;
165168 expect ( mockConverter . convertOpenAIResponseToGemini ) . toHaveBeenCalledWith (
166169 mockOpenAIResponse ,
@@ -238,6 +241,9 @@ describe('ContentGenerationPipeline', () => {
238241 expect . objectContaining ( {
239242 tools : mockTools ,
240243 } ) ,
244+        expect.objectContaining({
245+          signal: undefined,
246+        }),
241247 ) ;
242248 } ) ;
243249
@@ -274,6 +280,30 @@ describe('ContentGenerationPipeline', () => {
274280 request ,
275281 ) ;
276282 } ) ;
283+
284+    it('should pass abort signal to OpenAI client when provided', async () => {
285+      const abortController = new AbortController();
286+      const request: GenerateContentParameters = {
287+        model: 'test-model',
288+        contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
289+        config: { abortSignal: abortController.signal },
290+      };
291+
292+      (mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
293+      (mockConverter.convertOpenAIResponseToGemini as Mock).mockReturnValue(
294+        new GenerateContentResponse(),
295+      );
296+      (mockClient.chat.completions.create as Mock).mockResolvedValue({
297+        choices: [{ message: { content: 'response' } }],
298+      });
299+
300+      await pipeline.execute(request, 'test-id');
301+
302+      expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
303+        expect.any(Object),
304+        expect.objectContaining({ signal: abortController.signal }),
305+      );
306+    });
277307 } ) ;
278308
279309 describe ( 'executeStream' , ( ) => {
@@ -338,6 +368,9 @@ describe('ContentGenerationPipeline', () => {
338368 stream : true ,
339369 stream_options : { include_usage : true } ,
340370 } ) ,
371+        expect.objectContaining({
372+          signal: undefined,
373+        }),
341374 ) ;
342375 expect ( mockTelemetryService . logStreamingSuccess ) . toHaveBeenCalledWith (
343376 expect . objectContaining ( {
@@ -470,6 +503,42 @@ describe('ContentGenerationPipeline', () => {
470503 ) ;
471504 } ) ;
472505
506+    it('should pass abort signal to OpenAI client for streaming requests', async () => {
507+      const abortController = new AbortController();
508+      const request: GenerateContentParameters = {
509+        model: 'test-model',
510+        contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
511+        config: { abortSignal: abortController.signal },
512+      };
513+
514+      const mockStream = {
515+        async *[Symbol.asyncIterator]() {
516+          yield {
517+            id: 'chunk-1',
518+            choices: [{ delta: { content: 'Hello' }, finish_reason: 'stop' }],
519+          };
520+        },
521+      };
522+
523+      (mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
524+      (mockConverter.convertOpenAIChunkToGemini as Mock).mockReturnValue(
525+        new GenerateContentResponse(),
526+      );
527+      (mockClient.chat.completions.create as Mock).mockResolvedValue(
528+        mockStream,
529+      );
530+
531+      const resultGenerator = await pipeline.executeStream(request, 'test-id');
532+      for await (const _result of resultGenerator) {
533+        // Consume stream
534+      }
535+
536+      expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
537+        expect.any(Object),
538+        expect.objectContaining({ signal: abortController.signal }),
539+      );
540+    });
541+
473542 it ( 'should merge finishReason and usageMetadata from separate chunks' , async ( ) => {
474543 // Arrange
475544 const request : GenerateContentParameters = {
@@ -924,6 +993,9 @@ describe('ContentGenerationPipeline', () => {
924993 top_p : 0.9 , // Config parameter used since request overrides are not being applied in current implementation
925994 max_tokens : 1000 , // Config parameter used since request overrides are not being applied in current implementation
926995 } ) ,
996+        expect.objectContaining({
997+          signal: undefined,
998+        }),
927999 ) ;
9281000 } ) ;
9291001
@@ -960,6 +1032,9 @@ describe('ContentGenerationPipeline', () => {
9601032 top_p : 0.9 , // From config
9611033 max_tokens : 1000 , // From config
9621034 } ) ,
1035+        expect.objectContaining({
1036+          signal: undefined,
1037+        }),
9631038 ) ;
9641039 } ) ;
9651040
@@ -1009,6 +1084,9 @@ describe('ContentGenerationPipeline', () => {
10091084 expect . objectContaining ( {
10101085 metadata : { promptId : userPromptId } ,
10111086 } ) ,
1087+        expect.objectContaining({
1088+          signal: undefined,
1089+        }),
10121090 ) ;
10131091 } ) ;
10141092 } ) ;
0 commit comments