@@ -232,23 +232,7 @@ export class ChatMLFetcherImpl extends AbstractChatMLFetcher {
232
232
return result ;
233
233
}
234
234
case FetchResponseKind . Canceled :
235
- /* __GDPR__
236
- "response.cancelled" : {
237
- "owner": "digitarald",
238
- "comment": "Report canceled service responses for quality.",
239
- "model": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Model selection for the response" },
240
- "source": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Source for why the request was made" },
241
- "requestId": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Id of the request" },
242
- "totalTokenMax": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Maximum total token window", "isMeasurement": true },
243
- "promptTokenCount": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Number of prompt tokens", "isMeasurement": true },
244
- "tokenCountMax": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Maximum generated tokens", "isMeasurement": true },
245
- "timeToFirstToken": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Time to first token", "isMeasurement": true },
246
- "timeToCancelled": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Time to first token", "isMeasurement": true },
247
- "isVisionRequest": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Whether the request was for a vision model", "isMeasurement": true },
248
- "isBYOK": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Whether the request was for a BYOK model", "isMeasurement": true }
249
- }
250
- */
251
- this . _telemetryService . sendTelemetryEvent ( 'response.cancelled' , { github : true , microsoft : true } , {
235
+ this . _sendCancellationTelemetry ( {
252
236
source : telemetryProperties . messageSource ?? 'unknown' ,
253
237
requestId : chatParams . ourRequestId ,
254
238
model : chatEndpoint . model ,
@@ -273,12 +257,87 @@ export class ChatMLFetcherImpl extends AbstractChatMLFetcher {
273
257
} catch ( err : unknown ) {
274
258
const timeToError = Date . now ( ) - baseTelemetry . issuedTime ;
275
259
const processed = this . processError ( err , chatParams . ourRequestId ) ;
276
- this . _sendResponseErrorTelemetry ( processed , telemetryProperties , chatParams , chatEndpoint , tokenCount , maxResponseTokens , timeToError , this . filterImageMessages ( messages ) ) ;
260
+ if ( processed . type === ChatFetchResponseType . Canceled ) {
261
+ this . _sendCancellationTelemetry ( {
262
+ source : telemetryProperties . messageSource ?? 'unknown' ,
263
+ requestId : chatParams . ourRequestId ,
264
+ model : chatEndpoint . model ,
265
+ } , {
266
+ totalTokenMax : chatEndpoint . modelMaxPromptTokens ?? - 1 ,
267
+ promptTokenCount : tokenCount ,
268
+ tokenCountMax : maxResponseTokens ,
269
+ timeToFirstToken : undefined ,
270
+ timeToCancelled : timeToError ,
271
+ isVisionRequest : this . filterImageMessages ( messages ) ? 1 : - 1 ,
272
+ isBYOK : chatEndpoint instanceof OpenAIEndpoint ? 1 : - 1
273
+ } ) ;
274
+ } else {
275
+ this . _sendResponseErrorTelemetry ( processed , telemetryProperties , chatParams , chatEndpoint , tokenCount , maxResponseTokens , timeToError , this . filterImageMessages ( messages ) ) ;
276
+ }
277
277
pendingLoggedChatRequest ?. resolve ( processed ) ;
278
278
return processed ;
279
279
}
280
280
}
281
281
282
	/**
	 * Emits the `response.cancelled` telemetry event for a chat request that
	 * was cancelled, either mid-stream or before any token arrived.
	 *
	 * @param properties String-valued telemetry properties: the request
	 * source, the request id, and the model selected for the response.
	 * @param measurements Numeric telemetry measurements. `timeToFirstToken`
	 * is `undefined` when the request was cancelled before the first token was
	 * received; `isVisionRequest` and `isBYOK` encode booleans as 1 (true) /
	 * -1 (false) at the call sites.
	 */
	private _sendCancellationTelemetry(
		{
			source,
			requestId,
			model,
		}: {
			source: string;
			requestId: string;
			model: string;
		},
		{
			totalTokenMax,
			promptTokenCount,
			tokenCountMax,
			timeToFirstToken,
			timeToCancelled,
			isVisionRequest,
			isBYOK
		}: {
			totalTokenMax: number;
			promptTokenCount: number;
			tokenCountMax: number;
			timeToFirstToken: number | undefined;
			timeToCancelled: number;
			isVisionRequest: number;
			isBYOK: number;
		}
	) {
		/* __GDPR__
			"response.cancelled" : {
				"owner": "digitarald",
				"comment": "Report canceled service responses for quality.",
				"model": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Model selection for the response" },
				"source": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Source for why the request was made" },
				"requestId": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Id of the request" },
				"totalTokenMax": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Maximum total token window", "isMeasurement": true },
				"promptTokenCount": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Number of prompt tokens", "isMeasurement": true },
				"tokenCountMax": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Maximum generated tokens", "isMeasurement": true },
				"timeToFirstToken": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Time to first token", "isMeasurement": true },
				"timeToCancelled": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Time from request issue until cancellation", "isMeasurement": true },
				"isVisionRequest": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Whether the request was for a vision model", "isMeasurement": true },
				"isBYOK": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Whether the request was for a BYOK model", "isMeasurement": true }
			}
		*/
		this._telemetryService.sendTelemetryEvent('response.cancelled', { github: true, microsoft: true }, {
			source,
			requestId,
			model,
		}, {
			totalTokenMax,
			promptTokenCount,
			tokenCountMax,
			timeToFirstToken,
			timeToCancelled,
			isVisionRequest,
			isBYOK
		});
	}
340
+
282
341
private _sendResponseErrorTelemetry (
283
342
processed : ChatFetchError ,
284
343
telemetryProperties : TelemetryProperties | undefined ,
@@ -528,39 +587,50 @@ export class ChatMLFetcherImpl extends AbstractChatMLFetcher {
528
587
requestId : requestId ,
529
588
serverRequestId : undefined ,
530
589
} ;
531
- } if ( isCancellationError ( err ) ) {
590
+ }
591
+ if ( isCancellationError ( err ) ) {
532
592
return {
533
593
type : ChatFetchResponseType . Canceled ,
534
594
reason : 'Got a cancellation error' ,
535
595
requestId : requestId ,
536
596
serverRequestId : undefined ,
537
597
} ;
598
+ }
599
+ if ( err && (
600
+ ( err instanceof Error && err . message === 'Premature close' ) ||
601
+ ( typeof err === 'object' && ( err as any ) . code === 'ERR_STREAM_PREMATURE_CLOSE' ) /* to be extra sure */ )
602
+ ) {
603
+ return {
604
+ type : ChatFetchResponseType . Canceled ,
605
+ reason : 'Stream closed prematurely' ,
606
+ requestId : requestId ,
607
+ serverRequestId : undefined ,
608
+ } ;
609
+ }
610
+ this . _logService . error ( errorsUtil . fromUnknown ( err ) , `Error on conversation request` ) ;
611
+ this . _telemetryService . sendGHTelemetryException ( err , 'Error on conversation request' ) ;
612
+ // this.logger.exception(err, `Error on conversation request`);
613
+ if ( fetcher . isInternetDisconnectedError ( err ) ) {
614
+ return {
615
+ type : ChatFetchResponseType . Failed ,
616
+ reason : `It appears you're not connected to the internet, please check your network connection and try again.` ,
617
+ requestId : requestId ,
618
+ serverRequestId : undefined ,
619
+ } ;
620
+ } else if ( fetcher . isFetcherError ( err ) ) {
621
+ return {
622
+ type : ChatFetchResponseType . Failed ,
623
+ reason : fetcher . getUserMessageForFetcherError ( err ) ,
624
+ requestId : requestId ,
625
+ serverRequestId : undefined ,
626
+ } ;
538
627
} else {
539
- this . _logService . error ( errorsUtil . fromUnknown ( err ) , `Error on conversation request` ) ;
540
- this . _telemetryService . sendGHTelemetryException ( err , 'Error on conversation request' ) ;
541
- // this.logger.exception(err, `Error on conversation request`);
542
- if ( fetcher . isInternetDisconnectedError ( err ) ) {
543
- return {
544
- type : ChatFetchResponseType . Failed ,
545
- reason : `It appears you're not connected to the internet, please check your network connection and try again.` ,
546
- requestId : requestId ,
547
- serverRequestId : undefined ,
548
- } ;
549
- } else if ( fetcher . isFetcherError ( err ) ) {
550
- return {
551
- type : ChatFetchResponseType . Failed ,
552
- reason : fetcher . getUserMessageForFetcherError ( err ) ,
553
- requestId : requestId ,
554
- serverRequestId : undefined ,
555
- } ;
556
- } else {
557
- return {
558
- type : ChatFetchResponseType . Failed ,
559
- reason : 'Error on conversation request. Check the log for more details.' ,
560
- requestId : requestId ,
561
- serverRequestId : undefined ,
562
- } ;
563
- }
628
+ return {
629
+ type : ChatFetchResponseType . Failed ,
630
+ reason : 'Error on conversation request. Check the log for more details.' ,
631
+ requestId : requestId ,
632
+ serverRequestId : undefined ,
633
+ } ;
564
634
}
565
635
}
566
636
}
0 commit comments