@@ -8,11 +8,10 @@ import { context, propagation } from '@opentelemetry/api';
88import { VERSION } from '@opentelemetry/core' ;
99import type { InstrumentationConfig } from '@opentelemetry/instrumentation' ;
1010import { InstrumentationBase , InstrumentationNodeModuleDefinition } from '@opentelemetry/instrumentation' ;
11- import type { AggregationCounts , Client , SanitizedRequestData , Scope } from '@sentry/core' ;
12- import {
13- addBreadcrumb ,
11+ import type { AggregationCounts , Client , SanitizedRequestData , Scope } from '@sentry/core' ;
12+ import { addBreadcrumb ,
1413 addNonEnumerableProperty ,
15- generateSpanId ,
14+ flush , generateSpanId ,
1615 getBreadcrumbLogLevelFromHttpStatusCode ,
1716 getClient ,
1817 getCurrentScope ,
@@ -22,6 +21,7 @@ import {
2221 logger ,
2322 parseUrl ,
2423 stripUrlQueryAndFragment ,
24+ vercelWaitUntil ,
2525 withIsolationScope ,
2626} from '@sentry/core' ;
2727import { DEBUG_BUILD } from '../../debug-build' ;
@@ -127,6 +127,11 @@ export class SentryHttpInstrumentation extends InstrumentationBase<SentryHttpIns
127127 this . _onOutgoingRequestFinish ( data . request , undefined ) ;
128128 } ) satisfies ChannelListener ;
129129
130+ const onHttpServerResponseCreated = ( ( _data : unknown ) => {
131+ const data = _data as { response : http . OutgoingMessage } ;
132+ patchResponseToFlushOnServerlessPlatformsOnce ( data . response ) ;
133+ } ) satisfies ChannelListener ;
134+
130135 /**
131136 * You may be wondering why we register these diagnostics-channel listeners
132137 * in such a convoluted way (as InstrumentationNodeModuleDefinition...),
@@ -153,6 +158,11 @@ export class SentryHttpInstrumentation extends InstrumentationBase<SentryHttpIns
153158 // In this case, `http.client.response.finish` is not triggered
154159 subscribe ( 'http.client.request.error' , onHttpClientRequestError ) ;
155160
161+ // On vercel, ensure that we flush events before the lambda freezes
162+ if ( process . env . VERCEL ) {
163+ subscribe ( 'http.server.response.created' , onHttpServerResponseCreated ) ;
164+ }
165+
156166 return moduleExports ;
157167 } ,
158168 ( ) => {
@@ -178,6 +188,11 @@ export class SentryHttpInstrumentation extends InstrumentationBase<SentryHttpIns
178188 // In this case, `http.client.response.finish` is not triggered
179189 subscribe ( 'http.client.request.error' , onHttpClientRequestError ) ;
180190
191+ // On vercel, ensure that we flush events before the lambda freezes
192+ if ( process . env . VERCEL ) {
193+ subscribe ( 'http.server.response.created' , onHttpServerResponseCreated ) ;
194+ }
195+
181196 return moduleExports ;
182197 } ,
183198 ( ) => {
@@ -529,6 +544,66 @@ export function recordRequestSession({
529544 } ) ;
530545}
531546
547+ function patchResponseToFlushOnServerlessPlatformsOnce ( res : http . OutgoingMessage ) : void {
548+ // This means it was already patched, do nothing
549+ if ( ( res as { __sentry_patched__ ?: boolean } ) . __sentry_patched__ ) {
550+ return ;
551+ }
552+
553+ DEBUG_BUILD && logger . log ( INSTRUMENTATION_NAME , 'Patching server.end()' ) ;
554+ addNonEnumerableProperty ( res , '__sentry_patched__' , true ) ;
555+
556+ // This is vercel specific handling to flush events before the lambda freezes
557+
558+ // In some cases res.end does not seem to be defined leading to errors if passed to Proxy
559+ // https://github.com/getsentry/sentry-javascript/issues/15759
560+ if ( typeof res . end !== 'function' ) {
561+ return ;
562+ }
563+
564+ let markOnEndDone = ( ) : void => undefined ;
565+ const onEndDonePromise = new Promise < void > ( res => {
566+ markOnEndDone = res ;
567+ } ) ;
568+
569+ res . on ( 'close' , ( ) => {
570+ markOnEndDone ( ) ;
571+ } ) ;
572+
573+ // eslint-disable-next-line @typescript-eslint/unbound-method
574+ res . end = new Proxy ( res . end , {
575+ apply ( target , thisArg , argArray ) {
576+ vercelWaitUntil (
577+ new Promise < void > ( finishWaitUntil => {
578+ // Define a timeout that unblocks the lambda just to be safe so we're not indefinitely keeping it alive, exploding server bills
579+ const timeout = setTimeout ( ( ) => {
580+ finishWaitUntil ( ) ;
581+ } , 2000 ) ;
582+
583+ onEndDonePromise
584+ . then ( ( ) => {
585+ DEBUG_BUILD && logger . log ( 'Flushing events before Vercel Lambda freeze' ) ;
586+ return flush ( 2000 ) ;
587+ } )
588+ . then (
589+ ( ) => {
590+ clearTimeout ( timeout ) ;
591+ finishWaitUntil ( ) ;
592+ } ,
593+ e => {
594+ clearTimeout ( timeout ) ;
595+ DEBUG_BUILD && logger . log ( 'Error while flushing events for Vercel:\n' , e ) ;
596+ finishWaitUntil ( ) ;
597+ } ,
598+ ) ;
599+ } ) ,
600+ ) ;
601+
602+ return target . apply ( thisArg , argArray ) ;
603+ } ,
604+ } ) ;
605+ }
606+
532607const clientToRequestSessionAggregatesMap = new Map <
533608 Client ,
534609 { [ timestampRoundedToSeconds : string ] : { exited : number ; crashed : number ; errored : number } }
0 commit comments