@@ -49,47 +49,41 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
   protected init(): InstrumentationModuleDefinition<any> {
     const vertexAIModule = new InstrumentationNodeModuleDefinition<any>(
       "@google-cloud/vertexai",
-      [">=0.2.1"],
+      [">=1.1.0"],
       this.wrap.bind(this),
       this.unwrap.bind(this),
     );
 
     return vertexAIModule;
   }
 
-  private modelConfig: vertexAI.ModelParams = { model: "" };
-
-  private setModel(newValue: vertexAI.ModelParams) {
-    this.modelConfig = { ...newValue };
-  }
-
   public manuallyInstrument(module: typeof vertexAI) {
     this._diag.debug("Manually instrumenting @google-cloud/vertexai");
 
     this._wrap(
-      module.VertexAI_Preview.prototype,
-      "getGenerativeModel",
-      this.wrapperMethod("getGenerativeModel"),
+      module.GenerativeModel.prototype,
+      "generateContentStream",
+      this.wrapperMethod(),
     );
     this._wrap(
       module.GenerativeModel.prototype,
-      "generateContentStream",
-      this.wrapperMethod("generateContentStream"),
+      "generateContent",
+      this.wrapperMethod(),
     );
   }
 
   private wrap(module: typeof vertexAI, moduleVersion?: string) {
     this._diag.debug(`Patching @google-cloud/vertexai@${moduleVersion}`);
 
     this._wrap(
-      module.VertexAI_Preview.prototype,
-      "getGenerativeModel",
-      this.wrapperMethod("getGenerativeModel"),
+      module.GenerativeModel.prototype,
+      "generateContentStream",
+      this.wrapperMethod(),
     );
     this._wrap(
       module.GenerativeModel.prototype,
-      "generateContentStream",
-      this.wrapperMethod("generateContentStream"),
+      "generateContent",
+      this.wrapperMethod(),
     );
 
     return module;
@@ -98,42 +92,21 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
   private unwrap(module: typeof vertexAI, moduleVersion?: string): void {
     this._diag.debug(`Unpatching @google-cloud/vertexai@${moduleVersion}`);
 
-    this._unwrap(module.VertexAI_Preview.prototype, "getGenerativeModel");
     this._unwrap(module.GenerativeModel.prototype, "generateContentStream");
+    this._unwrap(module.GenerativeModel.prototype, "generateContent");
   }
 
-  private wrapperMethod(
-    wrappedMethodName: "getGenerativeModel" | "generateContentStream",
-  ) {
+  private wrapperMethod() {
     // eslint-disable-next-line @typescript-eslint/no-this-alias
     const plugin = this;
     // eslint-disable-next-line @typescript-eslint/ban-types
     return (original: Function) => {
       return function method(
-        this: any,
+        this: vertexAI.GenerativeModel,
         ...args: (vertexAI.GenerateContentRequest & vertexAI.ModelParams)[]
       ) {
-        if (wrappedMethodName === "getGenerativeModel") {
-          plugin.setModel(args[0]);
-
-          return context.bind(
-            context.active(),
-            safeExecuteInTheMiddle(
-              () => {
-                return context.with(context.active(), () => {
-                  return original.apply(this, args);
-                });
-              },
-              (e) => {
-                if (e) {
-                  plugin._diag.error("Error in VertexAI Instrumentation", e);
-                }
-              },
-            ),
-          );
-        }
-
         const span = plugin._startSpan({
+          instance: this,
           params: args[0],
         });
@@ -157,8 +130,10 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
   }
 
   private _startSpan({
+    instance,
     params,
   }: {
+    instance: vertexAI.GenerativeModel;
     params: vertexAI.GenerateContentRequest;
   }): Span {
     const attributes: Attributes = {
@@ -167,28 +142,18 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
     };
 
     try {
-      attributes[SpanAttributes.LLM_REQUEST_MODEL] = this.modelConfig.model;
-
-      if (
-        this.modelConfig.generation_config !== undefined &&
-        typeof this.modelConfig.generation_config === "object"
-      ) {
-        if (this.modelConfig.generation_config.max_output_tokens) {
-          attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] =
-            this.modelConfig.generation_config.max_output_tokens;
-        }
-        if (this.modelConfig.generation_config.temperature) {
-          attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] =
-            this.modelConfig.generation_config.temperature;
-        }
-        if (this.modelConfig.generation_config.top_p) {
-          attributes[SpanAttributes.LLM_REQUEST_TOP_P] =
-            this.modelConfig.generation_config.top_p;
-        }
-        if (this.modelConfig.generation_config.top_k) {
-          attributes[SpanAttributes.LLM_TOP_K] =
-            this.modelConfig.generation_config.top_k;
-        }
+      attributes[SpanAttributes.LLM_REQUEST_MODEL] = instance["model"];
+      attributes[SpanAttributes.LLM_RESPONSE_MODEL] = instance["model"];
+
+      if (instance["generationConfig"]) {
+        attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] =
+          instance["generationConfig"].max_output_tokens;
+        attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] =
+          instance["generationConfig"].temperature;
+        attributes[SpanAttributes.LLM_REQUEST_TOP_P] =
+          instance["generationConfig"].top_p;
+        attributes[SpanAttributes.LLM_TOP_K] =
+          instance["generationConfig"].top_k;
       }
 
       if (this._shouldSendPrompts() && "contents" in params) {
@@ -213,7 +178,9 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
       .then(async (result) => {
         await this._endSpan({
           span,
-          result: result as vertexAI.StreamGenerateContentResult,
+          result: result as
+            | vertexAI.StreamGenerateContentResult
+            | vertexAI.GenerateContentResult,
         });
         return new Promise<T>((resolve) => resolve(result));
       })
@@ -236,14 +203,11 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
     result,
   }: {
     span: Span;
-    result: vertexAI.StreamGenerateContentResult;
+    result:
+      | vertexAI.StreamGenerateContentResult
+      | vertexAI.GenerateContentResult;
   }) {
     try {
-      span.setAttribute(
-        SpanAttributes.LLM_RESPONSE_MODEL,
-        this.modelConfig.model,
-      );
-
       const streamResponse = await result.response;
 
       if (streamResponse.usageMetadata?.totalTokenCount !== undefined)
@@ -252,20 +216,20 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
           streamResponse.usageMetadata.totalTokenCount,
         );
 
-      if (streamResponse.usageMetadata?.candidates_token_count)
+      if (streamResponse.usageMetadata?.candidatesTokenCount)
         span.setAttribute(
           SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
-          streamResponse.usageMetadata.candidates_token_count,
+          streamResponse.usageMetadata.candidatesTokenCount,
         );
 
-      if (streamResponse.usageMetadata?.prompt_token_count)
+      if (streamResponse.usageMetadata?.promptTokenCount)
         span.setAttribute(
           SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
-          streamResponse.usageMetadata.prompt_token_count,
+          streamResponse.usageMetadata.promptTokenCount,
         );
 
       if (this._shouldSendPrompts()) {
-        streamResponse.candidates.forEach((candidate, index) => {
+        streamResponse.candidates?.forEach((candidate, index) => {
           if (candidate.finishReason)
             span.setAttribute(
               `${SpanAttributes.LLM_COMPLETIONS}.${index}.finish_reason`,
@@ -298,10 +262,10 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
     const result = parts
       .map((part) => {
         if (part.text) return part.text;
-        else if (part.file_data)
-          return part.file_data.file_uri + "-" + part.file_data.mime_type;
-        else if (part.inline_data)
-          return part.inline_data.data + "-" + part.inline_data.mime_type;
+        else if (part.fileData)
+          return part.fileData.fileUri + "-" + part.fileData.mimeType;
+        else if (part.inlineData)
+          return part.inlineData.data + "-" + part.inlineData.mimeType;
         else return "";
       })
       .filter(Boolean);
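
Usage sketch (illustrative, not part of the commit): with getGenerativeModel no longer patched, spans now start inside the wrapped generateContent / generateContentStream and read the model name and generationConfig straight off the GenerativeModel instance. A minimal wiring example, assuming this class is exported as VertexAIInstrumentation from this package; the import path, project ID, and model name are placeholders:

import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
import { registerInstrumentations } from "@opentelemetry/instrumentation";
import * as vertexAI from "@google-cloud/vertexai";
import { VertexAIInstrumentation } from "./instrumentation"; // assumed export/path

const provider = new NodeTracerProvider();
provider.register();

const instrumentation = new VertexAIInstrumentation();
registerInstrumentations({ instrumentations: [instrumentation] });
// If @google-cloud/vertexai was already imported before registration,
// patch the loaded module directly:
instrumentation.manuallyInstrument(vertexAI);

async function main() {
  const client = new vertexAI.VertexAI({
    project: "my-project", // placeholder
    location: "us-central1",
  });

  // getGenerativeModel is left unwrapped; the patched generateContent below
  // starts the span and pulls model/generationConfig off this instance.
  const model = client.getGenerativeModel({
    model: "gemini-1.0-pro",
    generationConfig: { temperature: 0.2 },
  });

  const result = await model.generateContent({
    contents: [{ role: "user", parts: [{ text: "Hello, Vertex!" }] }],
  });
  console.log(result.response.candidates?.[0]?.content);
}

main();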