
Commit 1588e3d

release: vertexai tools support (#173)

1 parent a494b64 · commit 1588e3d

6 files changed: +167 −14 lines changed

6 files changed

+167
-14
lines changed

CHANGELOG.md

Lines changed: 7 additions & 0 deletions
```diff
@@ -1,5 +1,12 @@
 # @langtrase/typescript-sdk
 
+
+## 5.3.2
+
+### Patch Changes
+
+- Add Vertex AI tools and function tracing support
+
 ## 5.3.1
 
 ### Patch Changes
```

package-lock.json

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default.

package.json

Lines changed: 1 addition & 1 deletion
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@langtrase/typescript-sdk",
-  "version": "5.3.1",
+  "version": "5.3.2",
   "description": "A typescript SDK for Langtrace",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
```

src/examples/vertexai/basic.ts

Lines changed: 83 additions & 3 deletions
```diff
@@ -1,6 +1,6 @@
 import { init } from '@langtrace-init/init'
 import dotenv from 'dotenv'
-import { VertexAI } from '@google-cloud/vertexai'
+import { VertexAI, FunctionDeclarationSchemaType } from '@google-cloud/vertexai'
 
 dotenv.config()
 init({ batch: false, write_spans_to_console: true })
@@ -13,6 +13,38 @@ const vertexAI = new VertexAI({ project, location })
 
 const generativeModel = vertexAI.getGenerativeModel({ model: textModel })
 
+const functionDeclarations = [
+  {
+    functionDeclarations: [
+      {
+        name: 'get_current_weather',
+        description: 'get weather in a given location',
+        parameters: {
+          type: FunctionDeclarationSchemaType.OBJECT,
+          properties: {
+            location: { type: FunctionDeclarationSchemaType.STRING },
+            unit: {
+              type: FunctionDeclarationSchemaType.STRING,
+              enum: ['celsius', 'fahrenheit']
+            }
+          },
+          required: ['location']
+        }
+      }
+    ]
+  }
+]
+
+const functionResponseParts = [
+  {
+    functionResponse: {
+      name: 'get_current_weather',
+      response:
+        { name: 'get_current_weather', content: { weather: 'super nice' } }
+    }
+  }
+]
+
 export const basicVertexAIChat = async (): Promise<void> => {
   const request = { contents: [{ role: 'user', parts: [{ text: 'How are you doing today?' }] }] }
   const result = await generativeModel.generateContent(request)
@@ -65,11 +97,59 @@ export const basicVertexAIStartChatStream = async (): Promise<void> => {
   for await (const item of result.stream) {
     const text = item.candidates?.[0]?.content?.parts?.[0]?.text
     if (text === undefined || text === null) {
-      console.log('Stream chunk: ', text)
-    } else {
       console.log('Stream chunk: No text available')
+    } else {
+      console.log('Stream chunk: ', text)
     }
   }
   const aggregatedResponse = await result.response
   console.log('Aggregated response: ', JSON.stringify(aggregatedResponse))
 }
+
+export const basicVertexAIStartChatWithToolRequest = async (): Promise<void> => {
+  const request = {
+    contents: [
+      { role: 'user', parts: [{ text: 'What is the weather in Boston?' }] },
+      { role: 'model', parts: [{ functionCall: { name: 'get_current_weather', args: { location: 'Boston' } } }] },
+      { role: 'user', parts: functionResponseParts }
+    ],
+    tools: functionDeclarations
+  }
+  const streamingResult =
+    await generativeModel.generateContentStream(request)
+  for await (const item of streamingResult.stream) {
+    if (item?.candidates !== undefined) {
+      console.log(item.candidates[0])
+    }
+  }
+}
+
+export const basicVertexAIStartChatWithToolResponse = async (): Promise<void> => {
+  // Create a chat session and pass your function declarations
+  const chat = generativeModel.startChat({ tools: functionDeclarations })
+
+  const chatInput1 = 'What is the weather in Boston?'
+
+  // This should include a functionCall response from the model
+  const streamingResult1 = await chat.sendMessageStream(chatInput1)
+  for await (const item of streamingResult1.stream) {
+    if (item?.candidates !== undefined) {
+      console.log(item.candidates[0])
+    }
+  }
+  const response1 = await streamingResult1.response
+  console.log('first aggregated response: ', JSON.stringify(response1))
+
+  // Send a follow up message with a FunctionResponse
+  const streamingResult2 = await chat.sendMessageStream(functionResponseParts)
+  for await (const item of streamingResult2.stream) {
+    if (item?.candidates !== undefined) {
+      console.log(item.candidates[0])
+    }
+  }
+
+  // This should include a text response from the model using the response content
+  // provided above
+  const response2 = await streamingResult2.response
+  console.log('second aggregated response: ', JSON.stringify(response2))
+}
```
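The two new tool-calling examples are exported but not wired to a runner in this commit. A minimal driver sketch follows; the relative import path and the top-level wrapper are assumptions for illustration, not part of the commit:

```ts
// Hypothetical driver -- assumes GCP credentials and the project/location
// env vars read by src/examples/vertexai/basic.ts are configured.
import {
  basicVertexAIStartChatWithToolRequest,
  basicVertexAIStartChatWithToolResponse
} from './basic' // assumed relative path

void (async () => {
  // Streams a response for a request that already contains a functionCall turn
  await basicVertexAIStartChatWithToolRequest()
  // Full chat round trip: model emits a functionCall, we answer with a FunctionResponse
  await basicVertexAIStartChatWithToolResponse()
})()
```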

src/instrumentation/vertexai/patch.ts

Lines changed: 73 additions & 8 deletions
```diff
@@ -53,29 +53,83 @@ export function generateContentPatch (
   const serviceProvider = Vendors.VERTEXAI
   const customAttributes = context.active().getValue(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY) ?? {}
 
-  const prompts = args.flatMap((arg: string | { contents: CandidateContent[] }) => {
-    if (typeof arg === 'string') {
+  let argTools: any[] = []
+  const prompts = args.flatMap((arg: string | { contents?: CandidateContent[], tools?: any, functionResponse?: any }) => {
+    if (Array.isArray(arg)) {
+      // Handle the case where `arg` is an array (like [ { functionResponse: ... } ])
+      return arg.flatMap(innerArg => {
+        if (Array.isArray(innerArg.tools)) argTools = argTools.concat(innerArg.tools)
+        if (innerArg.functionResponse != null) {
+          return [{ role: 'model', content: JSON.stringify(innerArg.functionResponse) }]
+        } else if (innerArg.contents != null) {
+          return innerArg.contents.map((content: CandidateContent) => ({
+            role: content.role,
+            content: content.parts.map((part: CandidateContentPart) => {
+              if (typeof part.text === 'string') {
+                return part.text
+              } else if ('functionCall' in part) {
+                return JSON.stringify((part as any).functionCall)
+              } else if (typeof part === 'object') {
+                return JSON.stringify(part)
+              } else {
+                return ''
+              }
+            }).join('')
+          }))
+        } else {
+          return []
+        }
+      })
+    } else if (typeof arg === 'string') {
       // Handle the case where `arg` is a string
       return [{ role: 'user', content: arg }]
-    } else {
+    } else if (arg.contents != null) {
+      if (Array.isArray(arg.tools)) argTools = argTools.concat(arg.tools)
       // Handle the case where `arg` has the `contents` structure
       return arg.contents.map(content => ({
         role: content.role,
-        content: content.parts.map(part => part.text).join('')
+        content: content.parts.map((part: CandidateContentPart) => {
+          if (typeof part.text === 'string') {
+            return part.text
+          } else if ('functionCall' in part) {
+            return JSON.stringify((part as any).functionCall)
+          } else if (typeof part === 'object') {
+            return JSON.stringify(part)
+          } else {
+            return ''
+          }
+        }).join('')
       }))
+    } else if (arg.functionResponse != null) {
+      // Handle the case where `arg` has a `functionResponse` structure
+      return [{ role: 'model', content: JSON.stringify(arg.functionResponse) }]
+    } else {
+      return []
     }
   })
 
+  const allTools = argTools.concat(this?.tools ?? [])
   const attributes: LLMSpanAttributes = {
     'langtrace.sdk.name': sdkName,
     'langtrace.service.name': serviceProvider,
     'langtrace.service.type': 'llm',
     'gen_ai.operation.name': 'chat',
     'langtrace.service.version': version,
     'langtrace.version': langtraceVersion,
-    'url.full': '',
-    'url.path': this?.publisherModelEndpoint,
-    'gen_ai.request.model': this?.model,
+    'url.full': this?.apiEndpoint,
+    'url.path': this?.publisherModelEndpoint ?? this?.resourcePath ?? undefined,
+    'gen_ai.request.model': (() => {
+      if (this?.model !== undefined && this.model !== null) {
+        return this.model
+      }
+      if (typeof this?.resourcePath === 'string') {
+        return this.resourcePath.split('/').pop()
+      }
+      if (typeof this?.publisherModelEndpoint === 'string') {
+        return this.publisherModelEndpoint.split('/').pop()
+      }
+      return undefined
+    })(),
     'http.max.retries': this?._client?.maxRetries,
     'http.timeout': this?._client?.timeout,
     'gen_ai.request.temperature': this?.generationConfig?.temperature,
@@ -86,6 +140,7 @@ export function generateContentPatch (
     'gen_ai.request.frequency_penalty': this?.generationConfig?.frequencyPenalty,
     'gen_ai.request.presence_penalty': this?.generationConfig?.presencePenalty,
     'gen_ai.request.seed': this?.generationConfig?.seed,
+    'gen_ai.request.tools': allTools.length > 0 ? JSON.stringify(allTools) : undefined,
     ...customAttributes
   }
 
@@ -179,7 +234,17 @@ async function * handleStreamResponse (
     const { content } = chunk.candidates.map((candidate: Candidate) => {
       return {
         role: candidate.content.role,
-        content: candidate.content.parts.map((part: CandidateContentPart) => part.text).join('')
+        content: candidate.content.parts.map((part: CandidateContentPart) => {
+          if (typeof part.text === 'string') {
+            return part.text
+          } else if ('functionCall' in part) {
+            return JSON.stringify(part.functionCall)
+          } else if (typeof part === 'object') {
+            return JSON.stringify(part)
+          } else {
+            return ''
+          }
+        }).join('')
       }
     })[0]
     const tokenCount = estimateTokens(content)
```
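The same part-flattening rule now appears three times in the patch (array args, `contents` args, and stream chunks). Extracted in isolation it looks like the sketch below; this is for illustration only, with a local stand-in type, and `flattenParts` is not a name the commit introduces:

```ts
// Sketch of the per-part serialization rule the patch applies when building
// prompt/completion strings. Local stand-in type, not the SDK's export.
interface Part {
  text?: string
  functionCall?: { name: string, args: Record<string, unknown> }
}

const flattenParts = (parts: Part[]): string =>
  parts.map(part => {
    if (typeof part.text === 'string') return part.text // plain text passes through
    if (part.functionCall !== undefined) return JSON.stringify(part.functionCall) // tool calls are serialized
    return JSON.stringify(part) // any other structured part is serialized wholesale
  }).join('')

// A model turn that carries a tool call instead of text:
console.log(flattenParts([
  { functionCall: { name: 'get_current_weather', args: { location: 'Boston' } } }
]))
// -> {"name":"get_current_weather","args":{"location":"Boston"}}
```

Serializing the `functionCall` keeps tool invocations visible in the recorded span content, at the cost of mixing JSON into otherwise plain prompt text.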

src/instrumentation/vertexai/types.ts

Lines changed: 1 addition & 0 deletions
```diff
@@ -1,5 +1,6 @@
 export interface CandidateContentPart {
   text: string
+  functionCall: any
 }
 
 export interface CandidateContent {
```
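With `functionCall` on `CandidateContentPart`, a streamed tool-call part now type-checks. A small illustration (the literal's shape follows the example payloads earlier in this commit):

```ts
// Shape from src/instrumentation/vertexai/types.ts after this commit:
interface CandidateContentPart {
  text: string
  functionCall: any
}

// Both fields are declared required, so a tool-call part still supplies text:
const part: CandidateContentPart = {
  text: '',
  functionCall: { name: 'get_current_weather', args: { location: 'Boston' } }
}
console.log(JSON.stringify(part))
```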
