Skip to content

Commit 572c3a1

Browse files
chore: Updated OpenAI instrumentation to skip creating a LlmChatCompletionMessage for an outgoing tool call response (#3655)
1 parent 85c601f commit 572c3a1

File tree

4 files changed

+106
-6
lines changed

4 files changed

+106
-6
lines changed

lib/llm-events/openai/chat-completion-message.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ module.exports = class LlmChatCompletionMessage extends LlmEvent {
2626
// Check if the given message is from the response.
2727
// The response object differs based on the API called.
2828
// If it's `responses.create`, we check against `response.output`.
29-
// If it's `chat.completions.create` or langchain, we check against `response.choices`.
29+
// If it's `chat.completions.create`, we check against `response.choices`.
3030
if (response?.object === 'response') {
3131
this.is_response = message.content === response?.output?.[0]?.content?.[0]?.text
3232
} else {

lib/subscribers/openai/utils.js

Lines changed: 12 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,8 @@ let TRACKING_METRIC = OPENAI.TRACKING_PREFIX
2121
* Parses the response from OpenAI and extracts the message content and role.
2222
*
2323
* @param {object} response The OpenAI SDK response object
24-
* @returns {{ content: string, role: string }} the message object with fields `content` and `role`
24+
* @returns {object[]} an array with the message object with fields `content`
25+
* and `role` or [] if response invalid
2526
*/
2627
function getMessageFromResponse(response) {
2728
let content
@@ -30,11 +31,17 @@ function getMessageFromResponse(response) {
3031
content = response?.output?.[0]?.content?.[0]?.text
3132
role = response?.output?.[0]?.role
3233
} else {
33-
content = response?.choices?.[0]?.message?.content
34-
role = response?.choices?.[0]?.message?.role
34+
const choice = response?.choices?.[0]
35+
if (choice?.finish_reason === 'tool_calls') {
36+
// A false response. Don't create a LlmChatCompletionMessage
37+
// for this -- the full conversation isn't done yet.
38+
return []
39+
}
40+
content = choice?.message?.content
41+
role = choice?.message?.role
3542
}
3643

37-
return { content, role }
44+
return [{ content, role }]
3845
}
3946

4047
/**
@@ -165,7 +172,7 @@ function recordChatCompletionMessages({
165172
// the response message.
166173
const messages = [
167174
...getMessagesFromRequest(request, logger),
168-
getMessageFromResponse(response)
175+
...getMessageFromResponse(response)
169176
]
170177

171178
for (let i = 0; i < messages.length; i++) {

test/versioned/openai/chat-completions.test.js

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -155,6 +155,54 @@ test('chat.completions.create', async (t) => {
155155
})
156156
})
157157

158+
await t.test('should not create a LlmChatCompletionMessage for an outgoing tool call', (t, end) => {
159+
const { client, agent } = t.nr
160+
helper.runInTransaction(agent, async(tx) => {
161+
const model = 'gpt-4'
162+
const content = 'What is 2 + 2?'
163+
await client.chat.completions.create({
164+
max_tokens: 100,
165+
temperature: 0.5,
166+
model,
167+
messages: [
168+
{ role: 'user', content },
169+
]
170+
})
171+
172+
const chatCmplId = 'chatcmpl-87sb95K4EF2nuJRcTs43Tm9calc'
173+
174+
const events = agent.customEventAggregator.events.toArray()
175+
assert.equal(events.length, 2, 'should create only 1 chat completion message and 1 summary event')
176+
const chatMsgs = events.filter(([{ type }]) => type === 'LlmChatCompletionMessage')
177+
assert.equal(chatMsgs.length, 1, 'should only have the request message')
178+
179+
const [segment] = tx.trace.getChildren(tx.trace.root.id)
180+
181+
const requestMsg = chatMsgs[0]
182+
if (requestMsg[1].sequence === 0) {
183+
const expectedMsg = {
184+
appName: 'New Relic for Node.js tests',
185+
completion_id: /[a-f0-9]{36}/,
186+
content,
187+
id: `${chatCmplId}-0`,
188+
ingest_source: 'Node',
189+
is_response: false,
190+
request_id: '49dbbffbd3c3f4612aa48def69059calc',
191+
'response.model': model,
192+
role: 'user',
193+
sequence: 0,
194+
span_id: segment.id,
195+
trace_id: tx.traceId,
196+
vendor: 'openai',
197+
}
198+
match(requestMsg[1], expectedMsg, { assert })
199+
}
200+
201+
tx.end()
202+
end()
203+
})
204+
})
205+
158206
if (semver.gte(pkgVersion, '4.12.2')) {
159207
await t.test('should create span on successful chat completion stream create', (t, end) => {
160208
const { client, agent, host, port } = t.nr

test/versioned/openai/mock-chat-api-responses.js

Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -299,3 +299,48 @@ responses.set('bad stream', {
299299
},
300300
streamData: 'do random'
301301
})
302+
303+
responses.set('What is 2 + 2?', {
304+
headers: {
305+
'Content-Type': 'application/json',
306+
'openai-model': 'gpt-4',
307+
'openai-organization': 'new-relic-nkmd8b',
308+
'openai-processing-ms': '1469',
309+
'openai-version': '2020-10-01',
310+
'x-ratelimit-limit-requests': '200',
311+
'x-ratelimit-limit-tokens': '40000',
312+
'x-ratelimit-remaining-requests': '199',
313+
'x-ratelimit-remaining-tokens': '39940',
314+
'x-ratelimit-reset-requests': '7m12s',
315+
'x-ratelimit-reset-tokens': '90ms',
316+
'x-request-id': '49dbbffbd3c3f4612aa48def69059calc'
317+
},
318+
code: 200,
319+
body: {
320+
choices: [
321+
{
322+
finish_reason: 'tool_calls',
323+
index: 0,
324+
message: {
325+
role: 'assistant',
326+
content: null,
327+
tool_calls: [
328+
{
329+
id: 'call_calc123',
330+
type: 'function',
331+
function: {
332+
name: 'calculator',
333+
arguments: '{"a":2,"b":2,"operation":"add"}'
334+
}
335+
}
336+
]
337+
}
338+
}
339+
],
340+
created: 1696888863,
341+
id: 'chatcmpl-87sb95K4EF2nuJRcTs43Tm9calc',
342+
model: 'gpt-4',
343+
object: 'chat.completion',
344+
usage: { completion_tokens: 15, prompt_tokens: 50, total_tokens: 65 }
345+
}
346+
})

0 commit comments

Comments
 (0)