
Commit ded675a

chore(openai): add more accurate debug logging (#48)
1 parent 426ad73 commit ded675a

File tree

3 files changed: +58 −68 lines changed

.changeset/clever-eels-lay.md

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+---
+'@openai/agents-openai': patch
+---
+
+chore(openai): add more accurate debug logging

packages/agents-openai/src/openaiChatCompletionsModel.ts

Lines changed: 29 additions & 36 deletions

@@ -245,20 +245,6 @@ export class OpenAIChatCompletionsModel implements Model {
       parallelToolCalls = request.modelSettings.parallelToolCalls;
     }
 
-    if (logger.dontLogModelData) {
-      logger.debug('Calling LLM');
-    } else {
-      logger.debug(
-        [
-          `Calling LLM ${this.#model} with input:`,
-          JSON.stringify(request.input, null, 2),
-          `Tools: ${JSON.stringify(tools, null, 2)}`,
-          `Stream: ${stream}`,
-          `Response format: ${JSON.stringify(responseFormat, null, 2)}`,
-        ].join('\n'),
-      );
-    }
-
     const messages = itemsToMessages(request.input);
     if (request.systemInstructions) {
       messages.unshift({
@@ -271,28 +257,35 @@ export class OpenAIChatCompletionsModel implements Model {
       span.spanData.input = messages;
     }
 
-    const completion = await this.#client.chat.completions.create(
-      {
-        model: this.#model,
-        messages,
-        tools,
-        temperature: request.modelSettings.temperature,
-        top_p: request.modelSettings.topP,
-        frequency_penalty: request.modelSettings.frequencyPenalty,
-        presence_penalty: request.modelSettings.presencePenalty,
-        max_tokens: request.modelSettings.maxTokens,
-        tool_choice: convertToolChoice(request.modelSettings.toolChoice),
-        response_format: responseFormat,
-        parallel_tool_calls: parallelToolCalls,
-        stream,
-        store: request.modelSettings.store,
-        ...request.modelSettings.providerData,
-      },
-      {
-        headers: HEADERS,
-        signal: request.signal,
-      },
-    );
+    const requestData = {
+      model: this.#model,
+      messages,
+      tools,
+      temperature: request.modelSettings.temperature,
+      top_p: request.modelSettings.topP,
+      frequency_penalty: request.modelSettings.frequencyPenalty,
+      presence_penalty: request.modelSettings.presencePenalty,
+      max_tokens: request.modelSettings.maxTokens,
+      tool_choice: convertToolChoice(request.modelSettings.toolChoice),
+      response_format: responseFormat,
+      parallel_tool_calls: parallelToolCalls,
+      stream,
+      store: request.modelSettings.store,
+      ...request.modelSettings.providerData,
+    };
+
+    if (logger.dontLogModelData) {
+      logger.debug('Calling LLM');
+    } else {
+      logger.debug(
+        `Calling LLM. Request data: ${JSON.stringify(requestData, null, 2)}`,
+      );
+    }
+
+    const completion = await this.#client.chat.completions.create(requestData, {
+      headers: HEADERS,
+      signal: request.signal,
+    });
 
     if (logger.dontLogModelData) {
       logger.debug('Response received');
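The change follows one pattern: the request payload is assembled into a single `requestData` object first, the debug log (unless `logger.dontLogModelData` is set) serializes that exact object, and the same object is then handed to the client call, so the log can no longer drift from what is actually sent. A minimal sketch of that pattern outside the SDK, with a stand-in logger and placeholder message content instead of the SDK's `logger`, `HEADERS`, and model settings:

```ts
import OpenAI from 'openai';

// Stand-in for the SDK's logger; the real code gates on logger.dontLogModelData.
const dontLogModelData = process.env.DONT_LOG_MODEL_DATA === '1';
const debug = (msg: string) => console.debug(msg);

async function callChatModel(client: OpenAI, model: string) {
  // Build the payload once so the log and the API call can never diverge.
  const requestData = {
    model,
    messages: [{ role: 'user' as const, content: 'Hello!' }], // placeholder input
    temperature: 0.2,
  };

  if (dontLogModelData) {
    debug('Calling LLM');
  } else {
    // A single log line now carries the exact payload sent to the API.
    debug(`Calling LLM. Request data: ${JSON.stringify(requestData, null, 2)}`);
  }

  // Request options (headers, abort signal) stay in the second argument.
  return client.chat.completions.create(requestData, {
    signal: new AbortController().signal,
  });
}
```

A side effect of logging the assembled object is that provider-specific fields spread in via `providerData` now appear in the debug output as well, which the old field-by-field log did not show.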

packages/agents-openai/src/openaiResponsesModel.ts

Lines changed: 24 additions & 32 deletions

@@ -640,45 +640,37 @@ export class OpenAIResponsesModel implements Model {
       parallelToolCalls = request.modelSettings.parallelToolCalls;
     }
 
+    const requestData = {
+      instructions: request.systemInstructions,
+      model: this.#model,
+      input,
+      include,
+      tools,
+      previous_response_id: request.previousResponseId,
+      temperature: request.modelSettings.temperature,
+      top_p: request.modelSettings.topP,
+      truncation: request.modelSettings.truncation,
+      max_output_tokens: request.modelSettings.maxTokens,
+      tool_choice: toolChoice as ToolChoiceOptions,
+      parallel_tool_calls: parallelToolCalls,
+      stream,
+      text: responseFormat,
+      store: request.modelSettings.store,
+      ...request.modelSettings.providerData,
+    };
+
     if (logger.dontLogModelData) {
       logger.debug('Calling LLM');
     } else {
       logger.debug(
-        [
-          `Calling LLM ${this.#model} with input:`,
-          JSON.stringify(request.input, null, 2),
-          `Tools: ${JSON.stringify(tools, null, 2)}`,
-          `Stream: ${stream}`,
-          `Tool choice: ${toolChoice}`,
-          `Response format: ${JSON.stringify(responseFormat, null, 2)}`,
-        ].join('\n'),
+        `Calling LLM. Request data: ${JSON.stringify(requestData, null, 2)}`,
       );
     }
 
-    const response = await this.#client.responses.create(
-      {
-        instructions: request.systemInstructions,
-        model: this.#model,
-        input,
-        include,
-        tools,
-        previous_response_id: request.previousResponseId,
-        temperature: request.modelSettings.temperature,
-        top_p: request.modelSettings.topP,
-        truncation: request.modelSettings.truncation,
-        max_output_tokens: request.modelSettings.maxTokens,
-        tool_choice: toolChoice as ToolChoiceOptions,
-        parallel_tool_calls: parallelToolCalls,
-        stream,
-        text: responseFormat,
-        store: request.modelSettings.store,
-        ...request.modelSettings.providerData,
-      },
-      {
-        headers: HEADERS,
-        signal: request.signal,
-      },
-    );
+    const response = await this.#client.responses.create(requestData, {
+      headers: HEADERS,
+      signal: request.signal,
+    });
 
     if (logger.dontLogModelData) {
       logger.debug('Response received');
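The Responses API path gets the same consolidation; the only differences are the field names of the payload (`instructions`/`input` rather than `messages`, `max_output_tokens` rather than `max_tokens`, `text` rather than `response_format`), and the `Tool choice` entry from the old multi-line log is now covered by `tool_choice` inside the serialized object. A rough equivalent outside the SDK, assuming an openai-node version that exposes `client.responses.create`, with placeholder instructions and input:

```ts
import OpenAI from 'openai';

const dontLogModelData = process.env.DONT_LOG_MODEL_DATA === '1';

async function callResponsesModel(client: OpenAI, model: string) {
  // Same build-once pattern; note the Responses-specific field names.
  const requestData = {
    model,
    instructions: 'You are a helpful assistant.', // placeholder system instructions
    input: 'Hello!', // placeholder user input
    temperature: 0.2,
    max_output_tokens: 256,
  };

  if (dontLogModelData) {
    console.debug('Calling LLM');
  } else {
    // Identical log format to the chat-completions path above.
    console.debug(
      `Calling LLM. Request data: ${JSON.stringify(requestData, null, 2)}`,
    );
  }

  return client.responses.create(requestData, {
    signal: new AbortController().signal, // the SDK forwards request.signal here
  });
}
```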
