
Commit 0c29928

fix(anthropic,openai): handle structured output with v1 output version (#9212)
1 parent 1d95681 commit 0c29928
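
The fix: `outputVersion` is now resolved from call options before falling back to the model-level setting, and `withStructuredOutput` in both providers pins `"v0"` on its internal runnable. A sketch of the scenario this enables, shaped like the integration tests added below (top-level await assumed):

import { z } from "zod";
import { ChatOpenAI } from "@langchain/openai";

// Structured output should come back parsed even when the model is
// configured to store standardized ("v1") message content.
const schema = z.object({ name: z.string() });
const model = new ChatOpenAI({ model: "gpt-5", outputVersion: "v1" });
const result = await model
  .withStructuredOutput(schema)
  .invoke("respond with the name 'John'");
// result is the parsed object, e.g. { name: "John" }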

File tree

5 files changed: +73 additions, -10 deletions


libs/langchain-core/src/language_models/chat_models.ts

Lines changed: 25 additions & 5 deletions
@@ -10,6 +10,7 @@ import {
   isAIMessageChunk,
   isBaseMessage,
   isAIMessage,
+  MessageOutputVersion,
 } from "../messages/index.js";
 import {
   convertToOpenAIImageBlock,
@@ -113,7 +114,7 @@ export type BaseChatModelParams = BaseLanguageModelParams & {
   *
   * @default "v0"
   */
-  outputVersion?: "v0" | "v1";
+  outputVersion?: MessageOutputVersion;
 };
 
 /**
@@ -135,6 +136,22 @@ export type BaseChatModelCallOptions = BaseLanguageModelCallOptions & {
   * if used with an unsupported model.
   */
  tool_choice?: ToolChoice;
+  /**
+   * Version of `AIMessage` output format to store in message content.
+   *
+   * `AIMessage.contentBlocks` will lazily parse the contents of `content` into a
+   * standard format. This flag can be used to additionally store the standard format
+   * as the message content, e.g., for serialization purposes.
+   *
+   * - "v0": provider-specific format in content (can lazily parse with `.contentBlocks`)
+   * - "v1": standardized format in content (consistent with `.contentBlocks`)
+   *
+   * You can also set `LC_OUTPUT_VERSION` as an environment variable to "v1" to
+   * enable this by default.
+   *
+   * @default "v0"
+   */
+  outputVersion?: MessageOutputVersion;
 };
 
 function _formatForTracing(messages: BaseMessage[]): BaseMessage[] {
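
With `outputVersion` now part of `BaseChatModelCallOptions`, the format can be chosen per invocation rather than only at construction time. A minimal sketch of the opt-in paths described by the doc comment above, assuming `model` is any chat model built on `BaseChatModel`:

// Per call, via call options (newly possible with this change):
const aiMessage = await model.invoke("hello", { outputVersion: "v1" });

// Or process-wide, via the environment variable named in the doc comment:
//   LC_OUTPUT_VERSION=v1 node app.js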
@@ -202,7 +219,7 @@ export abstract class BaseChatModel<
 
   disableStreaming = false;
 
-  outputVersion?: "v0" | "v1";
+  outputVersion?: MessageOutputVersion;
 
   constructor(fields: BaseChatModelParams) {
     super(fields);
@@ -308,6 +325,7 @@
       invocation_params: this?.invocationParams(callOptions),
       batch_size: 1,
     };
+    const outputVersion = callOptions.outputVersion ?? this.outputVersion;
     const runManagers = await callbackManager_?.handleChatModelStart(
       this.toJSON(),
       [_formatForTracing(messages)],
@@ -335,7 +353,7 @@
           ...chunk.generationInfo,
           ...chunk.message.response_metadata,
         };
-        if (this.outputVersion === "v1") {
+        if (outputVersion === "v1") {
           yield castStandardMessageContent(
             chunk.message
           ) as OutputMessageType;
@@ -440,6 +458,7 @@
         handledOptions.runName
       );
     }
+    const outputVersion = parsedOptions.outputVersion ?? this.outputVersion;
     const generations: ChatGeneration[][] = [];
     const llmOutputs: LLMResult["llmOutput"][] = [];
     // Even if stream is not explicitly called, check if model is implicitly
@@ -508,7 +527,7 @@
           { ...parsedOptions, promptIndex: i },
           runManagers?.[i]
         );
-        if (this.outputVersion === "v1") {
+        if (outputVersion === "v1") {
           for (const generation of generateResults.generations) {
             generation.message = castStandardMessageContent(
               generation.message
@@ -650,6 +669,7 @@
     );
 
     // Handle results and call run managers
+    const outputVersion = parsedOptions.outputVersion ?? this.outputVersion;
     const generations: Generation[][] = [];
     await Promise.all(
       cachedResults.map(async ({ result: promiseResult, runManager }, i) => {
@@ -666,7 +686,7 @@
             output_tokens: 0,
             total_tokens: 0,
           };
-          if (this.outputVersion === "v1") {
+          if (outputVersion === "v1") {
             result.message = castStandardMessageContent(result.message);
           }
         }
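
All three code paths above (streaming, generation, and the cache path) resolve the effective version the same way: a value carried in call options wins over the instance-level field. That precedence is what the provider changes below rely on when they pin `"v0"` via `withConfig`. A self-contained sketch, assuming `MessageOutputVersion` is the `"v0" | "v1"` union it replaces here:

type MessageOutputVersion = "v0" | "v1";

// Mirrors `callOptions.outputVersion ?? this.outputVersion`; anything
// other than "v1" leaves content in the provider-specific v0 shape.
function resolveOutputVersion(
  callOption?: MessageOutputVersion,
  instanceOption?: MessageOutputVersion
): MessageOutputVersion {
  return callOption ?? instanceOption ?? "v0";
}

resolveOutputVersion("v0", "v1"); // "v0": a withConfig pin beats the constructor
resolveOutputVersion(undefined, "v1"); // "v1"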

libs/providers/langchain-anthropic/src/chat_models.ts

Lines changed: 2 additions & 0 deletions
@@ -1224,6 +1224,7 @@ export class ChatAnthropicMessages<
       console.warn(thinkingAdmonition);
 
       llm = this.withConfig({
+        outputVersion: "v0",
         tools,
         ls_structured_output_format: {
           kwargs: { method: "functionCalling" },
@@ -1241,6 +1242,7 @@
       llm = llm.pipe(raiseIfNoToolCalls);
     } else {
       llm = this.withConfig({
+        outputVersion: "v0",
         tools,
         tool_choice: {
           type: "tool",

libs/providers/langchain-anthropic/src/tests/chat_models.int.test.ts

Lines changed: 12 additions & 0 deletions
@@ -1519,3 +1519,15 @@ describe("Sonnet 4.5", () => {
     expect(response.content.length).toBeGreaterThan(0);
   });
 });
+
+it("won't modify structured output content if outputVersion is set", async () => {
+  const schema = z.object({ name: z.string() });
+  const model = new ChatAnthropic({
+    model: "claude-opus-4-1",
+    outputVersion: "v1",
+  });
+  const response = await model
+    .withStructuredOutput(schema)
+    .invoke("respond with the name 'John'");
+  expect(response.name).toBeDefined();
+});

libs/providers/langchain-openai/src/chat_models.ts

Lines changed: 21 additions & 4 deletions
@@ -30,6 +30,7 @@ import {
   BaseChatModel,
   type LangSmithParams,
   type BaseChatModelParams,
+  BaseChatModelCallOptions,
 } from "@langchain/core/language_models/chat_models";
 import {
   isOpenAITool as isOpenAIFunctionTool,
@@ -104,9 +105,11 @@
 import {
   _convertMessagesToOpenAIParams,
   completionsApiContentBlockConverter,
-  ResponsesInputItem,
 } from "./utils/message_inputs.js";
-import { _convertToResponsesMessageFromV1 } from "./utils/standard.js";
+import {
+  _convertToResponsesMessageFromV1,
+  ResponsesInputItem,
+} from "./utils/standard.js";
 import { iife, isReasoningModel, messageToOpenAIRole } from "./utils/misc.js";
 
 const _FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__";
@@ -123,7 +126,13 @@ export type { OpenAICallOptions, OpenAIChatInput };
 
 export interface BaseChatOpenAICallOptions
   extends OpenAICallOptions,
+    BaseChatModelCallOptions,
     BaseFunctionCallOptions {
+  /**
+   * Additional options to pass to the underlying axios request.
+   */
+  options?: OpenAICoreRequestOptions;
+
   /**
    * A list of tools that the model may use to generate responses.
    * Each tool can be a function, a built-in tool, or a custom tool definition.
@@ -464,9 +473,13 @@ export abstract class BaseChatOpenAI<
   constructor(fields?: BaseChatOpenAIFields) {
     super(fields ?? {});
 
+    const configApiKey =
+      typeof fields?.configuration?.apiKey === "string"
+        ? fields?.configuration?.apiKey
+        : undefined;
     this.apiKey =
       fields?.apiKey ??
-      fields?.configuration?.apiKey ??
+      configApiKey ??
       getEnvironmentVariable("OPENAI_API_KEY");
     this.organization =
       fields?.configuration?.organization ??
@@ -929,6 +942,7 @@
       }
       const asJsonSchema = toJsonSchema(schema);
       llm = this.withConfig({
+        outputVersion: "v0",
         response_format: { type: "json_object" },
         ls_structured_output_format: {
           kwargs: { method: "json_mode" },
@@ -944,6 +958,7 @@
       };
       const asJsonSchema = toJsonSchema(openaiJsonSchemaParams.schema);
       llm = this.withConfig({
+        outputVersion: "v0",
         response_format: {
           type: "json_schema",
           json_schema: openaiJsonSchemaParams,
@@ -976,6 +991,7 @@
     if (isInteropZodSchema(schema)) {
       const asJsonSchema = toJsonSchema(schema);
       llm = this.withConfig({
+        outputVersion: "v0",
         tools: [
           {
             type: "function" as const,
@@ -1023,6 +1039,7 @@
       }
       const asJsonSchema = toJsonSchema(schema);
       llm = this.withConfig({
+        outputVersion: "v0",
         tools: [
           {
             type: "function" as const,
@@ -1944,7 +1961,7 @@ export class ChatOpenAIResponses<
             reasoning.summary.length > 1
               ? reasoning.summary.reduce(
                   (acc, curr) => {
-                    const last = acc.at(-1);
+                    const last = acc[acc.length - 1];
 
                     if (last!.index === curr.index) {
                       last!.text += curr.text;
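
Alongside the `outputVersion: "v0"` pins (mirroring the Anthropic change), the constructor now takes `configuration.apiKey` only when it is a plain string, presumably because the client configuration can carry the key in other forms. A sketch of the resulting resolution order; `Fields` here is a hypothetical stand-in for the real constructor argument type:

type Fields = {
  apiKey?: string;
  configuration?: { apiKey?: unknown };
};

// 1. explicit fields.apiKey  2. configuration.apiKey, if a string  3. env var
function resolveApiKey(fields?: Fields): string | undefined {
  const configApiKey =
    typeof fields?.configuration?.apiKey === "string"
      ? fields.configuration.apiKey
      : undefined;
  return fields?.apiKey ?? configApiKey ?? process.env.OPENAI_API_KEY;
}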

libs/providers/langchain-openai/src/tests/chat_models_responses.int.test.ts

Lines changed: 13 additions & 1 deletion
@@ -1,7 +1,7 @@
 import { randomUUID } from "node:crypto";
 
 import { z } from "zod/v3";
-import { test, expect } from "vitest";
+import { describe, it, test, expect } from "vitest";
 
 import {
   AIMessage,
@@ -934,3 +934,15 @@ describe("promptCacheKey", () => {
     ).toBeGreaterThan(0);
   });
 });
+
+it.only("won't modify structured output content if outputVersion is set", async () => {
+  const schema = z.object({ name: z.string() });
+  const model = new ChatOpenAI({
+    model: "gpt-5",
+    outputVersion: "v1",
+  });
+  const response = await model
+    .withStructuredOutput(schema)
+    .invoke("respond with the name 'John'");
+  expect(response.name).toBeDefined();
+});
