Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
157 changes: 35 additions & 122 deletions libs/providers/langchain-google-common/src/chat_models.ts
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,18 @@ import {
schemaToGeminiParameters,
} from "./utils/zod_to_gemini_parameters.js";

/**
 * Ensure a Gemini schema carries an explicit `propertyOrdering`.
 *
 * Gemini's structured-output feature reads `propertyOrdering` as a field of
 * the `Schema` object — i.e. a *sibling* of `properties` — to control the
 * order in which keys are emitted. When the caller has not provided one,
 * infer it from the declaration order of the schema's properties.
 *
 * Handles both shapes seen in this module:
 * - a bare parameter Schema (`properties` at the top level), and
 * - a function-declaration-style wrapper whose `parameters` field holds the
 *   Schema. In that case the ordering must be written inside `parameters`;
 *   a top-level `propertyOrdering` would sit outside the Schema and be
 *   ignored by the API.
 *
 * The input object is not mutated (shallow copy, one level deep for the
 * `parameters` wrapper), and an existing `propertyOrdering` is preserved.
 *
 * @param schema - Gemini parameter schema or function-declaration wrapper.
 * @returns The schema with `propertyOrdering` filled in when it was absent.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function withPropertyOrdering<T extends Record<string, any>>(schema: T): T {
  const out = { ...schema } as T & {
    propertyOrdering?: string[];
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    parameters?: Record<string, any>;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    properties?: Record<string, any>;
  };

  // Function-declaration wrapper: the Schema lives under `parameters`,
  // so infer and attach the ordering there.
  const nested = out.parameters;
  if (
    nested &&
    typeof nested === "object" &&
    nested.properties &&
    typeof nested.properties === "object"
  ) {
    if (!nested.propertyOrdering) {
      out.parameters = {
        ...nested,
        propertyOrdering: Object.keys(nested.properties),
      };
    }
    return out;
  }

  // Bare Schema: `properties` (and thus the ordering) at the top level.
  if (!out.propertyOrdering && out.properties && typeof out.properties === "object") {
    out.propertyOrdering = Object.keys(out.properties);
  }
  return out;
}
export class ChatConnection<AuthOptions> extends AbstractGoogleLLMConnection<
BaseMessage[],
AuthOptions
Expand All @@ -89,9 +101,6 @@ export class ChatConnection<AuthOptions> extends AbstractGoogleLLMConnection<
}

get computeUseSystemInstruction(): boolean {
// This works on models from April 2024 and later
// Vertex AI: gemini-1.5-pro and gemini-1.0-002 and later
// AI Studio: gemini-1.5-pro-latest
if (this.modelFamily === "palm") {
return false;
} else if (this.modelName === "gemini-1.0-pro-001") {
Expand All @@ -101,11 +110,8 @@ export class ChatConnection<AuthOptions> extends AbstractGoogleLLMConnection<
} else if (this.modelName.startsWith("gemini-1.0-pro-vision")) {
return false;
} else if (this.modelName === "gemini-pro" && this.platform === "gai") {
// on AI Studio gemini-pro is still pointing at gemini-1.0-pro-001
return false;
} else if (this.modelFamily === "gemma") {
// At least as of 12 Mar 2025 gemma 3 on AIS, trying to use system instructions yields an error:
// "Developer instruction is not enabled for models/gemma-3-27b-it"
return false;
}
return true;
Expand Down Expand Up @@ -158,9 +164,6 @@ export class ChatConnection<AuthOptions> extends AbstractGoogleLLMConnection<
}
}

/**
* Input to chat model class.
*/
export interface ChatGoogleBaseInput<AuthOptions>
extends BaseChatModelParams,
GoogleConnectionParams<AuthOptions>,
Expand All @@ -169,14 +172,10 @@ export interface ChatGoogleBaseInput<AuthOptions>
GoogleAIAPIParams,
Pick<GoogleAIBaseLanguageModelCallOptions, "streamUsage"> {}

/**
* Integration with a Google chat model.
*/
export abstract class ChatGoogleBase<AuthOptions>
extends BaseChatModel<GoogleAIBaseLanguageModelCallOptions, AIMessageChunk>
implements ChatGoogleBaseInput<AuthOptions>
{
// Serialization/tracing name for this class (LangChain `lc_name` convention).
static lc_name() {
return "ChatGoogle";
}
Expand All @@ -188,58 +187,32 @@ export abstract class ChatGoogleBase<AuthOptions>
}

lc_serializable = true;

// Set based on modelName
model: string;

modelName = "gemini-pro";

temperature: number;

maxOutputTokens: number;

maxReasoningTokens: number;

topP: number;

topK: number;

seed: number;

presencePenalty: number;

frequencyPenalty: number;

stopSequences: string[] = [];

logprobs: boolean;

topLogprobs: number = 0;

safetySettings: GoogleAISafetySetting[] = [];

responseModalities?: GoogleAIModelModality[];

// May intentionally be undefined, meaning to compute this.
convertSystemMessageToHumanContent: boolean | undefined;

safetyHandler: GoogleAISafetyHandler;

speechConfig: GoogleSpeechConfig;

streamUsage = true;

streaming = false;

labels?: Record<string, string>;

protected connection: ChatConnection<AuthOptions>;

protected streamedConnection: ChatConnection<AuthOptions>;

constructor(fields?: ChatGoogleBaseInput<AuthOptions>) {
super(ensureParams(fields));

copyAndValidateModelParamsInto(fields, this);
this.safetyHandler =
fields?.safetyHandler ?? new DefaultGeminiSafetyHandler();
Expand Down Expand Up @@ -317,14 +290,10 @@ export abstract class ChatGoogleBase<AuthOptions>
return this.withConfig({ tools: convertToGeminiTools(tools), ...kwargs });
}

// Identifier for this LLM integration, reported to LangChain callbacks/tracing.
_llmType() {
return "chat_integration";
}

/**
 * Get the parameters used to invoke the model for a single call.
 * Merges this instance's configured model params with per-call options
 * via `copyAIModelParams`.
 */
override invocationParams(options?: this["ParsedCallOptions"]) {
return copyAIModelParams(this, options);
}
Expand All @@ -341,12 +310,8 @@ export abstract class ChatGoogleBase<AuthOptions>
for await (const chunk of stream) {
finalChunk = !finalChunk ? chunk : concat(finalChunk, chunk);
}
if (!finalChunk) {
throw new Error("No chunks were returned from the stream.");
}
return {
generations: [finalChunk],
};
if (!finalChunk) throw new Error("No chunks were returned from the stream.");
return { generations: [finalChunk] };
}

const response = await this.connection.request(
Expand All @@ -357,9 +322,7 @@ export abstract class ChatGoogleBase<AuthOptions>
);
const ret = this.connection.api.responseToChatResult(response);
const chunk = ret?.generations?.[0];
if (chunk) {
await runManager?.handleLLMNewToken(chunk.text || "");
}
if (chunk) await runManager?.handleLLMNewToken(chunk.text || "");
return ret;
}

Expand All @@ -368,28 +331,20 @@ export abstract class ChatGoogleBase<AuthOptions>
options: this["ParsedCallOptions"],
runManager?: CallbackManagerForLLMRun
): AsyncGenerator<ChatGenerationChunk> {
// Make the call as a streaming request
const parameters = this.invocationParams(options);
const response = await this.streamedConnection.request(
_messages,
parameters,
options,
runManager
);

// Get the streaming parser of the response
const stream = response.data as JsonStream;
let usageMetadata: UsageMetadata | undefined;
// Loop until the end of the stream
// During the loop, yield each time we get a chunk from the streaming parser
// that is either available or added to the queue
while (!stream.streamDone) {
const output = await stream.nextChunk();
await runManager?.handleCustomEvent(
`google-chunk-${this.constructor.name}`,
{
output,
}
{ output }
);
if (
output &&
Expand All @@ -416,63 +371,31 @@ export abstract class ChatGoogleBase<AuthOptions>
});
if (chunk) {
yield chunk;
await runManager?.handleLLMNewToken(
chunk.text ?? "",
undefined,
undefined,
undefined,
undefined,
{ chunk }
);
await runManager?.handleLLMNewToken(chunk.text ?? "", undefined, undefined, undefined, undefined, { chunk });
}
}
}

/**
 * No per-generation `llmOutput` to merge for this integration; always
 * returns an empty array.
 * @ignore
 */
_combineLLMOutput() {
return [];
}

withStructuredOutput<
// eslint-disable-next-line @typescript-eslint/no-explicit-any
RunOutput extends Record<string, any> = Record<string, any>
>(
outputSchema:
| InteropZodType<RunOutput>
// eslint-disable-next-line @typescript-eslint/no-explicit-any
| Record<string, any>,
config?: StructuredOutputMethodOptions<false>
): Runnable<BaseLanguageModelInput, RunOutput>;
// ---------------------------------------------------------------------------
// Patched withStructuredOutput (adds Gemini propertyOrdering inference)
// ---------------------------------------------------------------------------

withStructuredOutput<
// eslint-disable-next-line @typescript-eslint/no-explicit-any
RunOutput extends Record<string, any> = Record<string, any>
>(
outputSchema:
| InteropZodType<RunOutput>
// eslint-disable-next-line @typescript-eslint/no-explicit-any
| Record<string, any>,
config?: StructuredOutputMethodOptions<true>
): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;

withStructuredOutput<
// eslint-disable-next-line @typescript-eslint/no-explicit-any
RunOutput extends Record<string, any> = Record<string, any>
>(
outputSchema:
| InteropZodType<RunOutput>
// eslint-disable-next-line @typescript-eslint/no-explicit-any
| Record<string, any>,
config?: StructuredOutputMethodOptions<boolean>
):
| Runnable<BaseLanguageModelInput, RunOutput>
| Runnable<
BaseLanguageModelInput,
{ raw: BaseMessage; parsed: RunOutput }
> {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const schema: InteropZodType<RunOutput> | Record<string, any> =
outputSchema;
| Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>
{
const schema: InteropZodType<RunOutput> | Record<string, any> = outputSchema;
const name = config?.name;
const method = config?.method;
const includeRaw = config?.includeRaw;
Expand All @@ -483,16 +406,18 @@ export abstract class ChatGoogleBase<AuthOptions>
let functionName = name ?? "extract";
let outputParser: BaseLLMOutputParser<RunOutput>;
let tools: GeminiTool[];

if (isInteropZodSchema(schema)) {
const jsonSchema = schemaToGeminiParameters(schema);
const jsonSchemaWithOrdering = withPropertyOrdering(jsonSchema);
tools = [
{
functionDeclarations: [
{
name: functionName,
description:
jsonSchema.description ?? "A function available to call.",
parameters: jsonSchema as GeminiFunctionSchema,
jsonSchemaWithOrdering.description ?? "A function available to call.",
parameters: jsonSchemaWithOrdering as GeminiFunctionSchema,
},
],
},
Expand All @@ -512,24 +437,21 @@ export abstract class ChatGoogleBase<AuthOptions>
geminiFunctionDefinition = schema as GeminiFunctionDeclaration;
functionName = schema.name;
} else {
// We are providing the schema for *just* the parameters, probably
const parameters: GeminiJsonSchema = removeAdditionalProperties(schema);
let parameters: GeminiJsonSchema = removeAdditionalProperties(schema);
parameters = withPropertyOrdering(parameters);
geminiFunctionDefinition = {
name: functionName,
description: schema.description ?? "",
parameters,
};
}
tools = [
{
functionDeclarations: [geminiFunctionDefinition],
},
];
tools = [{ functionDeclarations: [geminiFunctionDefinition] }];
outputParser = new JsonOutputKeyToolsParser<RunOutput>({
returnSingle: true,
keyName: functionName,
});
}

const llm = this.bindTools(tools).withConfig({ tool_choice: functionName });

if (!includeRaw) {
Expand All @@ -539,25 +461,16 @@ export abstract class ChatGoogleBase<AuthOptions>
}

const parserAssign = RunnablePassthrough.assign({
// eslint-disable-next-line @typescript-eslint/no-explicit-any
parsed: (input: any, config) => outputParser.invoke(input.raw, config),
});
const parserNone = RunnablePassthrough.assign({
parsed: () => null,
});
const parsedWithFallback = parserAssign.withFallbacks({
fallbacks: [parserNone],
});
const parserNone = RunnablePassthrough.assign({ parsed: () => null });
const parsedWithFallback = parserAssign.withFallbacks({ fallbacks: [parserNone] });
return RunnableSequence.from<
BaseLanguageModelInput,
{ raw: BaseMessage; parsed: RunOutput }
>([
{
raw: llm,
},
parsedWithFallback,
]).withConfig({
>([{ raw: llm }, parsedWithFallback]).withConfig({
runName: "StructuredOutputRunnable",
});
}
}
export default ChatGoogleBase;