Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
import { ChatOVHCloudAIEndpoints } from "@langchain/community/chat_models/ovhcloud";

// Instantiate the OVHcloud AI Endpoints chat model (OpenAI-compatible API).
const model = new ChatOVHCloudAIEndpoints({
  // In Node.js defaults to process.env.OVHCLOUD_AI_ENDPOINTS_API_KEY
  apiKey: "your-api-key",
  // Name of a model deployed on OVHcloud AI Endpoints.
  model: "your-model-name",
});
15 changes: 15 additions & 0 deletions examples/src/langchain-classic/models/embeddings/ovhcloud.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
import { OVHcloudAIEndpointsEmbeddings } from "@langchain/community/embeddings/ovhcloud";

/* Embed a single query string */
const embeddings = new OVHcloudAIEndpointsEmbeddings();

const queryEmbedding = await embeddings.embedQuery("Hello world");
console.log(queryEmbedding);

/* Embed a batch of documents */
const documentEmbeddings = await embeddings.embedDocuments([
  "Hello world",
  "Bye bye",
]);
console.log(documentEmbeddings);
11 changes: 11 additions & 0 deletions libs/langchain-classic/src/chat_models/tests/universal.int.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -577,6 +577,17 @@ describe("Works with all model providers", () => {
expect(perplexityResult).toBeDefined();
expect(perplexityResult.content.length).toBeGreaterThan(0);
});

// Integration test: initChatModel should resolve the "ovhcloud" provider
// key to ChatOVHCloudAIEndpoints and return a usable chat model.
it("Can invoke ovhcloud", async () => {
  // Model name left undefined so the provider's default model is used.
  const ovhcloud = await initChatModel(undefined, {
    modelProvider: "ovhcloud",
    temperature: 0,
  });

  const ovhcloudResult = await ovhcloud.invoke("what's your name");
  expect(ovhcloudResult).toBeDefined();
  expect(ovhcloudResult.content.length).toBeGreaterThan(0);
});
});

test("Is compatible with agents", async () => {
Expand Down
5 changes: 5 additions & 0 deletions libs/langchain-classic/src/chat_models/universal.ts
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,11 @@ export const MODEL_PROVIDER_CONFIG = {
className: "ChatPerplexity",
hasCircularDependency: true,
},
ovhcloud: {
package: "@langchain/community/chat_models/ovhcloud",
className: "ChatOVHCloudAIEndpoints",
hasCircularDependency: true,
},
} as const;

const SUPPORTED_PROVIDERS = Object.keys(
Expand Down
2 changes: 2 additions & 0 deletions libs/langchain-classic/src/hub/base.ts
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,8 @@ export function generateModelImportMap(
importMapKey = "chat_models__fireworks";
} else if (modelLcName === "ChatGroq") {
importMapKey = "chat_models__groq";
} else if (modelLcName === "ChatOVHCloudAIEndpoints") {
importMapKey = "chat_models__ovhcloud";
} else {
throw new Error("Received unsupported model class when pulling prompt.");
}
Expand Down
8 changes: 8 additions & 0 deletions libs/langchain-community/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -206,6 +206,10 @@ embeddings/ollama.cjs
embeddings/ollama.js
embeddings/ollama.d.ts
embeddings/ollama.d.cts
embeddings/ovhcloud.cjs
embeddings/ovhcloud.js
embeddings/ovhcloud.d.ts
embeddings/ovhcloud.d.cts
embeddings/premai.cjs
embeddings/premai.js
embeddings/premai.d.ts
Expand Down Expand Up @@ -598,6 +602,10 @@ chat_models/ollama.cjs
chat_models/ollama.js
chat_models/ollama.d.ts
chat_models/ollama.d.cts
chat_models/ovhcloud.cjs
chat_models/ovhcloud.js
chat_models/ovhcloud.d.ts
chat_models/ovhcloud.d.cts
chat_models/perplexity.cjs
chat_models/perplexity.js
chat_models/perplexity.d.ts
Expand Down
22 changes: 22 additions & 0 deletions libs/langchain-community/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -1184,6 +1184,17 @@
"default": "./dist/embeddings/minimax.cjs"
}
},
"./embeddings/ovhcloud": {
"input": "./src/embeddings/ovhcloud.ts",
"import": {
"types": "./dist/embeddings/ovhcloud.d.ts",
"default": "./dist/embeddings/ovhcloud.js"
},
"require": {
"types": "./dist/embeddings/ovhcloud.d.cts",
"default": "./dist/embeddings/ovhcloud.cjs"
}
},
"./embeddings/premai": {
"input": "./src/embeddings/premai.ts",
"import": {
Expand Down Expand Up @@ -2086,6 +2097,17 @@
"default": "./dist/chat_models/novita.cjs"
}
},
"./chat_models/ovhcloud": {
"input": "./src/chat_models/ovhcloud.ts",
"import": {
"types": "./dist/chat_models/ovhcloud.d.ts",
"default": "./dist/chat_models/ovhcloud.js"
},
"require": {
"types": "./dist/chat_models/ovhcloud.d.cts",
"default": "./dist/chat_models/ovhcloud.cjs"
}
},
"./chat_models/perplexity": {
"input": "./src/chat_models/perplexity.ts",
"import": {
Expand Down
158 changes: 158 additions & 0 deletions libs/langchain-community/src/chat_models/ovhcloud.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
import type {
BaseChatModelParams,
LangSmithParams,
} from "@langchain/core/language_models/chat_models";
import {
type OpenAIClient,
type ChatOpenAICallOptions,
type OpenAIChatInput,
type OpenAICoreRequestOptions,
ChatOpenAICompletions,
} from "@langchain/openai";

import { getEnvironmentVariable } from "@langchain/core/utils/env";

// Constructor args accepted by OpenAIChatInput that the OVHcloud AI
// Endpoints API does not support; removed from the public input type.
type OVHCloudUnsupportedArgs =
  | "frequencyPenalty"
  | "presencePenalty"
  | "logitBias"
  | "functions";

// Per-call options (legacy OpenAI "functions" API) not supported by OVHcloud.
type OVHCloudUnsupportedCallOptions = "functions" | "function_call";

/**
 * Call options for `ChatOVHCloudAIEndpoints`: the standard
 * `ChatOpenAICallOptions` minus the unsupported legacy-functions options.
 */
export type ChatOVHCloudAIEndpointsCallOptions = Partial<
  Omit<ChatOpenAICallOptions, OVHCloudUnsupportedCallOptions>
>;

/**
 * Constructor fields for `ChatOVHCloudAIEndpoints`.
 */
export interface ChatOVHCloudAIEndpointsInput
  extends Omit<OpenAIChatInput, "openAIApiKey" | OVHCloudUnsupportedArgs>,
    BaseChatModelParams {
  /**
   * The OVHcloud API key to use for requests.
   * @default process.env.OVHCLOUD_AI_ENDPOINTS_API_KEY
   */
  apiKey?: string;
}

/**
 * OVHcloud AI Endpoints chat model integration.
 *
 * OVHcloud AI Endpoints exposes an OpenAI-compatible API, so this class is a
 * thin wrapper over the OpenAI completions chat model pointed at the OVHcloud
 * base URL: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 *
 * Setup:
 * Install `@langchain/community` and set an environment variable named `OVHCLOUD_AI_ENDPOINTS_API_KEY`.
 * If no API key is provided, the model can still be used but with a rate limit.
 *
 * ```bash
 * npm install @langchain/community
 * export OVHCLOUD_AI_ENDPOINTS_API_KEY="your-api-key"
 * ```
 *
 * ## Constructor args
 *
 * ## Runtime args
 *
 * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, etc.
 */
export class ChatOVHCloudAIEndpoints extends ChatOpenAICompletions<ChatOVHCloudAIEndpointsCallOptions> {
  lc_serializable = true;

  static lc_name() {
    return "ChatOVHCloudAIEndpoints";
  }

  _llmType() {
    return "ovhcloud";
  }

  // Maps the constructor arg to its environment variable so serialization
  // machinery can redact the secret.
  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiKey: "OVHCLOUD_AI_ENDPOINTS_API_KEY",
    };
  }

  /**
   * @param fields Model options. `apiKey` falls back to the
   *   `OVHCLOUD_AI_ENDPOINTS_API_KEY` environment variable when omitted.
   */
  constructor(
    fields?: Partial<
      Omit<OpenAIChatInput, "openAIApiKey" | OVHCloudUnsupportedArgs>
    > &
      BaseChatModelParams & {
        /**
         * The OVHcloud AI Endpoints API key to use.
         */
        apiKey?: string;
      }
  ) {
    const resolvedApiKey =
      fields?.apiKey || getEnvironmentVariable("OVHCLOUD_AI_ENDPOINTS_API_KEY");

    // A missing key is not fatal: the endpoint accepts anonymous requests,
    // just at a reduced rate limit, so only warn.
    if (!resolvedApiKey) {
      console.warn(
        "OVHcloud AI Endpoints API key not found. You can use the model but with a rate limit. " +
          "Set the OVHCLOUD_AI_ENDPOINTS_API_KEY environment variable or provide the key via 'apiKey' for unlimited access."
      );
    }

    super({
      ...fields,
      apiKey: resolvedApiKey || "",
      configuration: {
        baseURL: "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1",
      },
    });
  }

  // Tag LangSmith traces with the actual provider name instead of "openai".
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  getLsParams(options: any): LangSmithParams {
    const lsParams = super.getLsParams(options);
    return { ...lsParams, ls_provider: "ovhcloud" };
  }

  /**
   * Serialize the model, dropping the API key and the endpoint configuration
   * so neither the secret nor the hard-coded base URL leaks into saved output.
   */
  toJSON() {
    const serialized = super.toJSON();

    const kwargs =
      "kwargs" in serialized && typeof serialized.kwargs === "object"
        ? serialized.kwargs
        : null;
    if (kwargs != null) {
      delete kwargs.openai_api_key;
      delete kwargs.configuration;
    }

    return serialized;
  }

  async completionWithRetry(
    request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;

  async completionWithRetry(
    request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;

  async completionWithRetry(
    request:
      | OpenAIClient.Chat.ChatCompletionCreateParamsStreaming
      | OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<
    | AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>
    | OpenAIClient.Chat.Completions.ChatCompletion
  > {
    // Strip request fields the OVHcloud AI Endpoints API rejects before
    // delegating to the OpenAI-compatible client.
    delete request.frequency_penalty;
    delete request.presence_penalty;
    delete request.logit_bias;
    delete request.functions;

    // Branching on `stream` narrows the union so TypeScript can pick the
    // matching overload of the parent implementation; both paths delegate.
    if (request.stream === true) {
      return super.completionWithRetry(request, options);
    }

    return super.completionWithRetry(request, options);
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
import { describe, test, expect } from "@jest/globals";
import { HumanMessage, SystemMessage } from "@langchain/core/messages";
import { z } from "zod/v3";

import { ChatOVHCloudAIEndpoints } from "../ovhcloud.js";

// Model deployed on OVHcloud AI Endpoints used by every test below.
const model = "gpt-oss-120b";

// Integration tests: these call the live OVHcloud AI Endpoints API
// (presumably via OVHCLOUD_AI_ENDPOINTS_API_KEY — confirm before running).
describe("ChatOVHCloudAIEndpoints", () => {
  test("should call ChatOVHCloudAIEndpoints", async () => {
    const llm = new ChatOVHCloudAIEndpoints({ model });
    const question = new HumanMessage("What is the capital of France?");
    const answer = await llm.invoke([question], {});
    expect(answer.content.length).toBeGreaterThan(10);
  });

  test("aggregated response using streaming", async () => {
    // streaming:true still aggregates into a single message via invoke().
    const llm = new ChatOVHCloudAIEndpoints({ model, streaming: true });
    const question = new HumanMessage("What is the capital of France?");
    const answer = await llm.invoke([question], {});
    expect(answer.content.length).toBeGreaterThan(10);
  });

  test("use invoke", async () => {
    const llm = new ChatOVHCloudAIEndpoints({ model });
    const answer = await llm.invoke("What is the capital of France?");
    expect(answer.content.length).toBeGreaterThan(10);
  });

  test("should handle streaming", async () => {
    const llm = new ChatOVHCloudAIEndpoints({ model, streaming: true });
    const question = new HumanMessage("What is the capital of France?");
    const collected = [];
    for await (const chunk of await llm.stream([question], {})) {
      collected.push(chunk);
    }
    expect(collected.length).toBeGreaterThan(1);
    const aggregated = collected.map((c) => c.content).join("");
    expect(aggregated).toContain("Paris");
  });

  test("should handle system messages", async () => {
    const llm = new ChatOVHCloudAIEndpoints({ model });
    const conversation = [
      new SystemMessage("You are a geography expert."),
      new HumanMessage("What is the capital of France?"),
    ];
    const answer = await llm.invoke(conversation);
    expect(answer.content.length).toBeGreaterThan(10);
  });

  test("structured output", async () => {
    const schema = z.object({
      capital: z.string(),
      country: z.string(),
    });
    const structuredLlm = new ChatOVHCloudAIEndpoints({
      model,
    }).withStructuredOutput(schema);
    const conversation = [
      new SystemMessage("You are a geography expert."),
      new HumanMessage("What is the capital of France? Return JSON."),
    ];
    const answer = await structuredLlm.invoke(conversation);
    expect(answer.capital).toBe("Paris");
    expect(answer.country).toBe("France");
  });

  test("reasoning model with structured output", async () => {
    const schema = z.object({
      capital: z.string(),
      country: z.string(),
    });
    // Passing an explicit function name exercises the named tool-call path.
    const structuredLlm = new ChatOVHCloudAIEndpoints({
      model,
    }).withStructuredOutput(schema, { name: "reasoning_response" });
    const conversation = [
      new SystemMessage("You are a geography expert."),
      new HumanMessage("What is the capital of France? Return JSON."),
    ];
    const answer = await structuredLlm.invoke(conversation);
    expect(answer.capital).toBe("Paris");
    expect(answer.country).toBe("France");
  });
});
Loading