Skip to content

Commit 9a43f57

Browse files
authored
Merge pull request #98 from ovh/update-models-on-tutos
fix: 🐛 made the models choices more generic
2 parents 8359365 + 26d0298 commit 9a43f57

File tree

11 files changed

+66
-43
lines changed

11 files changed

+66
-43
lines changed

ai/ai-endpoints/java-langchain4j-chatbot/pom.xml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
<maven.compiler.source>21</maven.compiler.source>
1919
<maven.compiler.target>21</maven.compiler.target>
2020
<maven.compiler.release>21</maven.compiler.release>
21-
<langchain4j.version>0.33.0</langchain4j.version>
21+
<langchain4j.version>0.36.2</langchain4j.version>
2222
</properties>
2323

2424
<dependencies>

ai/ai-endpoints/java-langchain4j-chatbot/src/main/java/com/ovhcloud/examples/aiendpoints/BlockingChatbot.java

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,16 +6,17 @@
66

77
public class BlockingChatbot {
88
private static final Logger _LOG = LoggerFactory.getLogger(BlockingChatbot.class);
9-
private static final String OVHCLOUD_API_KEY = System.getenv("OVHCLOUD_API_KEY");
10-
9+
private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN");
10+
private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME");
11+
private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL");
1112

1213
public static void main(String[] args) {
1314

1415

1516
MistralAiChatModel aiChatModel = MistralAiChatModel.builder()
16-
.apiKey(OVHCLOUD_API_KEY)
17-
.modelName("Mixtral-8x22B-Instruct-v0.1")
18-
.baseUrl("https://mixtral-8x22b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/")
17+
.apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
18+
.modelName(OVH_AI_ENDPOINTS_MODEL_NAME)
19+
.baseUrl(OVH_AI_ENDPOINTS_MODEL_URL)
1920
.maxTokens(1500)
2021
.build();
2122

ai/ai-endpoints/java-langchain4j-chatbot/src/main/java/com/ovhcloud/examples/aiendpoints/MemoryStreamingChatbot.java

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -11,18 +11,21 @@
1111
public class MemoryStreamingChatbot {
1212

1313
private static final Logger _LOG = LoggerFactory.getLogger(MemoryStreamingChatbot.class);
14-
private static final String OVHCLOUD_API_KEY = System.getenv("OVHCLOUD_API_KEY");
14+
private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN");
15+
private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME");
16+
private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL");
17+
1518

1619
interface Assistant {
1720
TokenStream chat(String message);
1821
}
1922

2023
public static void main(String[] args) {
2124
MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder()
22-
.apiKey(OVHCLOUD_API_KEY)
23-
.modelName("Mixtral-8x22B-Instruct-v0.1")
25+
.apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
26+
.modelName(OVH_AI_ENDPOINTS_MODEL_NAME)
2427
.baseUrl(
25-
"https://mixtral-8x22b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/")
28+
OVH_AI_ENDPOINTS_MODEL_URL)
2629
.maxTokens(1500)
2730
.build();
2831

ai/ai-endpoints/java-langchain4j-chatbot/src/main/java/com/ovhcloud/examples/aiendpoints/RAGStreamingChatbot.java

Lines changed: 12 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -25,10 +25,14 @@
2525

2626
public class RAGStreamingChatbot {
2727
private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class);
28-
private static final String OVHCLOUD_API_KEY = System.getenv("OVHCLOUD_API_KEY");
2928
private static final String DATABASE_HOST = System.getenv("DATABASE_HOST");
3029
private static final String DATABASE_USER = System.getenv("DATABASE_USER");
3130
private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD");
31+
private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN");
32+
private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME");
33+
private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL");
34+
private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL");
35+
3236

3337
interface Assistant {
3438
TokenStream chat(String userMessage);
@@ -47,7 +51,10 @@ public static void main(String[] args) throws Exception {
4751
List<TextSegment> segments = splitter.split(document);
4852

4953
// Do the embeddings and store them in an embedding store
50-
EmbeddingModel embeddingModel = OvhAiEmbeddingModel.withApiKey(OVHCLOUD_API_KEY);
54+
EmbeddingModel embeddingModel = OvhAiEmbeddingModel.builder()
55+
.apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
56+
.baseUrl(OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL)
57+
.build();
5158
List<Embedding> embeddings = embeddingModel.embedAll(segments).content();
5259

5360
EmbeddingStore<TextSegment> embeddingStore = PgVectorEmbeddingStore.builder()
@@ -72,10 +79,10 @@ public static void main(String[] args) throws Exception {
7279
.build();
7380

7481
MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder()
75-
.apiKey(OVHCLOUD_API_KEY)
76-
.modelName("Mistral-7B-Instruct-v0.2")
82+
.apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
83+
.modelName(OVH_AI_ENDPOINTS_MODEL_NAME)
7784
.baseUrl(
78-
"https://mistral-7b-instruct-v02.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1")
85+
OVH_AI_ENDPOINTS_MODEL_URL)
7986
.maxTokens(512)
8087
.build();
8188

ai/ai-endpoints/java-langchain4j-chatbot/src/main/java/com/ovhcloud/examples/aiendpoints/StreamingChatbot.java

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -11,14 +11,18 @@
1111
public class StreamingChatbot {
1212

1313
private static final Logger _LOG = LoggerFactory.getLogger(StreamingChatbot.class);
14-
private static final String OVHCLOUD_API_KEY = System.getenv("OVHCLOUD_API_KEY");
14+
private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN");
15+
private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME");
16+
private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL");
17+
1518

1619
public static void main(String[] args) {
1720
MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder()
18-
.apiKey(OVHCLOUD_API_KEY).modelName("Mixtral-8x22B-Instruct-v0.1")
19-
.baseUrl(
20-
"https://mixtral-8x22b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/")
21-
.maxTokens(1500).build();
21+
.apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
22+
.modelName(OVH_AI_ENDPOINTS_MODEL_NAME)
23+
.baseUrl(OVH_AI_ENDPOINTS_MODEL_URL)
24+
.maxTokens(1500)
25+
.build();
2226

2327
_LOG.info("🤖: ");
2428

ai/ai-endpoints/js-langchain-chatbot/chatbot-streaming.js

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,10 @@ async function chatCompletion(question) {
1515

1616
// Use the LLM configured via environment variables
1717
const model = new ChatMistralAI({
18-
modelName: "Mixtral-8x22B-Instruct-v0.1",
19-
model: "Mixtral-8x22B-Instruct-v0.1",
20-
apiKey: "None",
21-
endpoint: "https://mixtral-8x22b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/",
18+
modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME,
19+
model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME,
20+
apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN,
21+
endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL,
2222
maxTokens: 1500,
2323
streaming: true,
2424
verbose: false,

ai/ai-endpoints/js-langchain-chatbot/chatbot.js

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14,10 +14,10 @@ async function chatCompletion(question) {
1414

1515
// Use the LLM configured via environment variables
1616
const model = new ChatMistralAI({
17-
modelName: "Mixtral-8x22B-Instruct-v0.1",
18-
model: "Mixtral-8x22B-Instruct-v0.1",
19-
apiKey: "None",
20-
endpoint: "https://mixtral-8x22b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/",
17+
modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME,
18+
model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME,
19+
apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN,
20+
endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL,
2121
maxTokens: 1500,
2222
streaming: false,
2323
verbose: false,

ai/ai-endpoints/python-langchain-chatbot/chat-bot-streaming-rag.py

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,12 @@
1414
from langchain_core.output_parsers import StrOutputParser
1515
from langchain_core.runnables import RunnablePassthrough
1616

17-
## Set the OVHcloud AI Endpoints token to use models
18-
_OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_TOKEN')
17+
## Set the OVHcloud AI Endpoints configurations
18+
_OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
19+
_OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME')
20+
_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL')
21+
_OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME')
22+
1923

2024

2125
from langchain_text_splitters import RecursiveCharacterTextSplitter
@@ -25,9 +29,9 @@
2529
# The function prints the LLM answer.
2630
def chat_completion(new_message: str):
2731
# authenticate with the OVHcloud AI Endpoints access token
28-
model = ChatMistralAI(model="Mixtral-8x22B-Instruct-v0.1",
29-
api_key="foo",
30-
endpoint='https://mixtral-8x22b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
32+
model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME,
33+
api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN,
34+
endpoint=_OVH_AI_ENDPOINTS_MODEL_URL,
3135
max_tokens=1500,
3236
streaming=True)
3337

@@ -42,7 +46,7 @@ def chat_completion(new_message: str):
4246
# Split documents into chunks and vectorize them
4347
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
4448
splits = text_splitter.split_documents(docs)
45-
vectorstore = Chroma.from_documents(documents=splits, embedding=OVHCloudEmbeddings(model_name="multilingual-e5-base", access_token=_OVH_AI_ENDPOINTS_ACCESS_TOKEN))
49+
vectorstore = Chroma.from_documents(documents=splits, embedding=OVHCloudEmbeddings(model_name=_OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME, access_token=_OVH_AI_ENDPOINTS_ACCESS_TOKEN))
4650

4751
prompt = hub.pull("rlm/rag-prompt")
4852

ai/ai-endpoints/python-langchain-chatbot/chat-bot-streaming.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,16 +6,18 @@
66
from langchain_core.prompts import ChatPromptTemplate
77

88
## Set the OVHcloud AI Endpoints token to use models
9-
_OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_TOKEN')
9+
_OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
10+
_OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME')
11+
_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL')
1012

1113
# Function in charge of calling the LLM model.
1214
# Question parameter is the user's question.
1315
# The function prints the LLM answer.
1416
def chat_completion(new_message: str):
1517
# authenticate with the OVHcloud AI Endpoints access token
16-
model = ChatMistralAI(model="Mixtral-8x22B-Instruct-v0.1",
18+
model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME,
1719
api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN,
18-
endpoint='https://mixtral-8x22b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
20+
endpoint=_OVH_AI_ENDPOINTS_MODEL_URL,
1921
max_tokens=1500,
2022
streaming=True)
2123

ai/ai-endpoints/python-langchain-chatbot/chat-bot.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,16 +5,18 @@
55
from langchain_core.prompts import ChatPromptTemplate
66

77
## Set the OVHcloud AI Endpoints token to use models
8-
_OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_TOKEN')
8+
_OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
9+
_OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME')
10+
_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL')
911

1012
# Function in charge of calling the LLM model.
1113
# Question parameter is the user's question.
1214
# The function prints the LLM answer.
1315
def chat_completion(question: str):
1416
# authenticate with the OVHcloud AI Endpoints access token
15-
model = ChatMistralAI(model="Mixtral-8x22B-Instruct-v0.1",
17+
model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME,
1618
api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN,
17-
endpoint='https://mixtral-8x22b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
19+
endpoint=_OVH_AI_ENDPOINTS_MODEL_URL,
1820
max_tokens=1500)
1921

2022
prompt = ChatPromptTemplate.from_messages([

0 commit comments

Comments
 (0)