
Commit 8359365

eleapttn and elea.petton authored

Fix AI Endpoints tutorials (#97)

* :fix: tuto AI Endpoints chatbot memory python
* :fix: tuto AI Endpoints audio summarizer
* :fix: tuto AI Endpoints audio virtual assistant

Co-authored-by: elea.petton <[email protected]>

1 parent 05e59f7, commit 8359365

9 files changed (+29, -28 lines)


ai/ai-endpoints/audio-summarizer-assistant/README.md (1 addition, 1 deletion)

````diff
@@ -7,7 +7,7 @@ This project illustrate how to use Automatic Speech Recognition (ASR) and Large
 - create the `.env` file:
 ```
 ASR_AI_ENDPOINT=https://nvr-asr-en-gb.endpoints.kepler.ai.cloud.ovh.net/api/v1/asr/recognize
-LLM_AI_ENDPOINT=https://mixtral-8x22b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=<ai-endpoints-api-token>
 ```
````
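For context, the tutorials load this `.env` file with python-dotenv (pinned in the requirements files below). A minimal sketch of how these variables are typically read; the local variable names are illustrative, not the tutorial's exact code:

```python
import os

from dotenv import load_dotenv

# Pull ASR_AI_ENDPOINT, LLM_AI_ENDPOINT and OVH_AI_ENDPOINTS_ACCESS_TOKEN from .env
load_dotenv()

asr_endpoint = os.environ["ASR_AI_ENDPOINT"]   # REST endpoint for speech recognition
llm_endpoint = os.environ["LLM_AI_ENDPOINT"]   # OpenAI-compatible base URL (Mixtral-8x7B)
api_token = os.environ["OVH_AI_ENDPOINTS_ACCESS_TOKEN"]

print(asr_endpoint, llm_endpoint, sep="\n")
```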

ai/ai-endpoints/audio-summarizer-assistant/audio-summarizer-app.py (2 additions, 3 deletions)

```diff
@@ -76,7 +76,7 @@ def chat_completion(new_message):
     history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}]
     # return summary
     return client.chat.completions.create(
-        model="Mixtral-8x22B-Instruct-v0.1",
+        model="Mixtral-8x7B-Instruct-v0.1",
         messages=history_openai_format,
         temperature=0,
         max_tokens=1024
@@ -139,5 +139,4 @@ def chat_completion(new_message):
 
 if __name__ == '__main__':
 
-    demo.launch(server_name="0.0.0.0", server_port=8000)
-
+    demo.launch(server_name="0.0.0.0", server_port=8000)
```
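For reference, here is a minimal, self-contained sketch of how the updated `chat_completion` call and the kept `demo.launch(...)` line fit together in a Gradio app. The client construction and the `gr.Interface` wiring are assumptions for illustration only; the real app also handles audio upload and ASR before summarizing:

```python
import os

import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()
client = OpenAI(
    base_url=os.environ["LLM_AI_ENDPOINT"],
    api_key=os.environ["OVH_AI_ENDPOINTS_ACCESS_TOKEN"],
)

def chat_completion(new_message):
    # Same call shape as the hunk above, with the Mixtral-8x7B model name
    history_openai_format = [
        {"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}
    ]
    response = client.chat.completions.create(
        model="Mixtral-8x7B-Instruct-v0.1",
        messages=history_openai_format,
        temperature=0,
        max_tokens=1024,
    )
    return response.choices[0].message.content

demo = gr.Interface(fn=chat_completion, inputs="text", outputs="text")

if __name__ == '__main__':
    demo.launch(server_name="0.0.0.0", server_port=8000)
```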
ai/ai-endpoints/audio-summarizer-assistant/requirements.txt (2 additions, 2 deletions)

```diff
@@ -1,4 +1,4 @@
-openai==1.13.3
-gradio==4.36.1
+openai==1.68.2
+gradio==4.44.1
 pydub==0.25.1
 python-dotenv==1.0.1
```

ai/ai-endpoints/audio-virtual-assistant/README.md (2 additions, 1 deletion)

````diff
@@ -8,7 +8,8 @@ This project illustrate how to put Automatic Speech Recognition (ASR), Large Lan
 ```
 ASR_GRPC_ENDPOINT=nvr-asr-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x22b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN=<ai-endpoints-api-token>
+LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_ACCESS_TOKEN=<ai-endpoints-api-token>
 ```
 
 - launch the Gradio app: `python audio-virtual-assistant-app.py`
````
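The split matters because a `.env` parser such as python-dotenv reads one assignment per line: with the old single-line form it would typically define only `LLM_AI_ENDPOINT` (with the token text folded into its value) and leave `OVH_AI_ENDPOINTS_ACCESS_TOKEN` unset. A quick hypothetical check against the fixed file:

```python
from dotenv import dotenv_values

# Illustrative check, not part of the tutorial
config = dotenv_values(".env")
assert "LLM_AI_ENDPOINT" in config
assert "OVH_AI_ENDPOINTS_ACCESS_TOKEN" in config  # would fail with the old one-line form
print(config["LLM_AI_ENDPOINT"])
```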

ai/ai-endpoints/audio-virtual-assistant/audio-virtual-assistant-app.py (4 additions, 7 deletions)

```diff
@@ -44,13 +44,10 @@ def tts_synthesis(response):
     # set up config
     sample_rate_hz = 48000
     req = {
-        "language_code" : "en-US", #
-        languages: en-US
+        "language_code" : "en-US", # languages: en-US
         "encoding" : riva.client.AudioEncoding.LINEAR_PCM ,
-        "sample_rate_hz" : sample_rate_hz, #
-        sample rate: 48KHz audio
-        "voice_name" : "English-US.Female-1" #
-        voices: `English-US.Female-1`, `English-US.Male-1`
+        "sample_rate_hz" : sample_rate_hz, # sample rate: 48KHz audio
+        "voice_name" : "English-US.Female-1" # voices: `English-US.Female-1`, `English-US.Male-1`
     }
 
     # return response
@@ -95,7 +92,7 @@ def tts_synthesis(response):
     prompt, "avatar":"👤"})
     messages.chat_message("user", avatar="👤").write(prompt)
     response = client.chat.completions.create(
-        model="Mixtral-8x22B-Instruct-v0.1",
+        model="Mixtral-8x7B-Instruct-v0.1",
         messages=st.session_state.messages,
         temperature=0,
         max_tokens=1024,
```
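For the second hunk, a minimal Streamlit sketch of the chat flow around the updated model name. The client setup and session-state handling here are simplified assumptions, not the app's exact code (the real assistant also records microphone input and runs Riva ASR/TTS over gRPC):

```python
import os

import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()
client = OpenAI(
    base_url=os.environ["LLM_AI_ENDPOINT"],
    api_key=os.environ["OVH_AI_ENDPOINTS_ACCESS_TOKEN"],
)

# Keep the conversation in session state so each rerun sees previous turns
if "messages" not in st.session_state:
    st.session_state.messages = []

if prompt := st.chat_input("Ask the assistant"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user", avatar="👤").write(prompt)
    response = client.chat.completions.create(
        model="Mixtral-8x7B-Instruct-v0.1",
        messages=[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages],
        temperature=0,
        max_tokens=1024,
    )
    answer = response.choices[0].message.content
    st.session_state.messages.append({"role": "assistant", "content": answer})
    st.chat_message("assistant").write(answer)
```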
ai/ai-endpoints/audio-virtual-assistant/requirements.txt (4 additions, 4 deletions)

```diff
@@ -1,5 +1,5 @@
-openai==1.13.3
-streamlit==1.36.0
-streamlit-mic-recorder==1.16.0
-nvidia-riva-client==2.15.1
+openai==1.68.2
+streamlit==1.42.0
+streamlit-mic-recorder==0.0.8
+nvidia-riva-client==2.18.0
 python-dotenv==1.0.1
```

ai/ai-endpoints/python-langchain-conversational-memory/README.md (1 addition, 1 deletion)

````diff
@@ -6,7 +6,7 @@ This project illustrate how to implement conversational memory and enable your c
 
 - create the `.env` file:
 ```
-LLM_AI_ENDPOINT=https://mistral-7b-instruct-v02.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=<ai-endpoints-api-token>
 ```
 
````

ai/ai-endpoints/python-langchain-conversational-memory/chatbot-memory-langchain.ipynb (12 additions, 8 deletions)

```diff
@@ -35,9 +35,13 @@
   {
    "cell_type": "raw",
    "id": "b66ba6c3-0fa0-4188-8ab7-37dd08879e91",
-   "metadata": {},
+   "metadata": {
+    "vscode": {
+     "languageId": "raw"
+    }
+   },
    "source": [
-    "LLM_AI_ENDPOINT=https://mistral-7b-instruct-v02.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1\n",
+    "LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1\n",
     "OVH_AI_ENDPOINTS_ACCESS_TOKEN=<ai-endpoints-api-token>"
    ]
   },
@@ -124,9 +128,9 @@
    "source": [
     "# Set up the LLM\n",
     "llm = ChatOpenAI(\n",
-    "    model=\"Mistral-7B-Instruct-v0.2\", \n",
-    "    api_key=ai_endpoint_token,\n",
-    "    base_url=ai_endpoint_mistral7b, \n",
+    "    model_name=\"Mistral-7B-Instruct-v0.3\", \n",
+    "    openai_api_key=ai_endpoint_token,\n",
+    "    openai_api_base=ai_endpoint_mistral7b, \n",
     "    max_tokens=512,\n",
     "    temperature=0.0\n",
     ")\n",
@@ -186,9 +190,9 @@
    "source": [
     "# Set up the LLM\n",
     "llm = ChatOpenAI(\n",
-    "    model=\"Mistral-7B-Instruct-v0.2\", \n",
-    "    api_key=ai_endpoint_token,\n",
-    "    base_url=ai_endpoint_mistral7b, \n",
+    "    model_name=\"Mistral-7B-Instruct-v0.3\", \n",
+    "    openai_api_key=ai_endpoint_token,\n",
+    "    openai_api_base=ai_endpoint_mistral7b,\n",
     "    max_tokens=512,\n",
     "    temperature=0.0\n",
     ")\n",
```
ai/ai-endpoints/python-langchain-conversational-memory/requirements.txt (1 addition, 1 deletion)

```diff
@@ -1,3 +1,3 @@
 python-dotenv==1.0.1
 langchain_openai==0.1.14
-openai==1.35.1
+openai==1.68.2
```
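With `langchain_openai==0.1.14` pinned here, both spellings of the constructor arguments in the notebook diff above are accepted: `model_name`, `openai_api_key` and `openai_api_base` are the field names behind the shorter `model`, `api_key` and `base_url` aliases used before. A minimal sketch of the updated LLM setup, with a hand-rolled message list standing in for the notebook's memory mechanism; the environment loading and the example turns are assumptions, not the notebook's exact code:

```python
import os

from dotenv import load_dotenv
from langchain_core.messages import AIMessage, HumanMessage
from langchain_openai import ChatOpenAI

load_dotenv()
ai_endpoint_mistral7b = os.environ["LLM_AI_ENDPOINT"]
ai_endpoint_token = os.environ["OVH_AI_ENDPOINTS_ACCESS_TOKEN"]

# Set up the LLM with the renamed parameters from the diff above
llm = ChatOpenAI(
    model_name="Mistral-7B-Instruct-v0.3",
    openai_api_key=ai_endpoint_token,
    openai_api_base=ai_endpoint_mistral7b,
    max_tokens=512,
    temperature=0.0,
)

# Conversational memory at its simplest: resend the prior turns on every call
history = [HumanMessage(content="Hi, my name is Sam.")]
history.append(AIMessage(content=llm.invoke(history).content))
history.append(HumanMessage(content="What is my name?"))
print(llm.invoke(history).content)  # the model answers from the replayed history
```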
