|
26 | 26 | "\n", |
27 | 27 | " 1. Create an [Azure subscription](https://azure.microsoft.com).\n", |
28 | 28 | " 2. Create an Azure AI hub resource as explained at [How to create and manage an Azure AI Studio hub](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/create-azure-ai-resource).\n", |
29 | | - " 3. Deploy one model supporting the [Azure AI model inference API](https://aka.ms/azureai/modelinference). In this example we use a `Mistral-Large-2407` and a `Mistral-Small` deployment. \n", |
| 29 | + " 3. Deploy two models supporting the [Azure AI model inference API](https://aka.ms/azureai/modelinference). In this example we use a `mistral-medium-2505` and a `Mistral-Small` deployment.\n", |
30 | 30 | "\n", |
31 | 31 | " * You can follow the instructions at [Add and configure models to Azure AI model inference service](https://learn.microsoft.com/azure/ai-studio/ai-services/how-to/create-model-deployments)." |
32 | 32 | ] |
|
54 | 54 | { |
55 | 55 | "cell_type": "code", |
56 | 56 | "execution_count": null, |
57 | | - "metadata": {}, |
| 57 | + "metadata": { |
| 58 | + "name": "create_client" |
| 59 | + }, |
58 | 60 | "outputs": [], |
59 | 61 | "source": [ |
60 | 62 | "import os\n", |
|
63 | 65 | "model = AzureAIChatCompletionsModel(\n", |
64 | 66 | " endpoint=os.environ[\"AZURE_INFERENCE_ENDPOINT\"],\n", |
65 | 67 | " credential=os.environ[\"AZURE_INFERENCE_CREDENTIAL\"],\n", |
66 | | - " model=\"mistral-large-2407\",\n", |
| 68 | + " model=\"mistral-medium-2505\",\n", |
67 | 69 | ")" |
68 | 70 | ] |
69 | 71 | }, |
|
84 | 86 | { |
85 | 87 | "cell_type": "code", |
86 | 88 | "execution_count": null, |
87 | | - "metadata": {}, |
| 89 | + "metadata": { |
| 90 | + "name": "human_message" |
| 91 | + }, |
88 | 92 | "outputs": [], |
89 | 93 | "source": [ |
90 | 94 | "from langchain_core.messages import HumanMessage, SystemMessage\n", |
|
181 | 185 | { |
182 | 186 | "cell_type": "code", |
183 | 187 | "execution_count": null, |
184 | | - "metadata": {}, |
| 188 | + "metadata": { |
| 189 | + "name": "create_producer_verifier" |
| 190 | + }, |
185 | 191 | "outputs": [], |
186 | 192 | "source": [ |
187 | 193 | "from langchain_azure_ai.chat_models import AzureAIChatCompletionsModel\n", |
188 | 194 | "\n", |
189 | 195 | "producer = AzureAIChatCompletionsModel(\n", |
190 | 196 | " endpoint=os.environ[\"AZURE_INFERENCE_ENDPOINT\"],\n", |
191 | 197 | " credential=os.environ[\"AZURE_INFERENCE_CREDENTIAL\"],\n", |
192 | | - " model=\"mistral-large-2407\",\n", |
| 198 | + " model=\"mistral-medium-2505\",\n", |
193 | 199 | ")\n", |
194 | 200 | "\n", |
195 | 201 | "verifier = AzureAIChatCompletionsModel(\n", |
|
209 | 215 | { |
210 | 216 | "cell_type": "code", |
211 | 217 | "execution_count": null, |
212 | | - "metadata": {}, |
| 218 | + "metadata": { |
| 219 | + "name": "generate_poem" |
| 220 | + }, |
213 | 221 | "outputs": [], |
214 | 222 | "source": [ |
215 | 223 | "from langchain_core.prompts import PromptTemplate\n", |
|
242 | 250 | { |
243 | 251 | "cell_type": "code", |
244 | 252 | "execution_count": null, |
245 | | - "metadata": {}, |
| 253 | + "metadata": { |
| 254 | + "name": "create_output_parser" |
| 255 | + }, |
246 | 256 | "outputs": [], |
247 | 257 | "source": [ |
248 | 258 | "from langchain_core.output_parsers import StrOutputParser\n", |
|
260 | 270 | { |
261 | 271 | "cell_type": "code", |
262 | 272 | "execution_count": null, |
263 | | - "metadata": {}, |
| 273 | + "metadata": { |
| 274 | + "name": "create_chain" |
| 275 | + }, |
264 | 276 | "outputs": [], |
265 | 277 | "source": [ |
266 | 278 | "chain = producer_template | producer | parser | verifier_template | verifier | parser" |
|
276 | 288 | { |
277 | 289 | "cell_type": "code", |
278 | 290 | "execution_count": null, |
279 | | - "metadata": {}, |
| 291 | + "metadata": { |
| 292 | + "name": "create_multiple_outputs_chain" |
| 293 | + }, |
280 | 294 | "outputs": [], |
281 | 295 | "source": [ |
282 | 296 | "generate_poem = producer_template | producer | parser\n", |
|
286 | 300 | { |
287 | 301 | "cell_type": "code", |
288 | 302 | "execution_count": null, |
289 | | - "metadata": {}, |
| 303 | + "metadata": { |
| 304 | + "name": "create_chain_with_passthrough" |
| 305 | + }, |
290 | 306 | "outputs": [], |
291 | 307 | "source": [ |
292 | 308 | "from langchain_core.runnables import RunnablePassthrough, RunnableParallel\n", |
|
304 | 320 | { |
305 | 321 | "cell_type": "code", |
306 | 322 | "execution_count": null, |
307 | | - "metadata": {}, |
| 323 | + "metadata": { |
| 324 | + "name": "invoke_chain" |
| 325 | + }, |
308 | 326 | "outputs": [], |
309 | 327 | "source": [ |
310 | 328 | "chain.invoke({\"topic\": \"living in a foreign country\"})" |
|
329 | 347 | { |
330 | 348 | "cell_type": "code", |
331 | 349 | "execution_count": null, |
332 | | - "metadata": {}, |
| 350 | + "metadata": { |
| 351 | + "name": "configure_logging" |
| 352 | + }, |
333 | 353 | "outputs": [], |
334 | 354 | "source": [ |
335 | 355 | "import sys\n", |
|
363 | 383 | { |
364 | 384 | "cell_type": "code", |
365 | 385 | "execution_count": null, |
366 | | - "metadata": {}, |
| 386 | + "metadata": { |
| 387 | + "name": "create_client_with_logging" |
| 388 | + }, |
367 | 389 | "outputs": [], |
368 | 390 | "source": [ |
369 | 391 | "import os\n", |
|
372 | 394 | "model = AzureAIChatCompletionsModel(\n", |
373 | 395 | " endpoint=os.environ[\"AZURE_INFERENCE_ENDPOINT\"],\n", |
374 | 396 | " credential=os.environ[\"AZURE_INFERENCE_CREDENTIAL\"],\n", |
375 | | - " model=\"mistral-large-2407\",\n", |
| 397 | + " model=\"mistral-medium-2505\",\n", |
376 | 398 | " client_kwargs={\"logging_enable\": True},\n", |
377 | 399 | ")" |
378 | 400 | ] |
|
0 commit comments