|
77 | 77 | "source": [ |
78 | 78 | "## Set up connections\n", |
79 | 79 | "\n", |
80 | | - "The `sample.env` file contains environment variables for connections to Azure AI Search and Azure OpenAI in Azure AI Foundry. Agentic retrieval requires these connections for document retrieval, query planning, query execution, and answer generation.\n", |
| 80 | + "The `sample.env` file contains environment variables for connections to Azure AI Search and Azure OpenAI in Azure AI Foundry. Agentic retrieval requires these connections for document retrieval, query planning, and query execution.\n", |
81 | 81 | "\n", |
82 | | - "To set up connections:\n", |
| 82 | + "To set up the connections:\n", |
83 | 83 | "\n", |
84 | 84 | "1. Sign in to the [Azure portal](https://portal.azure.com).\n", |
85 | 85 | "\n", |
86 | | - "1. Get the endpoints for Azure AI Search (`https://my-service.search.windows.net`) and Azure OpenAI in Azure AI Foundry (`https://my-resource.openai.azure.com`).\n", |
| 86 | + "1. Get the endpoints for Azure AI Search (`https://your-search-service.search.windows.net`) and Azure OpenAI in Azure AI Foundry (`https://your-foundry-resource.openai.azure.com`).\n", |
87 | 87 | "\n", |
88 | 88 | "1. Save the `sample.env` file as `.env` on your local system.\n", |
89 | 89 | "\n", |
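The connection setup above ends with saving `sample.env` as `.env`. As a rough sketch of how a notebook cell might then load those values (the variable names below are placeholders for illustration; the real names come from `sample.env`, so check that file rather than relying on these), loading them with `python-dotenv` could look like this:

```python
# A minimal sketch, assuming python-dotenv is installed and the .env file sits
# next to the notebook. Variable names are hypothetical placeholders.
import os

from dotenv import load_dotenv

load_dotenv(override=True)  # read the .env file saved in the previous step

# Hypothetical names for the two endpoints described above.
search_endpoint = os.environ["AZURE_SEARCH_ENDPOINT"]
aoai_endpoint = os.environ["AZURE_OPENAI_ENDPOINT"]

print(search_endpoint, aoai_endpoint)
```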
|
126 | 126 | }, |
127 | 127 | { |
128 | 128 | "cell_type": "code", |
129 | | - "execution_count": 2, |
| 129 | + "execution_count": 23, |
130 | 130 | "id": "041e5d89", |
131 | 131 | "metadata": {}, |
132 | 132 | "outputs": [], |
|
136 | 136 | }, |
137 | 137 | { |
138 | 138 | "cell_type": "code", |
139 | | - "execution_count": 3, |
| 139 | + "execution_count": 24, |
140 | 140 | "id": "2df3a118", |
141 | 141 | "metadata": {}, |
142 | 142 | "outputs": [], |
|
175 | 175 | }, |
176 | 176 | { |
177 | 177 | "cell_type": "code", |
178 | | - "execution_count": 6, |
| 178 | + "execution_count": 27, |
179 | 179 | "id": "ee48bec5", |
180 | 180 | "metadata": {}, |
181 | 181 | "outputs": [ |
|
248 | 248 | }, |
249 | 249 | { |
250 | 250 | "cell_type": "code", |
251 | | - "execution_count": 7, |
| 251 | + "execution_count": 28, |
252 | 252 | "id": "ded5147b", |
253 | 253 | "metadata": {}, |
254 | 254 | "outputs": [ |
|
285 | 285 | }, |
286 | 286 | { |
287 | 287 | "cell_type": "code", |
288 | | - "execution_count": 8, |
| 288 | + "execution_count": 29, |
289 | 289 | "id": "e3415954", |
290 | 290 | "metadata": {}, |
291 | 291 | "outputs": [ |
|
329 | 329 | }, |
330 | 330 | { |
331 | 331 | "cell_type": "code", |
332 | | - "execution_count": 9, |
| 332 | + "execution_count": 30, |
333 | 333 | "id": "d3fe4183", |
334 | 334 | "metadata": {}, |
335 | 335 | "outputs": [ |
|
380 | 380 | "source": [ |
381 | 381 | "## Set up messages\n", |
382 | 382 | "\n", |
383 | | - "Messages are the input for the retrieval route and contain the conversation history. Each message includes a `role` that indicates its origin, such as `assistant` or `user`, and `content` in natural language. The LLM you use determines which roles are valid." |
| 383 | + "Messages are the input for the retrieval route and contain the conversation history. Each message includes a `role` that indicates its origin, such as `system` or `user`, and `content` in natural language. The LLM you use determines which roles are valid." |
384 | 384 | ] |
385 | 385 | }, |
386 | 386 | { |
387 | 387 | "cell_type": "code", |
388 | | - "execution_count": 5, |
| 388 | + "execution_count": 31, |
389 | 389 | "id": "2ab7b970", |
390 | 390 | "metadata": {}, |
391 | 391 | "outputs": [], |
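For reference, a messages list of the kind described in the "Set up messages" cell above might look like the following. The roles and wording here are illustrative only; the notebook's own cell (id `2ab7b970`) defines the actual values.

```python
# Illustrative only: the conversation history passed to the retrieval route.
# Each message has a role (here "system" and "user") and natural-language content.
messages = [
    {
        "role": "system",
        "content": "You are a helpful assistant that answers questions about Earth at night.",
    },
    {
        "role": "user",
        "content": "Why does nighttime lighting differ between urban cores and suburban belts?",
    },
]
```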
|
425 | 425 | }, |
426 | 426 | { |
427 | 427 | "cell_type": "code", |
428 | | - "execution_count": 6, |
| 428 | + "execution_count": 33, |
429 | 429 | "id": "918ded26", |
430 | 430 | "metadata": {}, |
431 | 431 | "outputs": [ |
432 | 432 | { |
433 | 433 | "name": "stdout", |
434 | 434 | "output_type": "stream", |
435 | 435 | "text": [ |
436 | | - "Retrieved content from 'knowledge_source_name' successfully.\n" |
| 436 | + "Retrieved content from 'earth-knowledge-source' successfully.\n" |
437 | 437 | ] |
438 | 438 | } |
439 | 439 | ], |
|
468 | 468 | ")\n", |
469 | 469 | "\n", |
470 | 470 | "result = agent_client.retrieve(retrieval_request=req, api_version=search_api_version)\n", |
471 | | - "print(\"Retrieved content from 'knowledge_source_name' successfully.\")" |
| 471 | + "print(f\"Retrieved content from '{knowledge_source_name}' successfully.\")" |
472 | 472 | ] |
473 | 473 | }, |
474 | 474 | { |
|
480 | 480 | "\n", |
481 | 481 | "Because your knowledge agent is configured for answer synthesis, the retrieval response contains the following values:\n", |
482 | 482 | "\n", |
483 | | - "+ `response_content`: An LLM-generated answer to the query based on the retrieved documents.\n", |
| 483 | + "+ `response_content`: An LLM-generated answer to the query that cites the retrieved documents.\n", |
484 | 484 | "\n", |
485 | 485 | "+ `activity_content`: Detailed planning and execution information, including subqueries, reranking decisions, and intermediate steps.\n", |
486 | 486 | "\n", |
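As a hedged sketch of how a later cell might surface these values: the attribute names on `result` below (`response`, `activity`, `references`) are assumptions based on the preview `azure-search-documents` knowledge retrieval models and may differ in your installed SDK version.

```python
# Assumption: the retrieval result exposes response, activity, and references
# collections, as in the preview knowledge agent models. Verify against your SDK.
response_content = result.response[0].content[0].text if result.response else None
activity_content = result.activity or []
references_content = result.references or []

print(response_content)
print(f"{len(activity_content)} activity records, {len(references_content)} references")
```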
|
685 | 685 | }, |
686 | 686 | { |
687 | 687 | "cell_type": "code", |
688 | | - "execution_count": 12, |
| 688 | + "execution_count": null, |
689 | 689 | "id": "da260539", |
690 | 690 | "metadata": {}, |
691 | 691 | "outputs": [ |
|
720 | 720 | ")\n", |
721 | 721 | "\n", |
722 | 722 | "result = agent_client.retrieve(retrieval_request=req, api_version=search_api_version)\n", |
723 | | - "print(\"Retrieved content from 'knowledge_source_name' successfully.\")" |
| 723 | + "print(f\"Retrieved content from '{knowledge_source_name}' successfully.\")" |
724 | 724 | ] |
725 | 725 | }, |
726 | 726 | { |
|