|
79 | 79 | "cell_type": "markdown", |
80 | 80 | "metadata": {}, |
81 | 81 | "source": [ |
82 | | - "## Run the code\n", |
| 82 | + "## Create a virtual environment\n", |
83 | 83 | "\n", |
84 | | - "1. Create a virtual environment. In Visual Studio Code, press Ctrl-shift-P to open the command palette, search for \"Python: Create Environment\", and then select `Venv` to create a virtual environment in the current workspace.\n", |
| 84 | + "Create a virtual environment so that you can install the dependencies in isolation.\n", |
| 85 | + "\n", |
| 86 | + "1. In Visual Studio Code, press Ctrl+Shift+P to open the command palette, search for \"Python: Create Environment\", and then select `Venv` to create a virtual environment in the current workspace.\n", |
85 | 87 | "\n", |
86 | 88 | "1. Select Quickstart-RAG\\requirements.txt for the dependencies.\n", |
87 | 89 | "\n", |
88 | | - "It takes several minutes to create the environment. When the environment is ready, continue to the next step." |
| 90 | + "It takes several minutes to create the environment. When the environment is ready, continue to the next step.\n", |
| 91 | + "\n", |
| 92 | + "## Run the code" |
89 | 93 | ] |
90 | 94 | }, |
91 | 95 | { |
92 | 96 | "cell_type": "code", |
93 | | - "execution_count": 20, |
| 97 | + "execution_count": 1, |
94 | 98 | "metadata": {}, |
95 | 99 | "outputs": [], |
96 | 100 | "source": [ |
|
100 | 104 | }, |
101 | 105 | { |
102 | 106 | "cell_type": "code", |
103 | | - "execution_count": 22, |
| 107 | + "execution_count": 2, |
104 | 108 | "metadata": {}, |
105 | 109 | "outputs": [], |
106 | 110 | "source": [ |
|
112 | 116 | }, |
113 | 117 | { |
114 | 118 | "cell_type": "code", |
115 | | - "execution_count": 23, |
| 119 | + "execution_count": 3, |
116 | 120 | "metadata": {}, |
117 | 121 | "outputs": [], |
118 | 122 | "source": [ |
|
124 | 128 | }, |
125 | 129 | { |
126 | 130 | "cell_type": "code", |
127 | | - "execution_count": 24, |
128 | | - "metadata": {}, |
129 | | - "outputs": [], |
130 | | - "source": [ |
131 | | - "# Set up the clients, define a chat instance, create a search function\n", |
132 | | - "from azure.core.credentials_async import AsyncTokenCredential\n", |
133 | | - "from azure.identity.aio import get_bearer_token_provider\n", |
134 | | - "from azure.search.documents.aio import SearchClient\n", |
135 | | - "from openai import AsyncAzureOpenAI\n", |
136 | | - "from enum import Enum\n", |
137 | | - "from typing import List, Optional\n", |
138 | | - "\n", |
139 | | - "def create_openai_client(credential: AsyncTokenCredential) -> AsyncAzureOpenAI:\n", |
140 | | - " token_provider = get_bearer_token_provider(credential, \"https://cognitiveservices.azure.com/.default\")\n", |
141 | | - " return AsyncAzureOpenAI(\n", |
142 | | - " api_version=\"2024-04-01-preview\",\n", |
143 | | - " azure_endpoint=AZURE_OPENAI_ACCOUNT,\n", |
144 | | - " azure_ad_token_provider=token_provider\n", |
145 | | - " )\n", |
146 | | - "\n", |
147 | | - "def create_search_client(credential: AsyncTokenCredential) -> SearchClient:\n", |
148 | | - " return SearchClient(\n", |
149 | | - " endpoint=AZURE_SEARCH_SERVICE,\n", |
150 | | - " index_name=\"hotels-sample-index\",\n", |
151 | | - " credential=credential\n", |
152 | | - " )\n", |
153 | | - "\n", |
154 | | - "# This quickstart is only using text at this time\n", |
155 | | - "class SearchType(Enum):\n", |
156 | | - " TEXT = \"text\"\n", |
157 | | - " VECTOR = \"vector\"\n", |
158 | | - " HYBRID = \"hybrid\"\n", |
159 | | - "\n", |
160 | | - "# This function retrieves the sselected fields from the search index\n", |
161 | | - "async def get_sources(search_client: SearchClient, query: str, search_type: SearchType, use_semantic_reranker: bool = True, sources_to_include: int = 5) -> List[str]:\n", |
162 | | - " search_type == SearchType.TEXT,\n", |
163 | | - " response = await search_client.search(\n", |
164 | | - " search_text=query,\n", |
165 | | - " query_type=\"semantic\" if use_semantic_reranker else \"simple\",\n", |
166 | | - " top=sources_to_include,\n", |
167 | | - " select=\"Description,HotelName,Tags\"\n", |
168 | | - " )\n", |
169 | | - "\n", |
170 | | - " return [ document async for document in response ]\n", |
171 | | - "\n", |
172 | | - "GROUNDED_PROMPT=\"\"\"\n", |
173 | | - "You are a friendly assistant that recommends hotels based on activities and amenities.\n", |
174 | | - "Answer the query using only the sources provided below in a friendly and concise bulleted manner.\n", |
175 | | - "Answer ONLY with the facts listed in the list of sources below.\n", |
176 | | - "If there isn't enough information below, say you don't know.\n", |
177 | | - "Do not generate answers that don't use the sources below.\n", |
178 | | - "Query: {query}\n", |
179 | | - "Sources:\\n{sources}\n", |
180 | | - "\"\"\"\n", |
181 | | - "class ChatThread:\n", |
182 | | - " def __init__(self):\n", |
183 | | - " self.messages = []\n", |
184 | | - " self.search_results = []\n", |
185 | | - " \n", |
186 | | - " def append_message(self, role: str, message: str):\n", |
187 | | - " self.messages.append({\n", |
188 | | - " \"role\": role,\n", |
189 | | - " \"content\": message\n", |
190 | | - " })\n", |
191 | | - "\n", |
192 | | - " async def append_grounded_message(self, search_client: SearchClient, query: str, search_type: SearchType, use_semantic_reranker: bool = True, sources_to_include: int = 5):\n", |
193 | | - " sources = await get_sources(search_client, query, search_type, use_semantic_reranker, sources_to_include)\n", |
194 | | - " sources_formatted = \"\\n\".join([f'{document[\"HotelName\"]}:{document[\"Description\"]}:{document[\"Tags\"]}' for document in sources])\n", |
195 | | - " self.append_message(role=\"user\", message=GROUNDED_PROMPT.format(query=query, sources=sources_formatted))\n", |
196 | | - " self.search_results.append(\n", |
197 | | - " {\n", |
198 | | - " \"message_index\": len(self.messages) - 1,\n", |
199 | | - " \"query\": query,\n", |
200 | | - " \"sources\": sources\n", |
201 | | - " }\n", |
202 | | - " )\n", |
203 | | - "\n", |
204 | | - " async def get_openai_response(self, openai_client: AsyncAzureOpenAI, model: str):\n", |
205 | | - " response = await openai_client.chat.completions.create(\n", |
206 | | - " messages=self.messages,\n", |
207 | | - " model=model\n", |
208 | | - " )\n", |
209 | | - " self.append_message(role=\"assistant\", message=response.choices[0].message.content)\n", |
210 | | - "\n", |
211 | | - " def get_last_message(self) -> Optional[object]:\n", |
212 | | - " return self.messages[-1] if len(self.messages) > 0 else None\n", |
213 | | - "\n", |
214 | | - " def get_last_message_sources(self) -> Optional[List[object]]:\n", |
215 | | - " return self.search_results[-1][\"sources\"] if len(self.search_results) > 0 else None" |
216 | | - ] |
217 | | - }, |
218 | | - { |
219 | | - "cell_type": "code", |
220 | | - "execution_count": 28, |
| 131 | + "execution_count": 5, |
221 | 132 | "metadata": {}, |
222 | 133 | "outputs": [ |
223 | 134 | { |
224 | 135 | "name": "stdout", |
225 | 136 | "output_type": "stream", |
226 | 137 | "text": [ |
227 | | - "Based on your preferences, I recommend the following hotels:\n", |
| 138 | + "Sure, here are a few hotels that match your criteria: \n", |
228 | 139 | "\n", |
229 | | - "- Ocean Air Motel: This oceanfront hotel overlooks the beach and features rooms with private balconies, 2 indoor and outdoor pools, and it's located steps away from various shops and entertainment on the boardwalk.\n", |
230 | | - "- Marquis Plaza & Suites: Offering views of the ocean and amenities like free Wi-Fi, a full kitchen, and a free breakfast buffet, this hotel is a great option for those who want to spend a day at the aquarium and then come home to relax.\n", |
231 | | - "- Trails End Motel: Although not directly on the ocean, this non-smoking hotel offers great views, an on-site bar/restaurant, a hot breakfast buffet, and free wireless internet. Only 8 miles from downtown and 15 miles from the airport, it's a convenient choice.\n" |
| 140 | + "- Ocean Air Motel, which is an oceanfront hotel with beach access, private balconies, indoor and outdoor pools, shops, and art entertainment nearby.\n", |
| 141 | + "- Marquis Plaza & Suites, which offers ocean views, free Wi-Fi, a full kitchen, and free breakfast buffet, along with beach access and a pool.\n", |
| 142 | + "- Oceanside Resort is a new luxury hotel with bay views from every room, a rooftop pool, waterfront dining, and laundry service. \n", |
| 143 | + "\n", |
| 144 | + "Unfortunately, the Double Sanctuary Resort and Triple Landscape Hotel don't have beach access or mention views.\n" |
232 | 145 | ] |
233 | 146 | } |
234 | 147 | ], |
235 | 148 | "source": [ |
236 | | - "# Instantiate the chat thread and run the conversation\n", |
237 | | - "import azure.identity.aio\n", |
238 | | - "\n", |
239 | | - "chat_thread = ChatThread()\n", |
240 | | - "chat_deployment = AZURE_DEPLOYMENT_MODEL\n", |
241 | | - "\n", |
242 | | - "async with azure.identity.aio.DefaultAzureCredential() as credential, create_search_client(credential) as search_client, create_openai_client(credential) as openai_client:\n", |
243 | | - " await chat_thread.append_grounded_message(\n", |
244 | | - " search_client=search_client,\n", |
245 | | - " query=\"Can you recommend a few hotels near the ocean with beach access and good views\",\n", |
246 | | - " search_type=SearchType(search_type),\n", |
247 | | - " use_semantic_reranker=use_semantic_reranker,\n", |
248 | | - " sources_to_include=sources_to_include\n", |
249 | | - " )\n", |
250 | | - " await chat_thread.get_openai_response(openai_client=openai_client, model=chat_deployment)\n", |
| 149 | + "# Set up the query for generating responses\n", |
| 150 | + "from azure.identity import DefaultAzureCredential\n", |
| 151 | + "from azure.identity import get_bearer_token_provider\n", |
| 152 | + "from azure.search.documents import SearchClient\n", |
| 153 | + "from openai import AzureOpenAI\n", |
| 154 | + "\n", |
| 155 | + "credential = DefaultAzureCredential()\n", |
| 156 | + "token_provider = get_bearer_token_provider(credential, \"https://cognitiveservices.azure.com/.default\")\n", |
| 157 | + "openai_client = AzureOpenAI(\n", |
| 158 | + " api_version=\"2024-06-01\",\n", |
| 159 | + " azure_endpoint=AZURE_OPENAI_ACCOUNT,\n", |
| 160 | + " azure_ad_token_provider=token_provider\n", |
| 161 | + ")\n", |
| 162 | + "\n", |
| 163 | + "search_client = SearchClient(\n", |
| 164 | + " endpoint=AZURE_SEARCH_SERVICE,\n", |
| 165 | + " index_name=\"hotels-sample-index\",\n", |
| 166 | + " credential=credential\n", |
| 167 | + ")\n", |
| 168 | + "\n", |
| 169 | + "# This prompt provides instructions to the model\n", |
| 170 | + "GROUNDED_PROMPT=\"\"\"\n", |
| 171 | + "You are a friendly assistant that recommends hotels based on activities and amenities.\n", |
| 172 | + "Answer the query using only the sources provided below in a friendly and concise bulleted manner.\n", |
| 173 | + "Answer ONLY with the facts listed in the list of sources below.\n", |
| 174 | + "If there isn't enough information below, say you don't know.\n", |
| 175 | + "Do not generate answers that don't use the sources below.\n", |
| 176 | + "Query: {query}\n", |
| 177 | + "Sources:\\n{sources}\n", |
| 178 | + "\"\"\"\n", |
251 | 179 | "\n", |
252 | | - "print(chat_thread.get_last_message()[\"content\"])" |
| 180 | + "# Query is the question being asked. It's sent to the search engine and the LLM.\n", |
| 181 | + "query=\"Can you recommend a few hotels near the ocean with beach access and good views\"\n", |
| 182 | + "\n", |
| 183 | + "# Set up the search results and the chat thread.\n", |
| 184 | + "# Retrieve the selected fields from the search index related to the question.\n", |
| 185 | + "search_results = search_client.search(\n", |
| 186 | + " search_text=query,\n", |
| 187 | + " top=5,\n", |
| 188 | + " select=\"Description,HotelName,Tags\"\n", |
| 189 | + ")\n", |
| 190 | + "sources_formatted = \"\\n\".join([f'{document[\"HotelName\"]}:{document[\"Description\"]}:{document[\"Tags\"]}' for document in search_results])\n", |
| 191 | + "\n", |
| 192 | + "response = openai_client.chat.completions.create(\n", |
| 193 | + " messages=[\n", |
| 194 | + " {\n", |
| 195 | + " \"role\": \"user\",\n", |
| 196 | + " \"content\": GROUNDED_PROMPT.format(query=query, sources=sources_formatted)\n", |
| 197 | + " }\n", |
| 198 | + " ],\n", |
| 199 | + " model=AZURE_DEPLOYMENT_MODEL\n", |
| 200 | + ")\n", |
| 201 | + "\n", |
| 202 | + "print(response.choices[0].message.content)" |
253 | 203 | ] |
254 | 204 | }, |
255 | 205 | { |
256 | 206 | "cell_type": "markdown", |
257 | 207 | "metadata": {}, |
258 | 208 | "source": [ |
259 | | - "If you get an authorization error message, wait a few minutes and try again. It can take several minutes for role assignments to become operational." |
| 209 | + "If you get an authorization error instead of results, wait a few minutes and try again. It can take several minutes for role assignments to become operational." |
260 | 210 | ] |
261 | 211 | } |
262 | 212 | ], |
|
0 commit comments