|
6 | 6 | "source": [ |
7 | 7 | "\n", |
8 | 8 | "\n", |
9 | | - "# LLM Session Memory - Multiple Sessions\n", |
| 9 | + "# LLM Message History\n", |
10 | 10 | "\n", |
11 | | - "Large Language Models are inherently stateless and have no knowledge of previous interactions with a user, or even of previous parts of the current conversation. While this may not be noticeable when asking simple questions, it becomes a hinderance when engaging in long running conversations that rely on conversational context.\n", |
| 11 | + "Large Language Models are inherently stateless and have no knowledge of previous interactions with a user, or even of previous parts of the current conversation. While this may not be noticeable when asking simple questions, it becomes a hindrance when engaging in long running conversations that rely on conversational context.\n", |
12 | 12 | "\n", |
13 | 13 | "The solution to this problem is to append the previous conversation history to each subsequent call to the LLM.\n", |
14 | 14 | "\n", |
15 | | - "This notebook will show how to use Redis to structure and store and retrieve this conversational session memory.\n", |
| 15 | + "This notebook will show how to use Redis to structure, store, and retrieve this conversational message history.\n", |
16 | 16 | "\n", |
17 | 17 | "## Let's Begin!\n", |
18 | | - "<a href=\"https://colab.research.google.com/github/redis-developer/redis-ai-resources/blob/main/python-recipes/session-manager/00_session_manager.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n" |
| 18 | + "<a href=\"https://colab.research.google.com/github/redis-developer/redis-ai-resources/blob/main/python-recipes/llm-message-history/00_message_history.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n" |
19 | 19 | ] |
20 | 20 | }, |
21 | 21 | { |
|
31 | 31 | "metadata": {}, |
32 | 32 | "outputs": [], |
33 | 33 | "source": [ |
34 | | - "%pip install cohere \"redisvl>=0.4.1\" sentence-transformers" |
| 34 | + "%pip install cohere \"redisvl>=0.6.0\" sentence-transformers" |
35 | 35 | ] |
36 | 36 | }, |
37 | 37 | { |
|
153 | 153 | " return response.text\n", |
154 | 154 | "\n", |
155 | 155 | " def remap(self, context) -> List[Dict]:\n", |
156 | | - " ''' re-index the chat history to match the Cohere API requirements '''\n", |
| 156 | + " ''' re-index the message history to match the Cohere API requirements '''\n", |
157 | 157 | " new_context = []\n", |
158 | 158 | " for statement in context:\n", |
159 | 159 | " if statement[\"role\"] == \"user\":\n", |
|
174 | 174 | "cell_type": "markdown", |
175 | 175 | "metadata": {}, |
176 | 176 | "source": [ |
177 | | - "### Import SemanticSessionManager\n", |
| 177 | + "### Import MessageHistory\n", |
178 | 178 | "\n", |
179 | | - "redisvl provides the SemanticSessionManager for easy management of session state." |
| 179 | + "redisvl provides the MessageHistory and SemanticMessageHistory classes for easy management of LLM conversations." |
180 | 180 | ] |
181 | 181 | }, |
182 | 182 | { |
|
185 | 185 | "metadata": {}, |
186 | 186 | "outputs": [], |
187 | 187 | "source": [ |
188 | | - "from redisvl.extensions.session_manager import SemanticSessionManager\n", |
| 188 | + "from redisvl.extensions.message_history import SemanticMessageHistory\n", |
189 | 189 | "\n", |
190 | | - "user_session = SemanticSessionManager(name=\"llm chef\")\n", |
191 | | - "user_session.add_message({\"role\":\"system\", \"content\":\"You are a helpful chef, assisting people in making delicious meals\"})" |
| 190 | + "user_history = SemanticMessageHistory(name=\"llm chef\")\n", |
| 191 | + "user_history.add_message({\"role\":\"system\", \"content\":\"You are a helpful chef, assisting people in making delicious meals\"})" |
192 | 192 | ] |
193 | 193 | }, |
194 | 194 | { |
|
224 | 224 | ], |
225 | 225 | "source": [ |
226 | 226 | "prompt = \"can you give me some ideas for breakfast?\"\n", |
227 | | - "context = user_session.get_recent()\n", |
| 227 | + "context = user_history.get_recent()\n", |
228 | 228 | "response = client.converse(prompt=prompt, context=context)\n", |
229 | | - "user_session.store(prompt, response)\n", |
| 229 | + "user_history.store(prompt, response)\n", |
230 | 230 | "print('USER: ', prompt)\n", |
231 | 231 | "print('\\nLLM: ', response)" |
232 | 232 | ] |
|
286 | 286 | ], |
287 | 287 | "source": [ |
288 | 288 | "prompt = \"can you give me the recipe for those pancakes?\"\n", |
289 | | - "context = user_session.get_recent()\n", |
| 289 | + "context = user_history.get_recent()\n", |
290 | 290 | "response = client.converse(prompt=prompt, context=context)\n", |
291 | | - "user_session.store(prompt, response)\n", |
| 291 | + "user_history.store(prompt, response)\n", |
292 | 292 | "print('USER: ', prompt)\n", |
293 | 293 | "print('\\nLLM: ', response)" |
294 | 294 | ] |
|
360 | 360 | ], |
361 | 361 | "source": [ |
362 | 362 | "prompt =\"I am vegetarian. Can you remove the eggs?\"\n", |
363 | | - "context = user_session.get_recent()\n", |
| 363 | + "context = user_history.get_recent()\n", |
364 | 364 | "response = client.converse(prompt=prompt, context=context)\n", |
365 | | - "user_session.store(prompt, response)\n", |
| 365 | + "user_history.store(prompt, response)\n", |
366 | 366 | "print('USER: ', prompt)\n", |
367 | 367 | "print('\\nLLM: ', response)" |
368 | 368 | ] |
|
436 | 436 | ], |
437 | 437 | "source": [ |
438 | 438 | "prompt = \"I am also vegan. Can you replace the butter too?\"\n", |
439 | | - "context = user_session.get_recent()\n", |
| 439 | + "context = user_history.get_recent()\n", |
440 | 440 | "response = client.converse(prompt=prompt, context=context)\n", |
441 | | - "user_session.store(prompt, response)\n", |
| 441 | + "user_history.store(prompt, response)\n", |
442 | 442 | "print('USER: ', prompt)\n", |
443 | 443 | "print('\\nLLM: ', response)" |
444 | 444 | ] |
|
521 | 521 | ], |
522 | 522 | "source": [ |
523 | 523 | "prompt = \"I changed my mind. Can you give me the first recipe from your list?\"\n", |
524 | | - "context = user_session.get_recent(top_k=5)\n", |
| 524 | + "context = user_history.get_recent(top_k=5)\n", |
525 | 525 | "response = client.converse(prompt=prompt, context=context)\n", |
526 | | - "user_session.store(prompt, response)\n", |
| 526 | + "user_history.store(prompt, response)\n", |
527 | 527 | "print('USER: ', prompt)\n", |
528 | 528 | "print('\\nLLM: ', response)" |
529 | 529 | ] |
|
561 | 561 | "cell_type": "markdown", |
562 | 562 | "metadata": {}, |
563 | 563 | "source": [ |
564 | | - "## Semantic session memory" |
| 564 | + "## Semantic message history" |
565 | 565 | ] |
566 | 566 | }, |
567 | 567 | { |
|
608 | 608 | ], |
609 | 609 | "source": [ |
610 | 610 | "prompt = \"Can you give me the avocado one?\"\n", |
611 | | - "user_session.set_distance_threshold(0.75)\n", |
612 | | - "context = user_session.get_relevant(prompt=prompt)\n", |
| 611 | + "user_history.set_distance_threshold(0.75)\n", |
| 612 | + "context = user_history.get_relevant(prompt=prompt)\n", |
613 | 613 | "response = client.converse(prompt=prompt, context=context)\n", |
614 | | - "user_session.store(prompt, response)\n", |
| 614 | + "user_history.store(prompt, response)\n", |
615 | 615 | "print('USER: ', prompt)\n", |
616 | 616 | "print('\\nLLM: ', response)" |
617 | 617 | ] |
|
648 | 648 | "metadata": {}, |
649 | 649 | "outputs": [], |
650 | 650 | "source": [ |
651 | | - "user_session.clear()" |
| 651 | + "user_history.clear()" |
652 | 652 | ] |
653 | 653 | } |
654 | 654 | ], |
|
0 commit comments