|
11 | 11 | }, |
12 | 12 | { |
13 | 13 | "cell_type": "code", |
14 | | - "execution_count": 1, |
| 14 | + "execution_count": null, |
15 | 15 | "metadata": { |
16 | 16 | "cellView": "form", |
17 | 17 | "id": "tuOe1ymfHZPu" |
|
65 | 65 | "id": "D64fjiFDke_P" |
66 | 66 | }, |
67 | 67 | "source": [ |
68 | | - "In some cases you might want to stop the generation.\n", |
| 68 | + "For some use cases, you may want to stop a model's generation to insert specific results. For example, language models may have trouble with the complicated arithmetic in word problems.\n", |
| 69 | + "This tutorial shows an example of using an external tool with the `palm.chat` method to output the correct answer to a word problem.\n", |
69 | 70 | "\n", |
70 | | - "For example, models like this may have trouble with harder arithmetic, like this version of the problem:" |
| 71 | + "This particular example uses the [`numexpr`](https://github.com/pydata/numexpr) tool to perform the arithmetic, but you can use this same procedure to integrate other tools specific to your use case. The following is an outline of the steps:\n", |
| 72 | + "\n", |
| 73 | + "1. Determine a `start` and `end` tag to demarcate the text to send the tool.\n", |
| 74 | + "1. Create a prompt instructing the model how to use the tags in its response.\n", |
| 75 | + "1. From the model response, take the text between the `start` and `end` tags as input to the tool.\n", |
| 76 | + "1. Drop everything after the `end` tag.\n", |
| 77 | + "1. Run the tool and add its output as your reply.\n", |
| 78 | + "1. The model will take the tool's output into account in its reply (a rough sketch of this loop follows below)." |
71 | 79 | ] |
72 | 80 | }, |
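Before walking through the cells, here is a minimal sketch of that loop. It is an illustration only: the `<calc>`/`</calc>` tags, the `chat_with_calculator` helper, and the plain `palm.chat` call are assumptions, while the notebook below builds the same flow with its own prompt text and a `retry_chat` wrapper.

```python
import numexpr
import google.generativeai as palm

CALC_START = "<calc>"   # assumed start tag
CALC_END = "</calc>"    # assumed end tag

def chat_with_calculator(model, prompt):
    """Answer `prompt`, letting the model delegate arithmetic to numexpr."""
    response = palm.chat(model=model, messages=prompt)
    while CALC_START in response.last and CALC_END in response.last:
        # Take the text between the tags as input to the tool; anything the
        # model generated after the end tag is simply discarded here.
        expression = response.last.split(CALC_START, 1)[1].split(CALC_END, 1)[0]
        # Run the tool and send its output back as the next user turn.
        result = numexpr.evaluate(expression).item()
        response = response.reply(str(result))
    return response.last
```

The loop keeps answering calculator requests until the model produces a turn with no tags; that final message is taken as the answer.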
73 | 81 | { |
74 | 82 | "cell_type": "code", |
75 | | - "execution_count": 24, |
| 83 | + "execution_count": null, |
76 | 84 | "metadata": { |
77 | 85 | "id": "v8d0FtO2KJ3O" |
78 | 86 | }, |
79 | | - "outputs": [], |
| 87 | + "outputs": [ |
| 88 | + { |
| 89 | + "name": "stdout", |
| 90 | + "output_type": "stream", |
| 91 | + "text": [ |
| 92 | + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m122.2/122.2 kB\u001b[0m \u001b[31m2.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", |
| 93 | + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m113.3/113.3 kB\u001b[0m \u001b[31m5.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", |
| 94 | + "\u001b[?25h" |
| 95 | + ] |
| 96 | + } |
| 97 | + ], |
80 | 98 | "source": [ |
81 | 99 | "pip install -q google.generativeai" |
82 | 100 | ] |
83 | 101 | }, |
84 | 102 | { |
85 | 103 | "cell_type": "code", |
86 | | - "execution_count": 26, |
| 104 | + "execution_count": null, |
87 | 105 | "metadata": { |
88 | 106 | "id": "TxbqsnIWRheU" |
89 | 107 | }, |
|
94 | 112 | }, |
95 | 113 | { |
96 | 114 | "cell_type": "code", |
97 | | - "execution_count": 27, |
| 115 | + "execution_count": null, |
98 | 116 | "metadata": { |
99 | 117 | "id": "BH9oQzP2SD4L" |
100 | 118 | }, |
|
113 | 131 | }, |
114 | 132 | { |
115 | 133 | "cell_type": "code", |
116 | | - "execution_count": 28, |
| 134 | + "execution_count": null, |
117 | 135 | "metadata": { |
118 | 136 | "id": "VQK6KRDpKD8g" |
119 | 137 | }, |
|
125 | 143 | }, |
126 | 144 | { |
127 | 145 | "cell_type": "code", |
128 | | - "execution_count": 101, |
| 146 | + "execution_count": null, |
| 147 | + "metadata": { |
| 148 | + "id": "UGy1ghOp0ibu" |
| 149 | + }, |
| 150 | + "outputs": [], |
| 151 | + "source": [ |
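| | + "# Pick a model that supports chat, i.e. the generateMessage method.\n", |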
| 152 | + "models = [m for m in palm.list_models() if 'generateMessage' in m.supported_generation_methods]\n", |
| 153 | + "model = models[0].name\n", |
| 154 | + "print(model)" |
| 155 | + ] |
| 156 | + }, |
| 157 | + { |
| 158 | + "cell_type": "code", |
| 159 | + "execution_count": null, |
129 | 160 | "metadata": { |
130 | 161 | "id": "1TIekNZ1rrOd" |
131 | 162 | }, |
|
144 | 175 | }, |
145 | 176 | { |
146 | 177 | "cell_type": "code", |
147 | | - "execution_count": 81, |
| 178 | + "execution_count": null, |
148 | 179 | "metadata": { |
149 | 180 | "id": "IRoDEoU1Bq_6" |
150 | 181 | }, |
|
177 | 208 | ], |
178 | 209 | "source": [ |
179 | 210 | "response = retry_chat(\n", |
180 | | - " model=\"models/chat-bison-001\",\n", |
181 | | - " context=\"You are an expert at solving word problems.\"\n", |
| 211 | + " model=model,\n", |
| 212 | + " context=\"You are an expert at solving word problems.\",\n", |
182 | 213 | " messages=question,\n", |
183 | 214 | ")\n", |
184 | 215 | "\n", |
|
191 | 222 | "id": "zG4bukqCIQ4f" |
192 | 223 | }, |
193 | 224 | "source": [ |
194 | | - "Just like that, it's usually incorrect.\n", |
| 225 | + "As is, the prompt usually generates incorrect results.\n", |
195 | 226 | "It generally gets the steps right but the arithmetic wrong.\n", |
196 | 227 | "\n", |
197 | 228 | "The answer should be:" |
198 | 229 | ] |
199 | 230 | }, |
200 | 231 | { |
201 | 232 | "cell_type": "code", |
202 | | - "execution_count": 82, |
| 233 | + "execution_count": null, |
203 | 234 | "metadata": { |
204 | 235 | "id": "9Jw0L9OoCAHZ" |
205 | 236 | }, |
|
226 | 257 | "id": "91LETezvs6LU" |
227 | 258 | }, |
228 | 259 | "source": [ |
229 | | - "So give the model access to a calculator. You can do that by adding something like this to the prompt:" |
| 260 | + "In this next attempt, give the model instructions on how to access the calculator. You can do that by specifying a `start` and `end` tag the model can use to indicate where a calculation is needed. Add something like the following to the prompt:" |
230 | 261 | ] |
231 | 262 | }, |
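The exact prompt lives in the next cell; as a hypothetical illustration (the tag names and wording here are assumptions, not the notebook's actual `calc_prompt`), the added instruction could look something like this:

```python
# A hypothetical calc_prompt; the tags and wording are illustrative only.
question = "..."  # the word problem defined in the earlier cell

calc_prompt = f"""
{question}

Only do one step per response.

I'll act as your calculator for this exercise.

To use the calculator, put the expression you want evaluated between <calc>
and </calc> tags and end your turn. I will reply with the numeric result so
you can continue from there.
"""
```

Whatever wording you pick, the important parts are unambiguous tags and a clear instruction to stop generating after the end tag, so your code gets a chance to run the tool and reply.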
232 | 263 | { |
233 | 264 | "cell_type": "code", |
234 | | - "execution_count": 102, |
| 265 | + "execution_count": null, |
235 | 266 | "metadata": { |
236 | 267 | "id": "kjrZYUE7N5N6" |
237 | 268 | }, |
|
271 | 302 | }, |
272 | 303 | { |
273 | 304 | "cell_type": "code", |
274 | | - "execution_count": 84, |
| 305 | + "execution_count": null, |
275 | 306 | "metadata": { |
276 | 307 | "id": "lbLNQfHVK0bO" |
277 | 308 | }, |
|
304 | 335 | ], |
305 | 336 | "source": [ |
306 | 337 | "chat = retry_chat(\n", |
307 | | - " model=\"models/chat-bison-001\",\n", |
| 338 | + " model=model,\n", |
308 | 339 | " messages=calc_prompt,\n", |
309 | 340 | ")\n", |
310 | 341 | "\n", |
|
322 | 353 | }, |
323 | 354 | { |
324 | 355 | "cell_type": "code", |
325 | | - "execution_count": 85, |
| 356 | + "execution_count": null, |
326 | 357 | "metadata": { |
327 | 358 | "id": "K9Y0Z-Lome0P" |
328 | 359 | }, |
|
352 | 383 | }, |
353 | 384 | { |
354 | 385 | "cell_type": "code", |
355 | | - "execution_count": 93, |
| 386 | + "execution_count": null, |
356 | 387 | "metadata": { |
357 | 388 | "id": "sGDNIvkkQjw8" |
358 | 389 | }, |
|
384 | 415 | }, |
385 | 416 | { |
386 | 417 | "cell_type": "code", |
387 | | - "execution_count": 94, |
| 418 | + "execution_count": null, |
388 | 419 | "metadata": { |
389 | 420 | "id": "BC7TfMIu-9Ci" |
390 | 421 | }, |
|
419 | 450 | }, |
420 | 451 | { |
421 | 452 | "cell_type": "code", |
422 | | - "execution_count": 98, |
| 453 | + "execution_count": null, |
423 | 454 | "metadata": { |
424 | 455 | "id": "PFXRsjvNMUyv" |
425 | 456 | }, |
426 | 457 | "outputs": [], |
427 | 458 | "source": [ |
428 | 459 | "def solve():\n", |
429 | 460 | " chat = retry_chat(\n", |
430 | | - " model=\"models/chat-bison-001\",\n", |
| 461 | + " model=model,\n", |
431 | 462 | " context=\"You are an expert at solving word probles.\",\n", |
432 | 463 | " messages=calc_prompt,\n", |
433 | 464 | " )\n", |
|
456 | 487 | }, |
457 | 488 | { |
458 | 489 | "cell_type": "code", |
459 | | - "execution_count": 103, |
| 490 | + "execution_count": null, |
460 | 491 | "metadata": { |
461 | 492 | "id": "Syf10WrLmopr" |
462 | 493 | }, |
|
522 | 553 | }, |
523 | 554 | { |
524 | 555 | "cell_type": "code", |
525 | | - "execution_count": 104, |
| 556 | + "execution_count": null, |
526 | 557 | "metadata": { |
527 | 558 | "id": "jM-TRySbOz3k" |
528 | 559 | }, |
|
713 | 744 | }, |
714 | 745 | { |
715 | 746 | "cell_type": "code", |
716 | | - "execution_count": 105, |
| 747 | + "execution_count": null, |
717 | 748 | "metadata": { |
718 | 749 | "id": "5vaOADwUWgQd" |
719 | 750 | }, |
|