|
131 | 131 | "import time\n", |
132 | 132 | "\n", |
133 | 133 | "import google.generativeai as genai\n", |
134 | | - "import google.ai.generativelanguage as glm\n", |
135 | 134 | "\n", |
136 | 135 | "from IPython import display\n", |
137 | 136 | "from IPython.display import Markdown\n", |
|
206 | 205 | "\n", |
207 | 206 | "To use function calling, pass a list of functions to the `tools` parameter when creating a [`GenerativeModel`](https://ai.google.dev/api/python/google/generativeai/GenerativeModel). The model uses the function name, docstring, parameters, and parameter type annotations to decide if it needs the function to best answer a prompt.\n", |
208 | 207 | "\n", |
209 | | - "> Important: The SDK converts function parameter type annotations to a format the API understands (`glm.FunctionDeclaration`). The API only supports a limited selection of parameter types, and the Python SDK's automatic conversion only supports a subset of that: `AllowedTypes = int | float | bool | str | list['AllowedTypes'] | dict`" |
| 208 | + "> Important: The SDK converts function parameter type annotations to a format the API understands (`genai.protos.FunctionDeclaration`). The API only supports a limited selection of parameter types, and the Python SDK's automatic conversion only supports a subset of that: `AllowedTypes = int | float | bool | str | list['AllowedTypes'] | dict`" |
210 | 209 | ] |
211 | 210 | }, |
212 | 211 | { |
|
327 | 326 | "source": [ |
328 | 327 | "Examine the chat history to see the flow of the conversation and how function calls are integrated within it.\n", |
329 | 328 | "\n", |
330 | | - "The `ChatSession.history` property stores a chronological record of the conversation between the user and the Gemini model. Each turn in the conversation is represented by a [`glm.Content`](https://ai.google.dev/api/python/google/ai/generativelanguage/Content) object, which contains the following information:\n", |
| 329 | + "The `ChatSession.history` property stores a chronological record of the conversation between the user and the Gemini model. Each turn in the conversation is represented by a [`genai.protos.Content`](https://ai.google.dev/api/python/google/generativeai/protos/Content) object, which contains the following information:\n", |
331 | 330 | "\n", |
332 | 331 | "* **Role**: Identifies whether the content originated from the \"user\" or the \"model\".\n", |
333 | | - "* **Parts**: A list of [`glm.Part`](https://ai.google.dev/api/python/google/ai/generativelanguage/Part) objects that represent individual components of the message. With a text-only model, these parts can be:\n", |
| 332 | + "* **Parts**: A list of [`genai.protos.Part`](https://ai.google.dev/api/python/google/generativeai/protos/Part) objects that represent individual components of the message. With a text-only model, these parts can be:\n", |
334 | 333 | " * **Text**: Plain text messages.\n", |
335 | | - " * **Function Call** ([`glm.FunctionCall`](https://ai.google.dev/api/python/google/ai/generativelanguage/FunctionCall)): A request from the model to execute a specific function with provided arguments.\n", |
336 | | - " * **Function Response** ([`glm.FunctionResponse`](https://ai.google.dev/api/python/google/ai/generativelanguage/FunctionResponse)): The result returned by the user after executing the requested function.\n", |
| 334 | + " * **Function Call** ([`genai.protos.FunctionCall`](https://ai.google.dev/api/python/google/generativeai/protos/FunctionCall)): A request from the model to execute a specific function with provided arguments.\n", |
| 335 | + " * **Function Response** ([`genai.protos.FunctionResponse`](https://ai.google.dev/api/python/google/generativeai/protos/FunctionResponse)): The result returned by the user after executing the requested function.\n", |
337 | 336 | "\n", |
338 | 337 | " In the previous example with the mittens calculation, the history shows the following sequence:\n", |
339 | 338 | "\n", |
|
400 | 399 | "source": [ |
401 | 400 | "While this was all handled automatically, if you need more control, you can:\n", |
402 | 401 | "\n", |
403 | | - "- Leave the default `enable_automatic_function_calling=False` and process the `glm.FunctionCall` responses yourself.\n", |
| 402 | + "- Leave the default `enable_automatic_function_calling=False` and process the `genai.protos.FunctionCall` responses yourself.\n", |
404 | 403 | "- Or use `GenerativeModel.generate_content`, where you also need to manage the chat history." |
405 | 404 | ] |
406 | 405 | }, |
|
541 | 540 | "\n", |
542 | 541 | "# Build the response parts.\n", |
543 | 542 | "response_parts = [\n", |
544 | | - " glm.Part(function_response=glm.FunctionResponse(name=fn, response={\"result\": val}))\n", |
| 543 | + " genai.protos.Part(function_response=genai.protos.FunctionResponse(name=fn, response={\"result\": val}))\n", |
545 | 544 | " for fn, val in responses.items()\n", |
546 | 545 | "]\n", |
547 | 546 | "\n", |
|
570 | 569 | "AllowedType = int | float | bool | str | list['AllowedType'] | dict[str, 'AllowedType']\n", |
571 | 570 | "```\n", |
572 | 571 | "\n", |
573 | | - "The `google.ai.generativelanguage` client library provides access to the low level types giving you full control." |
574 | | - ] |
575 | | - }, |
576 | | - { |
577 | | - "cell_type": "code", |
578 | | - "execution_count": null, |
579 | | - "metadata": { |
580 | | - "id": "S53E0EE8TBUF" |
581 | | - }, |
582 | | - "outputs": [], |
583 | | - "source": [ |
584 | | - "import google.ai.generativelanguage as glm" |
| 572 | + "The `google.generativeai.protos` submodule provides access to the low level types giving you full control." |
585 | 573 | ] |
586 | 574 | }, |
587 | 575 | { |
|
648 | 636 | "id": "qFD4U7ym04F5" |
649 | 637 | }, |
650 | 638 | "source": [ |
651 | | - "This returns the list of `glm.Tool` objects that would be sent to the API. If the printed format is not familiar, it's because these are Google protobuf classes. Each `glm.Tool` (1 in this case) contains a list of `glm.FunctionDeclarations`, which describe a function and its arguments." |
| 639 | + "This returns the list of `genai.protos.Tool` objects that would be sent to the API. If the printed format is not familiar, it's because these are Google protobuf classes. Each `genai.protos.Tool` (1 in this case) contains a list of `genai.protos.FunctionDeclaration` objects, which describe a function and its arguments." |
652 | 640 | ] |
653 | 641 | }, |
654 | 642 | { |
|
657 | 645 | "id": "eY6RmFQ76FVu" |
658 | 646 | }, |
659 | 647 | "source": [ |
660 | | - "Here is a declaration for the same multiply function written using the `glm` classes.\n", |
| 648 | + "Here is a declaration for the same multiply function written using the `genai.protos` classes.\n", |
661 | 649 | "\n", |
662 | 650 | "Note that these classes just describe the function for the API, they don't include an implementation of it. So using this doesn't work with automatic function calling, but functions don't always need an implementation." |
663 | 651 | ] |
|
670 | 658 | }, |
671 | 659 | "outputs": [], |
672 | 660 | "source": [ |
673 | | - "calculator = glm.Tool(\n", |
| 661 | + "calculator = genai.protos.Tool(\n", |
674 | 662 | " function_declarations=[\n", |
675 | | - " glm.FunctionDeclaration(\n", |
| 663 | + " genai.protos.FunctionDeclaration(\n", |
676 | 664 | " name='multiply',\n", |
677 | 665 | " description=\"Returns the product of two numbers.\",\n", |
678 | | - " parameters=glm.Schema(\n", |
679 | | - " type=glm.Type.OBJECT,\n", |
| 666 | + " parameters=genai.protos.Schema(\n", |
| 667 | + " type=genai.protos.Type.OBJECT,\n", |
680 | 668 | " properties={\n", |
681 | | - " 'a':glm.Schema(type=glm.Type.NUMBER),\n", |
682 | | - " 'b':glm.Schema(type=glm.Type.NUMBER)\n", |
| 669 | + " 'a':genai.protos.Schema(type=genai.protos.Type.NUMBER),\n", |
| 670 | + " 'b':genai.protos.Schema(type=genai.protos.Type.NUMBER)\n", |
683 | 671 | " },\n", |
684 | 672 | " required=['a','b']\n", |
685 | 673 | " )\n", |
|
753 | 741 | } |
754 | 742 | ], |
755 | 743 | "source": [ |
756 | | - "glm.Tool(calculator)" |
| 744 | + "genai.protos.Tool(calculator)" |
757 | 745 | ] |
758 | 746 | }, |
759 | 747 | { |
|
762 | 750 | "id": "jS6ruiTp6VBf" |
763 | 751 | }, |
764 | 752 | "source": [ |
765 | | - "Either way, you pass a representation of a `glm.Tool` or list of tools to" |
| 753 | + "Either way, you pass a representation of a `genai.protos.Tool` or list of tools to" |
766 | 754 | ] |
767 | 755 | }, |
768 | 756 | { |
|
787 | 775 | "id": "517ca06297bb" |
788 | 776 | }, |
789 | 777 | "source": [ |
790 | | - "Like before the model returns a `glm.FunctionCall` invoking the calculator's `multiply` function:" |
| 778 | + "Like before the model returns a `genai.protos.FunctionCall` invoking the calculator's `multiply` function:" |
791 | 779 | ] |
792 | 780 | }, |
793 | 781 | { |
|
889 | 877 | "outputs": [], |
890 | 878 | "source": [ |
891 | 879 | "response = chat.send_message(\n", |
892 | | - " glm.Content(\n", |
893 | | - " parts=[glm.Part(\n", |
894 | | - " function_response = glm.FunctionResponse(\n", |
| 880 | + " genai.protos.Content(\n", |
| 881 | + " parts=[genai.protos.Part(\n", |
| 882 | + " function_response = genai.protos.FunctionResponse(\n", |
895 | 883 | " name='multiply',\n", |
896 | 884 | " response={'result': result}))]))" |
897 | 885 | ] |
|
0 commit comments