diff --git a/cachy.jsonl b/cachy.jsonl index 305aeee..41255a2 100644 --- a/cachy.jsonl +++ b/cachy.jsonl @@ -94,3 +94,68 @@ {"key": "6ca5db66", "response": "{\n \"id\": \"chatcmpl-CkKgq3Ov2DdanOxEYNwJrCzej7dQ8\",\n \"object\": \"chat.completion\",\n \"created\": 1765158656,\n \"model\": \"gpt-4.1-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"The result of 5 + 3 is 8.\\n\\nExplanation: The simple_add tool takes two numbers (in this case, 5 and 3) and adds them together. The sum of 5 and 3 is 8.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 156,\n \"completion_tokens\": 49,\n \"total_tokens\": 205,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_09249d7c7b\"\n}\n"} {"key": "ce36a9f7", "response": "{\n \"id\": \"chatcmpl-CkKgxlKn9WDEtVAzIQCj5x5IXLRFb\",\n \"object\": \"chat.completion\",\n \"created\": 1765158663,\n \"model\": \"gpt-4.1-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_osF7ih5fUnuMrWC0PAae8ik4\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"multiply\",\n \"arguments\": \"{\\\"a\\\":8,\\\"b\\\":9}\"\n }\n }\n ],\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 178,\n \"completion_tokens\": 17,\n \"total_tokens\": 195,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_09249d7c7b\"\n}\n"} {"key": "e0458695", "response": "{\n \"id\": \"chatcmpl-CkKgyUKZG6IOx5T0ds9S47BWLhMkZ\",\n \"object\": \"chat.completion\",\n \"created\": 1765158664,\n \"model\": \"gpt-4.1-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"(5 + 3) = 8 and (7 + 2) = 9. Multiplying them gives 8 \u00d7 9 = 72. \\n\\nSo, (5 + 3) * (7 + 2) = 72.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 203,\n \"completion_tokens\": 54,\n \"total_tokens\": 257,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_09249d7c7b\"\n}\n"} +{"key": "2dc99256", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"Hello! How can I help you today?\\n\\nWhether you need help with a creative project, have a burning question, or just want to chat, I'm all ears. What's on your mind?\",\n \"thoughtSignature\": \"EvoDCvcDAXLI2nwStssJVsA/41VhNGBtTYcO/AB6v51G/mWckRl8dTv7yc9sBdsfL/ZyZS46ncXHwgzkascEc6y3GrwXfb3U6h7+DjZQmYIx1ezMlYqP4rsYcpVU6laTtU8IpcGLj66SigR5a+JW9bAU3res5GL+3yXZcnCY7cSzGVG134A7FkFqaVsRjiU06e9UihOGiK6C+t54GrA4ITdLnMFo1RvReBArJFYjPhj5oXuvITyfhlkdKc4nH36eT55766GHpzjHn+l+T6/MwPiwr+kxqDI72O8IgxjBUyTCToQQm9Av5FE4syBSWcCGqJAGOjMe4aNIakMh5E/f5FTyJD7GJJANiKINDe9rjDFcgF7bwJQmeLCOY3DlseMXCfDegZtYupK1Jm948xza7GkUZzp5BxdjqsRdvbD8UeBcCIg5cxbjZAV2bRl97lrNhHGZw7wWxlOek3nhiqDdkyuz2UpsEKJ3wCaDXdH7EHoSirsLP5dyAezmVh+N8lcVpvqrNZmnVemF31X+R++BWmLyaS8vDyZWQ4SKKEiY171zvP8rZvO5eNNVwmhzLlyDiJJrB1TYdwt7L4utcJWFjTirdw96Ju+I7vjgmBetQiIWBqfvHRuy7o3z7jGHY1C6nW3/STJK3y0Q4WKbYi9ehoq0B1ii2NdMMyxrTgQ=\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 4,\n \"candidatesTokenCount\": 43,\n \"totalTokenCount\": 167,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 4\n }\n ],\n \"thoughtsTokenCount\": 120\n },\n \"modelVersion\": \"gemini-3-pro-preview\",\n \"responseId\": \"hakxabHdFIm4nsEPwtuUuQM\"\n}\n"} +{"key": "f3ca1c5a", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"Hey there! How can I help you today?\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 4,\n \"candidatesTokenCount\": 10,\n \"totalTokenCount\": 1055,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 4\n }\n ],\n \"thoughtsTokenCount\": 1041\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"kKkxadeHMd_xnsEP9fuRiAQ\"\n}\n"} +{"key": "a353b221", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"Hey there! How can I help you today?\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 2,\n \"candidatesTokenCount\": 10,\n \"totalTokenCount\": 769,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 2\n }\n ],\n \"thoughtsTokenCount\": 757\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"mKkxaZ77MsazkdUPhqDpsAQ\"\n}\n"} +{"key": "678d537d", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"This is an absolutely adorable picture of a puppy!\\n\\nHere's a breakdown of what's in the image:\\n\\n* **The Puppy:** The main subject is a very young puppy, most likely a **Cavalier King Charles Spaniel**. It has the breed's characteristic features: large, dark, expressive eyes, long, floppy ears with silky, wavy fur, and a sweet expression. The coloring, white with chestnut or reddish-brown patches, is known as \\\"Blenheim\\\" in this breed.\\n* **The Pose:** The puppy is lying down in the green grass, peeking out from behind a bush of flowers. It's looking directly at the camera with a curious and gentle gaze.\\n* **The Flowers:** To the left of the puppy is a cluster of small, delicate purple or lavender-colored flowers, which look like asters or a similar daisy-like flower.\\n* **The Setting:** The scene is outdoors, likely in a garden or yard. The focus is sharp on the puppy, while the background is softly blurred, which makes the puppy stand out as the main subject.\\n\\nOverall, it's a very charming and heartwarming photograph capturing the innocence and cuteness of a young puppy.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 265,\n \"candidatesTokenCount\": 257,\n \"totalTokenCount\": 1597,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 7\n },\n {\n \"modality\": \"IMAGE\",\n \"tokenCount\": 258\n }\n ],\n \"thoughtsTokenCount\": 1075\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"pqkxafv3MrDikdUP6s_4-A8\"\n}\n"} +{"key": "5ef8238a", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"The audio says: \\\"The sun rises in the east and sets in the west. This simple fact has been observed by humans for thousands of years.\\\"\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 230,\n \"candidatesTokenCount\": 30,\n \"totalTokenCount\": 581,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 7\n },\n {\n \"modality\": \"AUDIO\",\n \"tokenCount\": 223\n }\n ],\n \"thoughtsTokenCount\": 321\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"rakxaf2zH7Xq7M8P2tbq6Q8\"\n}\n"} +{"key": "0d833369", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"This video is an advertisement for the Google Pixel 8 Pro smartphone, featuring a photographer named Saeka Shimada. She walks through Tokyo at night, demonstrating the phone's new \\\"Video Boost\\\" feature, which uses \\\"Night Sight\\\" to capture high-quality, vibrant video in low-light conditions. She is visibly impressed by the clarity and beauty of the footage she records in the city's atmospheric alleys.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 17402,\n \"candidatesTokenCount\": 84,\n \"totalTokenCount\": 17804,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 12\n },\n {\n \"modality\": \"VIDEO\",\n \"tokenCount\": 15517\n },\n {\n \"modality\": \"AUDIO\",\n \"tokenCount\": 1873\n }\n ],\n \"thoughtsTokenCount\": 318\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"uKkxaau4De2skdUPw-jMWQ\"\n}\n"} +{"key": "e4965f35", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"Hey there! How can I help you today?\"}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 3,\"candidatesTokenCount\": 10,\"totalTokenCount\": 821,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 3}],\"thoughtsTokenCount\": 808},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"uakxadPXLtDSnsEPgoCIkAQ\"}\r\n\r\n"} +{"key": "5ec0bc53", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"I will use the `simple_add` tool to perform the two requested additions. First, I'll add 5478954793 and 547982745. Then, I'll add 5479749754 and 9875438979.\",\n \"thoughtSignature\": \"CsYNAXLI2nxQDg2H7IQFKRfAB++XIjr6+QF5y7Bm79+vB52Ig/hIU2JzHavfsUJq/hLHh24tKaj4NtwCC5IEytQW4hWc53exlWACJevkSQGxCyUIefD4KF6hhSLXcltG7PzHdLUdTAfX6Vc2ocluBKEAGIpzhKL13gwHACM7GPldMgA3YYuFJ88qOIEk9TVE4Pzt4DI6/PT3Ztm18dNA6lF8Px28dfB6N1EVmWtWfcKN69tiOK/aEbrg+1yvzqugWCOS8/X+/x/IToz5hHlNpG3t2MYec82v/5wmFFicYc6+AtzjTFIcXKgGXMBY9RtLpYdn0yRPseNbJoi9zbHAfYIB/uJKSfU3xs9jSLOTLwIcj4BPJxo59t6JKEnwqKA2f/ZCkqu5c9L09uRfJ6CV/5qRKhqDQ502zqkC0VlYvTH6dFWBaOz0xiUSd6G2BpBBaYaCQY4ieI4l7U7N6zv1K8RW248A4xyN4oZXOjtFplcvxLaqdXkc1qFYDVnM9dsk1QGOmXQg6uPJkZQplSIF5LaG8N5FdEf2qPyGrYgqreeyxlU0oJit43HP70FS44q9it92ugCtUtgOcAVK86KlAe8wRlvbcUq8Fq+W/2fMTF1btc1oXHYaoMS8uwZ8YBPKmkQ9v/bCm2CQNp9Hqmbcs889Q9CIoL4KpDf1O2ULHHUKjAayb89/xV6lYD0qZv8+5asM0pGrod/FUsZJI6UKdsV/zIqbbJ9t5TxHR8LubJqZuo2l5q4RrHWICRMXTcGPPftFw/GK6cyfD4j0bXmXMIqx17OxIrBBIYAfMatGNIN9Sk5fzjkg+ah9BfaSdXxft6qWE9t1ETips3N1eFiVb9W1c3N6CUZFzRGtWo/X2LWi4/ZEwnIUpPN5jPMwlLGvcrnlPJZXivYbnOfI76iE9A4kunfQV6CM+QPofhwO033js+jvpbab1hb3a9QeZRPm2h8PQAiGRfhHzTkDnpUUOYu7AUg/vYWY75dUbD4v2srWNYXdqJ1JChUYV5Wf40L05GPLsUqyIXYJsGfLcICbK3YEla/5laSmV3aO7b8KNMLgZ/oatCnFd9p3kWyC/dEAgjIe2WzWSAXt/BIRYF7z6hMHltp3q6KQH3ov3/tTmQ7xltlt4cJANTzu8puzkzRYZU6tLYe9bej/Y2R7Yd1jLscQ6bRhXSIYo4M4+q7XRPZGHPa2eSCgVC9+ZiWVGKEFcd0dAwtLNA99FFTvtqtgsJgcgMjZXvxh5ewbu9jl3JHGB1IAayj6D6E8MPd38+5LVsWCSF8R4+urLiK62yru2RPL/NKcpb4FCXErGoPx9ywJXR0x3SvW5HXg0sNPhuUCrFpxmcCq/KJzWydzXnhCR37W6HgPzGvD3mMlwDHk59LFPQZ4q253tu5nOu+afphP6FlPwL7d8lrNT67Q4i2FMTk9X13IHJED4t6qAcGH0pBhuuLK25EZVVnJowc3V3pebGtAg4tpIoUVJ100YJ2b0g9pZZ+V9fgWJsGoYCWrZw3ZmZv1CLAufnpJKnC6J+jxqCgRwjjeq8y3Rqi7iDKNOWqu0AmyT1gmHgvgxbrC/LSakUTQR8lwPQq8ab/LKPRM6EvyiuxrsssjONiXiglxbUUzODzZF5wHvqmZ12BbtrndAFmOsI+P8IooYzJXeOrpF54+A5+1blqOY8rcL3Ur9rJjwqNpieW7QVKYGmURCGMbYngJINfbXr9Qm1N5GLUmWNLSA5nPrFXnzHBxBF7hKGdRT993UQBaWtYf4letLeeTTd1bClcD//lIA9guzVx0hL5PAZZmgV51T6/iOVCSETD5WE7ljlw74haDch9l8ZtLFw1BS9xSqmG6pK5M62NHxcEQvGcXAZvEbXrfUTqj6QEpUJQnntoExDczr3pGdJqYpIC6wuvW2wHaFVIisnveA13u/WtZ1rFy8fDZjo2oNVtBvuCuRUblAo+BadCk8T0wh52ZykRs0EZhkvo82YQFjZmEvv8AYBSPmYz/69H1Cqbbtu3TSaCmIIeqxZF9abMATtc6MfNE1bTST0m4/C/x+IWJJSCRLCdcJSiz0++FvhWCLeNI5zTem85LNPx0cB+JlvdfsJBt1KAPBXDV6XJWZZkc7e0L+amCRFR9PBfkoMORDrfswlwOCWyHc7uKO3rU8E4hh1lgKl9rWJCMT7SY8QSlB88tt92sr/wKVaSkqvCqU0WtCqTLO33aQyWYyfKxdCp57NN87AVk2BGwapaFMd/gSirJAhNmZ/lcHG1RnN/c2/85+iu2shLmeeEFBOh4tDA6vjOBhH5npM8g6C3qz+r2AB+PVaku\"\n },\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"a\": 5478954793,\n \"b\": 547982745\n }\n }\n },\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"b\": 9875438979,\n \"a\": 5479749754\n }\n }\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 149,\n \"candidatesTokenCount\": 151,\n \"totalTokenCount\": 823,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 149\n }\n ],\n \"thoughtsTokenCount\": 523\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"x6kxabqXIKi3nsEPy8yY6AM\"\n}\n"} +{"key": "aa8c6588", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"I will add the two pairs of numbers for you. First, I'll add 547895479\",\"thoughtSignature\": \"CiQBcsjafIANmQ22PNVCojdqdN9/r0WxHnGmPN6M4wy486AC2kEKZAFyyNp8V48otaeC6yfgobYXo6i277ZyP4/aHmS/VLIwo9wjQEGdaNgnfrM7HVQXEykCta4o+5jQ+JabSkSAHA4RqVkEl2Wrll74iLkha+dj3Bf7K23o66dyt7ddMRXacyQw2zYKgQEBcsjafN4p8oIHOl843asvtGtjUrMbiBiRWxsqIqp8tL75RV5a97L7wPz/mKmmQuDIzlMKlTP9fNiu+bEL2elT3y/wkyLkh9HXizFx2RjURpVlKVMrswiI19RppreNMpRxPLArWQd1cQ0O7/BtbXsPXnlMsZxtPWRik+5dKf08jU0KUAFyyNp8DGVVio1Cd0SMOpQt1Xz8Koly32cq6GJHDvNUbzwEJsLPkoOJyU+YjPuvIBk6MTqbT7S+zgrnacDeA6nKWz4C6DNSlSFY98fIwY/MCkgBcsjafEC2QMuaj83ci3giHmp/I6Svr3zAC4t/f+hoGTor00TE6uC75KN/4vsnRzn8QPZXuP0WSCGuOnRuahl949zXZjEFraEKRwFyyNp8qRfo2V++DCfwWtcxB2S8iXqDx3NmB0T70KDvxhkjyI/k/r+c51yCJmeEpmFigIzcItLbWPjOQWCKqmWUyi9FsQPHCngBcsjafGYok7PvOF4aY1eLGFIrU1z1kg1CM/YIOsQrP7PEBfGQ2h3HisoTJDogyXW6WP9vnQW89foFlgg5wauf+l0LaQl8CAJp/10eW5EA2AJSZl56T46YzpTimRR2yX3KikN6ooYYdk4cSFnRx8X+HbaLEOl4JW0KUwFyyNp8/EwByi4l+6jFRdEcXdBiCIjHmGcK0x2shZC4wsgYx7Fn1z1CVsZp7sPD/w1mqqBpCDcFZFc7MgwlLNfw+lNhsGkimyTJVVJWcYEZCJ3t\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 149,\"candidatesTokenCount\": 27,\"totalTokenCount\": 332,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 149}],\"thoughtsTokenCount\": 156},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"yakxad-9NKrqkdUPxOWzgQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"3 and 547982745. Then, I'll add 5479\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 149,\"candidatesTokenCount\": 51,\"totalTokenCount\": 356,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 149}],\"thoughtsTokenCount\": 156},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"yakxad-9NKrqkdUPxOWzgQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"749754 and 9875438979.\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 149,\"candidatesTokenCount\": 70,\"totalTokenCount\": 375,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 149}],\"thoughtsTokenCount\": 156},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"yakxad-9NKrqkdUPxOWzgQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"functionCall\": {\"name\": \"simple_add\",\"args\": {\"a\": 5478954793,\"b\": 547982745}}}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 149,\"candidatesTokenCount\": 107,\"totalTokenCount\": 412,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 149}],\"thoughtsTokenCount\": 156},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"yakxad-9NKrqkdUPxOWzgQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"call:simple_add{a:5479749754,b:9875\"}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 149,\"candidatesTokenCount\": 132,\"totalTokenCount\": 437,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 149}],\"thoughtsTokenCount\": 156},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"yakxad-9NKrqkdUPxOWzgQQ\"}\r\n\r\n"} +{"key": "b7327e3f", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"**Initiating Derivative Exploration**\\n\\nI'm now fully immersed in the process of formulating a clear explanation of the derivative for the given polynomial. I've broken down the user's request and am carefully considering the most accessible way to present this information, ensuring it's comprehensive and understandable. The focus is on clarity and step-by-step breakdown.\\n\\n\\n\",\"thought\": true}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"totalTokenCount\": 98,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 69},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"**Decomposing the Polynomial**\\n\\nI've just finished the preliminary analysis of the problem. I'm focusing on dissecting the user's need for a comprehensive, step-by-step breakdown due to their perception of complexity. I've begun to list down the initial steps and have zeroed in on the polynomial expression. Now, I'm identifying the necessary calculus rules to accurately solve this derivative problem. I'm prioritizing the power and constant multiple rules as the key components for a concise and accessible derivation.\\n\\n\\n\",\"thought\": true}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"totalTokenCount\": 424,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 395},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"**Constructing the Solution**\\n\\nI've structured my approach to address the user's perception of complexity. I'll provide both the immediate answer and a detailed, encouraging explanation. The core of my explanation revolves around applying the power, constant multiple, sum/difference, and constant rules step-by-step. I will ensure each step is clear, concise, and easy to follow. I plan to use the original expression in the response to enhance clarity.\\n\\n\\n\",\"thought\": true}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"totalTokenCount\": 754,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 725},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"**Detailing the Power Rule**\\n\\nI've just focused on refining the explanation of the power rule, emphasizing how it simplifies finding derivatives of polynomial terms. The breakdown will directly address how to calculate the derivative for each term in the polynomial, ensuring an easily understandable and comprehensive tutorial. The structure is now designed to start with the fundamental rules and apply them step-by-step to the given problem.\\n\\n\\n\",\"thought\": true}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"totalTokenCount\": 928,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"Of course! While it might seem complex at first, this is a classic calculus problem that becomes quite simple once you know the\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 21,\"totalTokenCount\": 949,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" rules.\\n\\nThe derivative of **x\u00b3 + 2x\u00b2 - 5x + 1** is:\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 45,\"totalTokenCount\": 973,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"\\n\\n**3x\u00b2 + 4x - 5**\\n\\n---\\n\\n### Step-by-Step Solution:\\n\\nTo\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 70,\"totalTokenCount\": 998,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" solve this, we use a few fundamental rules of differentiation. The main idea is that we can take the derivative of each part\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 95,\"totalTokenCount\": 1023,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" of the expression (each term) separately and then add them together.\\n\\nThe function is: `f(x)\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 119,\"totalTokenCount\": 1047,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" = x\u00b3 + 2x\u00b2 - 5x + 1`\\n\\nLet's break it down term\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 143,\"totalTokenCount\": 1071,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" by term.\\n\\n#### 1. The Power Rule\\nThe most important rule we'll use is the **Power Rule\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 168,\"totalTokenCount\": 1096,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"**, which states:\\nThe derivative of `x\u207f` is `n * x\u207f\u207b\u00b9`\\n(In\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 192,\"totalTokenCount\": 1120,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" simple terms: bring the exponent down to the front as a multiplier, then subtract one from the original exponent).\\n\\n---\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 216,\"totalTokenCount\": 1144,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"\\n\\n**Term 1: `x\u00b3`**\\n* Using the Power Rule, `n = 3`.\\n*\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 242,\"totalTokenCount\": 1170,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" Bring the `3` to the front and subtract 1 from the exponent.\\n* Derivative = `3 * x\u00b3\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 268,\"totalTokenCount\": 1196,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"\u207b\u00b9` = **3x\u00b2**\\n\\n**Term 2: `2x\u00b2`**\\n* We\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 293,\"totalTokenCount\": 1221,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" use the Power Rule on `x\u00b2` and keep the constant `2` as a multiplier.\\n* The derivative\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 318,\"totalTokenCount\": 1246,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" of `x\u00b2` is `2 * x\u00b2\u207b\u00b9` = `2x`.\\n* Now,\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 342,\"totalTokenCount\": 1270,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" multiply by the constant `2`: `2 * (2x)` = **4x**\\n\\n**Term 3\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 366,\"totalTokenCount\": 1294,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \": `-5x`**\\n* You can think of `x` as `x\u00b9`.\\n* Using the\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 392,\"totalTokenCount\": 1320,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" Power Rule, the derivative of `x\u00b9` is `1 * x\u00b9\u207b\u00b9` = `1 * x\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 416,\"totalTokenCount\": 1344,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"\u2070`.\\n* Anything to the power of 0 is 1, so the derivative is `1`.\\n* Now\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 443,\"totalTokenCount\": 1371,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \", multiply by the constant `-5`: `-5 * 1` = **-5**\\n\\n**Term 4\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 467,\"totalTokenCount\": 1395,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \": `+1`**\\n* There is a rule for constants: **The derivative of any constant number is\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 491,\"totalTokenCount\": 1419,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" always 0.**\\n* The derivative of `1` is **0**.\\n\\n---\\n\\n### Putting It All Together:\\n\\n\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 517,\"totalTokenCount\": 1445,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"Now, we just combine the derivatives of each term:\\n\\n**3x\u00b2 + 4x - 5 + 0\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 543,\"totalTokenCount\": 1471,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"**\\n\\nWhich simplifies to our final answer:\\n\\n### **3x\u00b2 + 4x - 5**\"}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 29,\"candidatesTokenCount\": 566,\"totalTokenCount\": 1494,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 29}],\"thoughtsTokenCount\": 899},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"zqkxafL8FNCEkdUP-YvPAQ\"}\r\n\r\n"} +{"key": "d84201ae", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"Otters are carnivorous mammals known for their playful behavior and adaptations to a semi-aquatic life. There are 14 known species of otters, which are part of the weasel family.\\n\\n**Physical Characteristics:** Otters typically have long, slender bodies with short legs and powerful webbed feet perfect for swimming. They possess dense, waterproof fur that keeps them warm. Their size varies by species, ranging from about 2 to 6 feet in length and weighing between 6 and 100 pounds.\\n\\n**Habitat and Diet:** Most otters live in and around freshwater rivers, lakes, and wetlands, while two species are marine. Their diet is primarily carnivorous and consists of fish, crayfish, crabs, and other aquatic invertebrates. Some species are adept at using tools, such as rocks, to break open shellfish.\\n\\n**Behavior and Social Structure:** Otters are known for their playful nature, often seen sliding down riverbanks. Their social structure varies; some species are mostly solitary, while others live in groups. They communicate through a variety of sounds, including whistles and chirps. Otters can live up to 16 years in the wild.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"groundingMetadata\": {\n \"searchEntryPoint\": {\n \"renderedContent\": \"\\u003cstyle\\u003e\\n.container {\\n align-items: center;\\n border-radius: 8px;\\n display: flex;\\n font-family: Google Sans, Roboto, sans-serif;\\n font-size: 14px;\\n line-height: 20px;\\n padding: 8px 12px;\\n}\\n.chip {\\n display: inline-block;\\n border: solid 1px;\\n border-radius: 16px;\\n min-width: 14px;\\n padding: 5px 16px;\\n text-align: center;\\n user-select: none;\\n margin: 0 8px;\\n -webkit-tap-highlight-color: transparent;\\n}\\n.carousel {\\n overflow: auto;\\n scrollbar-width: none;\\n white-space: nowrap;\\n margin-right: -12px;\\n}\\n.headline {\\n display: flex;\\n margin-right: 4px;\\n}\\n.gradient-container {\\n position: relative;\\n}\\n.gradient {\\n position: absolute;\\n transform: translate(3px, -9px);\\n height: 36px;\\n width: 9px;\\n}\\n@media (prefers-color-scheme: light) {\\n .container {\\n background-color: #fafafa;\\n box-shadow: 0 0 0 1px #0000000f;\\n }\\n .headline-label {\\n color: #1f1f1f;\\n }\\n .chip {\\n background-color: #ffffff;\\n border-color: #d2d2d2;\\n color: #5e5e5e;\\n text-decoration: none;\\n }\\n .chip:hover {\\n background-color: #f2f2f2;\\n }\\n .chip:focus {\\n background-color: #f2f2f2;\\n }\\n .chip:active {\\n background-color: #d8d8d8;\\n border-color: #b6b6b6;\\n }\\n .logo-dark {\\n display: none;\\n }\\n .gradient {\\n background: linear-gradient(90deg, #fafafa 15%, #fafafa00 100%);\\n }\\n}\\n@media (prefers-color-scheme: dark) {\\n .container {\\n background-color: #1f1f1f;\\n box-shadow: 0 0 0 1px #ffffff26;\\n }\\n .headline-label {\\n color: #fff;\\n }\\n .chip {\\n background-color: #2c2c2c;\\n border-color: #3c4043;\\n color: #fff;\\n text-decoration: none;\\n }\\n .chip:hover {\\n background-color: #353536;\\n }\\n .chip:focus {\\n background-color: #353536;\\n }\\n .chip:active {\\n background-color: #464849;\\n border-color: #53575b;\\n }\\n .logo-light {\\n display: none;\\n }\\n .gradient {\\n background: linear-gradient(90deg, #1f1f1f 15%, #1f1f1f00 100%);\\n }\\n}\\n\\u003c/style\\u003e\\n\\u003cdiv class=\\\"container\\\"\\u003e\\n \\u003cdiv class=\\\"headline\\\"\\u003e\\n \\u003csvg class=\\\"logo-light\\\" width=\\\"18\\\" height=\\\"18\\\" viewBox=\\\"9 9 35 35\\\" fill=\\\"none\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\"\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M42.8622 27.0064C42.8622 25.7839 42.7525 24.6084 42.5487 23.4799H26.3109V30.1568H35.5897C35.1821 32.3041 33.9596 34.1222 32.1258 35.3448V39.6864H37.7213C40.9814 36.677 42.8622 32.2571 42.8622 27.0064V27.0064Z\\\" fill=\\\"#4285F4\\\"/\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M26.3109 43.8555C30.9659 43.8555 34.8687 42.3195 37.7213 39.6863L32.1258 35.3447C30.5898 36.3792 28.6306 37.0061 26.3109 37.0061C21.8282 37.0061 18.0195 33.9811 16.6559 29.906H10.9194V34.3573C13.7563 39.9841 19.5712 43.8555 26.3109 43.8555V43.8555Z\\\" fill=\\\"#34A853\\\"/\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M16.6559 29.8904C16.3111 28.8559 16.1074 27.7588 16.1074 26.6146C16.1074 25.4704 16.3111 24.3733 16.6559 23.3388V18.8875H10.9194C9.74388 21.2072 9.06992 23.8247 9.06992 26.6146C9.06992 29.4045 9.74388 32.022 10.9194 34.3417L15.3864 30.8621L16.6559 29.8904V29.8904Z\\\" fill=\\\"#FBBC05\\\"/\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M26.3109 16.2386C28.85 16.2386 31.107 17.1164 32.9095 18.8091L37.8466 13.8719C34.853 11.082 30.9659 9.3736 26.3109 9.3736C19.5712 9.3736 13.7563 13.245 10.9194 18.8875L16.6559 23.3388C18.0195 19.2636 21.8282 16.2386 26.3109 16.2386V16.2386Z\\\" fill=\\\"#EA4335\\\"/\\u003e\\n \\u003c/svg\\u003e\\n \\u003csvg class=\\\"logo-dark\\\" width=\\\"18\\\" height=\\\"18\\\" viewBox=\\\"0 0 48 48\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\"\\u003e\\n \\u003ccircle cx=\\\"24\\\" cy=\\\"23\\\" fill=\\\"#FFF\\\" r=\\\"22\\\"/\\u003e\\n \\u003cpath d=\\\"M33.76 34.26c2.75-2.56 4.49-6.37 4.49-11.26 0-.89-.08-1.84-.29-3H24.01v5.99h8.03c-.4 2.02-1.5 3.56-3.07 4.56v.75l3.91 2.97h.88z\\\" fill=\\\"#4285F4\\\"/\\u003e\\n \\u003cpath d=\\\"M15.58 25.77A8.845 8.845 0 0 0 24 31.86c1.92 0 3.62-.46 4.97-1.31l4.79 3.71C31.14 36.7 27.65 38 24 38c-5.93 0-11.01-3.4-13.45-8.36l.17-1.01 4.06-2.85h.8z\\\" fill=\\\"#34A853\\\"/\\u003e\\n \\u003cpath d=\\\"M15.59 20.21a8.864 8.864 0 0 0 0 5.58l-5.03 3.86c-.98-2-1.53-4.25-1.53-6.64 0-2.39.55-4.64 1.53-6.64l1-.22 3.81 2.98.22 1.08z\\\" fill=\\\"#FBBC05\\\"/\\u003e\\n \\u003cpath d=\\\"M24 14.14c2.11 0 4.02.75 5.52 1.98l4.36-4.36C31.22 9.43 27.81 8 24 8c-5.93 0-11.01 3.4-13.45 8.36l5.03 3.85A8.86 8.86 0 0 1 24 14.14z\\\" fill=\\\"#EA4335\\\"/\\u003e\\n \\u003c/svg\\u003e\\n \\u003cdiv class=\\\"gradient-container\\\"\\u003e\\u003cdiv class=\\\"gradient\\\"\\u003e\\u003c/div\\u003e\\u003c/div\\u003e\\n \\u003c/div\\u003e\\n \\u003cdiv class=\\\"carousel\\\"\\u003e\\n \\u003ca class=\\\"chip\\\" href=\\\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQFoqNcRYz4EnKTDr9rOFtM_QL8fkC9srSnO_pO3Kx3V-kztEHlBXjKTZmwvAT1QVim_wGNWj61kRC38vJmJaMPBQQi58FwA0X9f65W8veorMP0m1VIwW4WwQW9NZuipK4Q9zdSIUAlgOiQDjFUW_-PslRgwXxWrmGXu5jUIN2i6HP6W2G3x7EFJDV-AWX0jvo0Zmj942zxINEq0ZrAowWhQqAIvduvUhH2h\\\"\\u003ewhat are the characteristics of otters\\u003c/a\\u003e\\n \\u003ca class=\\\"chip\\\" href=\\\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQHr13jWszEdjCzQ_VmuUxmeWQAswGZE1Z1qXGQUJ3WJ2K2V0lptvAWJxT8BH01LDSKROIVEuOZCUaoTJgtiV2t4O23q3HmdUhHYTCu97daYMl_CbvDGZD0VddcXk9RqryyYYJPxc5Eec_OIihpsc9UxO3KhHGmkNU-ItKfxBzY4Og4v_frQLz7OR2yyrdFEy6ME\\\"\\u003eotter overview\\u003c/a\\u003e\\n \\u003ca class=\\\"chip\\\" href=\\\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEGh6-Kc-LFdE22rpN0twxDOvbmj2XMMYRwxH1EB8ABOXwOlRtbf7NCPMNeUm0mCi8uNCaCpplqfL6UtWg6QZ7zGhT-8ZvbkQ2FTGn8Usty51WIUC8tQfXWCZ-rDlCDtEa6CNjjGn9meE104hS9wSfZqNYuovvp600RstKn62EgsFge0w9ofRvsjNEJ8dHuHd0IHdtSXA==\\\"\\u003ewhat do otters eat\\u003c/a\\u003e\\n \\u003ca class=\\\"chip\\\" href=\\\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEzEOrUH6o3JZfbGF3n8ESXGUurDYTYi9utM22v1XvGww8ypUZiVJwqNDIHaiURuf2uaY_r4fekzjsCJ_swjBPoMRSH068D8jtgn-REwLOaC_agYjK758f6hQ9shaOzBwAbkM_YkaloHm0e9NurjADhUWnnJzyPpo-xY6lbtDA8yotgGEk7ltllXKIA8Vc3jd_d\\\"\\u003eotter behavior\\u003c/a\\u003e\\n \\u003c/div\\u003e\\n\\u003c/div\\u003e\\n\"\n },\n \"groundingChunks\": [\n {\n \"web\": {\n \"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF2pn0bF_6oFB3sdMwWq6AhdM4zuO-Xps0_S1bLBeXmWli7HPTvNASqRFBdloU1Si-pU2Guj-4yGn8t2lY3znWVGeG1ZI8R93cajVmHVmeytR74QRLYVH77UwL_hiPz\",\n \"title\": \"wikipedia.org\"\n }\n },\n {\n \"web\": {\n \"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEodF4gXW5x0gckfC-61dKg4HrDDzH4Gmg5CYOFmSPmJAXVDu9Im4Hr6kIQCPXhkHas81DGHb9zOGZib_HCmBoR2P0YX2848NHuivTqntd0FcuMOEBWEXvvztxrJNEfH9c2QQ==\",\n \"title\": \"britannica.com\"\n }\n },\n {\n \"web\": {\n \"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQFFQo0zU7gp9wrrxaxTWtv8TcphvYKvxSL0ejmYdfsEAIJ9Dmy5pQwTOxqQbqW-sLKrQoPE4T1yQ9hl4oYgD5pc__Fd-lWZn4bfLFUgMdnRXKNpoaO8ZymBoGLtzTOqJg5lVnwtVKvNTNqCTWwdCI_U5pMgerGQNZqV6MB6U3N8VLVeGho=\",\n \"title\": \"study.com\"\n }\n },\n {\n \"web\": {\n \"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQFRF0VW6hoXzWmAVA2B0Jmxy9-NCSur9G5Hhq8Nw6374Hcs01Keya_v6SCRSw-eSXk74OlD-BD4tvVj1tPGJkGQeCQdvNdc3z51uUssEeYzJquEt4YbWGQEZZvNHjUjow7tpOg=\",\n \"title\": \"ukwildottertrust.org\"\n }\n },\n {\n \"web\": {\n \"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQHFQi65Rnx7jTizXrLDnx9N61o8IEohpWVC-j22j9M5V2gDyfSLclxAvBrmbohJuXvgMiCSSeGF19YqgezmNicOEyDGdXuSN7BtDvqCQ8MeYqQldnPzK_oNNW8ta0tnw2LQmf0brWZHmclXib9JVj7JN6faMg==\",\n \"title\": \"crittercontrol.com\"\n }\n },\n {\n \"web\": {\n \"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQHikH4zJLdkJQg80KmE30onD0iv0fWZ_owkdyLRLD5mJPT1Ntrt_J68utfO2omyUxMfcvbTn02AbqZiqrjWA3rcgIFitCZe9MouiXYAhploBRH3QTX_w__weveMt6jMqIZvCdOdlYADZhwC3UFYNg==\",\n \"title\": \"seaworld.org\"\n }\n },\n {\n \"web\": {\n \"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQE_8ckWYXXHCvYLvK4fCKFn7rmuEsGlSG7Nocws9XT3LOHTrXmpE2K-IhEO94yPnji1KaVDmwpCkHoYJDUiJQ7AqU4_GrHw5U4EufghQffGg46OYooNPFlIJikjahDUs8MjwJiOSGbDqRJ4W-Mg_jDluZ8Nl59tTtYhqLolJWFwja8SGDBFJv65hPI3Or-LxPYZiekQmo39QagEzIpwEgIrL19KPwy6wqRZKvZTZ7H3ePZA-rXZAdqU4v7GblD8-QSM46rkqQv5we8oyOXt4w==\",\n \"title\": \"bluereefaquarium.co.uk\"\n }\n },\n {\n \"web\": {\n \"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQFmhaLUT5FXSCe9JLketaUGHHz9D7eR0bXXCWq31gf6RBIw-p6_c9dd8CVsodWOf0e1b01wQQpqFaE8fnN62mNIXtNf-8z0dhhe7FOaRsATeuOsJEPK_UUaHTdgA0zFHRxPDEL13e2JhPnL4EJilGHgCzp56Vxr1Ff7UCfleETlclhbkhCau-LhItpI\",\n \"title\": \"woodlandtrust.org.uk\"\n }\n },\n {\n \"web\": {\n \"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQE3NCd5oA17ZUlfbQ9XbxXkx380QemvE4qYk479kDjOKNHHKzjwcTdVE5zmRx-BUGjgIkgLaXLGU6vKU8ax7F44_dZKMJXlELIeB7iVRvUUjUT2PqTqzB0WJsv0wLScaa-FCsUyu_lhct_UYWUvbJan\",\n \"title\": \"seaworld.com\"\n }\n },\n {\n \"web\": {\n \"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQGJ9ARniOp2Bq8pMJfgt0EQc0NfxbSNyKcKyh2NYu2qUv5-kSk545BuzRV1PZoEIaO7zpEvZ1jWSb39WidJGYGvjHOeNn9kRihYwOsV334tFa7fv7Ne7fTlctHBmrq7RPM9rKWdiwP275qlswCsrNPiovl7y66QBw==\",\n \"title\": \"petscare.com\"\n }\n },\n {\n \"web\": {\n \"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQETEv5OTvoUF8IiONMTdsfVYwAiOccFc0pvTYJWBRgAebHXihwUBZawAhuRd_F9GIrieqokMtARA3y6uoLjHg4JCCrBaiKw29iTsJm_G5sZqBJh9YC4O0bFPtbKJakXPLAw_oBdWxR-YWxBXnmGFp1PsYfFybS7eBw=\",\n \"title\": \"si.edu\"\n }\n },\n {\n \"web\": {\n \"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQE_5vdCEqYBtISJo0eWBUEojafrDzqiRq8p5N40Jq2yvJYs1GbAQQhULKc1fOHyAjBYyUYMfMKitLtwyLIWeYE0GK3KW1Mvs6n0K6MPikCeBoUtMKrwayDgC8XB2DWx9rKju8w4rvCWJLPp9PplTFZuxCw=\",\n \"title\": \"seaworld.org\"\n }\n }\n ],\n \"groundingSupports\": [\n {\n \"segment\": {\n \"endIndex\": 103,\n \"text\": \"Otters are carnivorous mammals known for their playful behavior and adaptations to a semi-aquatic life.\"\n },\n \"groundingChunkIndices\": [\n 0,\n 1\n ]\n },\n {\n \"segment\": {\n \"startIndex\": 104,\n \"endIndex\": 178,\n \"text\": \"There are 14 known species of otters, which are part of the weasel family.\"\n },\n \"groundingChunkIndices\": [\n 0\n ]\n },\n {\n \"segment\": {\n \"startIndex\": 180,\n \"endIndex\": 315,\n \"text\": \"**Physical Characteristics:** Otters typically have long, slender bodies with short legs and powerful webbed feet perfect for swimming.\"\n },\n \"groundingChunkIndices\": [\n 0,\n 1,\n 2\n ]\n },\n {\n \"segment\": {\n \"startIndex\": 316,\n \"endIndex\": 372,\n \"text\": \"They possess dense, waterproof fur that keeps them warm.\"\n },\n \"groundingChunkIndices\": [\n 0,\n 3\n ]\n },\n {\n \"segment\": {\n \"startIndex\": 373,\n \"endIndex\": 482,\n \"text\": \"Their size varies by species, ranging from about 2 to 6 feet in length and weighing between 6 and 100 pounds.\"\n },\n \"groundingChunkIndices\": [\n 2\n ]\n },\n {\n \"segment\": {\n \"startIndex\": 484,\n \"endIndex\": 606,\n \"text\": \"**Habitat and Diet:** Most otters live in and around freshwater rivers, lakes, and wetlands, while two species are marine.\"\n },\n \"groundingChunkIndices\": [\n 0,\n 1\n ]\n },\n {\n \"segment\": {\n \"startIndex\": 607,\n \"endIndex\": 714,\n \"text\": \"Their diet is primarily carnivorous and consists of fish, crayfish, crabs, and other aquatic invertebrates.\"\n },\n \"groundingChunkIndices\": [\n 4,\n 0,\n 5,\n 6,\n 7\n ]\n },\n {\n \"segment\": {\n \"startIndex\": 715,\n \"endIndex\": 793,\n \"text\": \"Some species are adept at using tools, such as rocks, to break open shellfish.\"\n },\n \"groundingChunkIndices\": [\n 0,\n 8\n ]\n },\n {\n \"segment\": {\n \"startIndex\": 795,\n \"endIndex\": 908,\n \"text\": \"**Behavior and Social Structure:** Otters are known for their playful nature, often seen sliding down riverbanks.\"\n },\n \"groundingChunkIndices\": [\n 0,\n 9,\n 10\n ]\n },\n {\n \"segment\": {\n \"startIndex\": 909,\n \"endIndex\": 1002,\n \"text\": \"Their social structure varies; some species are mostly solitary, while others live in groups.\"\n },\n \"groundingChunkIndices\": [\n 0,\n 3,\n 11\n ]\n },\n {\n \"segment\": {\n \"startIndex\": 1003,\n \"endIndex\": 1079,\n \"text\": \"They communicate through a variety of sounds, including whistles and chirps.\"\n },\n \"groundingChunkIndices\": [\n 3\n ]\n },\n {\n \"segment\": {\n \"startIndex\": 1080,\n \"endIndex\": 1123,\n \"text\": \"Otters can live up to 16 years in the wild.\"\n },\n \"groundingChunkIndices\": [\n 0\n ]\n }\n ],\n \"webSearchQueries\": [\n \"otter overview\",\n \"what are the characteristics of otters\",\n \"what do otters eat\",\n \"otter behavior\"\n ]\n }\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 12,\n \"candidatesTokenCount\": 271,\n \"totalTokenCount\": 578,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 12\n }\n ],\n \"toolUsePromptTokenCount\": 101,\n \"toolUsePromptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 101\n }\n ],\n \"thoughtsTokenCount\": 194\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"46kxadHjHZ7onsEP-4mdgAQ\"\n}\n"} +{"key": "f91d516b", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"Otters are carnivorous mammals belonging to the subfamily Lutrinae, part of the weasel family (Mustelidae), which also includes badgers, mink, and wolverines. There are \"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 52,\"totalTokenCount\": 211,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 60,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 60}],\"thoughtsTokenCount\": 87},\"modelVersion\": \"gemini-2.5-flash\",\"responseId\": \"5akxad_NHe_l7M8PxvHM6A8\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"13 extant species of otters, all of which are semi-aquatic, living in both freshwater and marine environments. They are found on every continent except Australia and Antarctica.\\n\\nThese mammals are recognized by their long, slim bodies, short limbs, and powerful, webbed feet, which make them excellent swimmers. Most species also possess a long\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 122,\"totalTokenCount\": 281,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 60,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 60}],\"thoughtsTokenCount\": 87},\"modelVersion\": \"gemini-2.5-flash\",\"responseId\": \"5akxad_NHe_l7M8PxvHM6A8\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \", muscular tail. Otters have incredibly dense, insulated fur, especially sea otters which have the thickest fur of any animal, helping them trap air for warmth and buoyancy in water as they lack a bl\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 164,\"totalTokenCount\": 323,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 60,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 60}],\"thoughtsTokenCount\": 87},\"modelVersion\": \"gemini-2.5-flash\",\"responseId\": \"5akxad_NHe_l7M8PxvHM6A8\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"ubber layer. Their diet primarily consists of fish, but can also include frogs, birds, and shellfish. Otters are known\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 189,\"totalTokenCount\": 348,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 60,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 60}],\"thoughtsTokenCount\": 87},\"modelVersion\": \"gemini-2.5-flash\",\"responseId\": \"5akxad_NHe_l7M8PxvHM6A8\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" for their playful behavior, engaging in activities like sliding and playing with stones. They typically live in dens called \\\"holts\\\" near water sources.\"}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0,\"groundingMetadata\": {\"searchEntryPoint\": {\"renderedContent\": \"\\u003cstyle\\u003e\\n.container {\\n align-items: center;\\n border-radius: 8px;\\n display: flex;\\n font-family: Google Sans, Roboto, sans-serif;\\n font-size: 14px;\\n line-height: 20px;\\n padding: 8px 12px;\\n}\\n.chip {\\n display: inline-block;\\n border: solid 1px;\\n border-radius: 16px;\\n min-width: 14px;\\n padding: 5px 16px;\\n text-align: center;\\n user-select: none;\\n margin: 0 8px;\\n -webkit-tap-highlight-color: transparent;\\n}\\n.carousel {\\n overflow: auto;\\n scrollbar-width: none;\\n white-space: nowrap;\\n margin-right: -12px;\\n}\\n.headline {\\n display: flex;\\n margin-right: 4px;\\n}\\n.gradient-container {\\n position: relative;\\n}\\n.gradient {\\n position: absolute;\\n transform: translate(3px, -9px);\\n height: 36px;\\n width: 9px;\\n}\\n@media (prefers-color-scheme: light) {\\n .container {\\n background-color: #fafafa;\\n box-shadow: 0 0 0 1px #0000000f;\\n }\\n .headline-label {\\n color: #1f1f1f;\\n }\\n .chip {\\n background-color: #ffffff;\\n border-color: #d2d2d2;\\n color: #5e5e5e;\\n text-decoration: none;\\n }\\n .chip:hover {\\n background-color: #f2f2f2;\\n }\\n .chip:focus {\\n background-color: #f2f2f2;\\n }\\n .chip:active {\\n background-color: #d8d8d8;\\n border-color: #b6b6b6;\\n }\\n .logo-dark {\\n display: none;\\n }\\n .gradient {\\n background: linear-gradient(90deg, #fafafa 15%, #fafafa00 100%);\\n }\\n}\\n@media (prefers-color-scheme: dark) {\\n .container {\\n background-color: #1f1f1f;\\n box-shadow: 0 0 0 1px #ffffff26;\\n }\\n .headline-label {\\n color: #fff;\\n }\\n .chip {\\n background-color: #2c2c2c;\\n border-color: #3c4043;\\n color: #fff;\\n text-decoration: none;\\n }\\n .chip:hover {\\n background-color: #353536;\\n }\\n .chip:focus {\\n background-color: #353536;\\n }\\n .chip:active {\\n background-color: #464849;\\n border-color: #53575b;\\n }\\n .logo-light {\\n display: none;\\n }\\n .gradient {\\n background: linear-gradient(90deg, #1f1f1f 15%, #1f1f1f00 100%);\\n }\\n}\\n\\u003c/style\\u003e\\n\\u003cdiv class=\\\"container\\\"\\u003e\\n \\u003cdiv class=\\\"headline\\\"\\u003e\\n \\u003csvg class=\\\"logo-light\\\" width=\\\"18\\\" height=\\\"18\\\" viewBox=\\\"9 9 35 35\\\" fill=\\\"none\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\"\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M42.8622 27.0064C42.8622 25.7839 42.7525 24.6084 42.5487 23.4799H26.3109V30.1568H35.5897C35.1821 32.3041 33.9596 34.1222 32.1258 35.3448V39.6864H37.7213C40.9814 36.677 42.8622 32.2571 42.8622 27.0064V27.0064Z\\\" fill=\\\"#4285F4\\\"/\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M26.3109 43.8555C30.9659 43.8555 34.8687 42.3195 37.7213 39.6863L32.1258 35.3447C30.5898 36.3792 28.6306 37.0061 26.3109 37.0061C21.8282 37.0061 18.0195 33.9811 16.6559 29.906H10.9194V34.3573C13.7563 39.9841 19.5712 43.8555 26.3109 43.8555V43.8555Z\\\" fill=\\\"#34A853\\\"/\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M16.6559 29.8904C16.3111 28.8559 16.1074 27.7588 16.1074 26.6146C16.1074 25.4704 16.3111 24.3733 16.6559 23.3388V18.8875H10.9194C9.74388 21.2072 9.06992 23.8247 9.06992 26.6146C9.06992 29.4045 9.74388 32.022 10.9194 34.3417L15.3864 30.8621L16.6559 29.8904V29.8904Z\\\" fill=\\\"#FBBC05\\\"/\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M26.3109 16.2386C28.85 16.2386 31.107 17.1164 32.9095 18.8091L37.8466 13.8719C34.853 11.082 30.9659 9.3736 26.3109 9.3736C19.5712 9.3736 13.7563 13.245 10.9194 18.8875L16.6559 23.3388C18.0195 19.2636 21.8282 16.2386 26.3109 16.2386V16.2386Z\\\" fill=\\\"#EA4335\\\"/\\u003e\\n \\u003c/svg\\u003e\\n \\u003csvg class=\\\"logo-dark\\\" width=\\\"18\\\" height=\\\"18\\\" viewBox=\\\"0 0 48 48\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\"\\u003e\\n \\u003ccircle cx=\\\"24\\\" cy=\\\"23\\\" fill=\\\"#FFF\\\" r=\\\"22\\\"/\\u003e\\n \\u003cpath d=\\\"M33.76 34.26c2.75-2.56 4.49-6.37 4.49-11.26 0-.89-.08-1.84-.29-3H24.01v5.99h8.03c-.4 2.02-1.5 3.56-3.07 4.56v.75l3.91 2.97h.88z\\\" fill=\\\"#4285F4\\\"/\\u003e\\n \\u003cpath d=\\\"M15.58 25.77A8.845 8.845 0 0 0 24 31.86c1.92 0 3.62-.46 4.97-1.31l4.79 3.71C31.14 36.7 27.65 38 24 38c-5.93 0-11.01-3.4-13.45-8.36l.17-1.01 4.06-2.85h.8z\\\" fill=\\\"#34A853\\\"/\\u003e\\n \\u003cpath d=\\\"M15.59 20.21a8.864 8.864 0 0 0 0 5.58l-5.03 3.86c-.98-2-1.53-4.25-1.53-6.64 0-2.39.55-4.64 1.53-6.64l1-.22 3.81 2.98.22 1.08z\\\" fill=\\\"#FBBC05\\\"/\\u003e\\n \\u003cpath d=\\\"M24 14.14c2.11 0 4.02.75 5.52 1.98l4.36-4.36C31.22 9.43 27.81 8 24 8c-5.93 0-11.01 3.4-13.45 8.36l5.03 3.85A8.86 8.86 0 0 1 24 14.14z\\\" fill=\\\"#EA4335\\\"/\\u003e\\n \\u003c/svg\\u003e\\n \\u003cdiv class=\\\"gradient-container\\\"\\u003e\\u003cdiv class=\\\"gradient\\\"\\u003e\\u003c/div\\u003e\\u003c/div\\u003e\\n \\u003c/div\\u003e\\n \\u003cdiv class=\\\"carousel\\\"\\u003e\\n \\u003ca class=\\\"chip\\\" href=\\\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEXMRpTI2pFbvxffeY8m2WANDXguGuCIGMckYB0Lbk1aDcTcrg6rRengGvvkRDQp4TGADrmmbgEFbGAQsdfAilWGj0YuO-VQQPQNq5zlr2ziP8d6rTphWntFxBdqxy6NrTvb3whH150TyzRkCmgbObsvN0D6oBJ93sSDMbAc_d1aJfeEy4u2qKZQw==\\\"\\u003eotters\\u003c/a\\u003e\\n \\u003c/div\\u003e\\n\\u003c/div\\u003e\\n\"},\"groundingChunks\": [{\"web\": {\"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEikjifWl2Ho0XBpILK2GDuDb_yAvNWrhkvWaMWYUyDvmjDr-vgVI5dbqrVg7m4c1bLSB7UFDU-HwbhaKEz0Btj6xrm00GjjGDZuGro4FxG5v5xAzEFYTBAzvYBLXbX\",\"title\": \"wikipedia.org\"}},{\"web\": {\"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQG8IrStcUGaJBb0j3NpEKxRSKJmrj-cVfw6AqiNQAXCNKn95IWuCMwwmNWuT7WpHtWUdc0KIsddmWMMVAzj2B5FbYcFzaPVLdHGWfgYRF29MHtLK6Bgf2kE1vdKDCWnkgcUrj5ujTpOfGCxdP1Ixoh7UdBOuRLqSpYqcBoKS3jRp3CG\",\"title\": \"doi.gov\"}},{\"web\": {\"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQH3h53jqxHUFanpfBoCWCSFEatHhzUvZTR2jq6wxCsWxLa7DYQTY3FcNoJiHbyjDmfZE4zkpISg00GsT1Hgy5cwXP0SutjGcjnURlTdnR1gcGlI7KmbRyfP_arMsTmdWmQABglBAGRpZQHV-3WZjrd1UKjCg3h81XbQM43c\",\"title\": \"treehugger.com\"}},{\"web\": {\"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF8jkKGB0tfT_EQFuq46hgDDGDYYgsal_m4_v4VYb9AXgSdVwPJQQY6DrYs7GSYkpEGzC79CZGpAOHULsH4NL23A7LRqFItKHvcVg5Nwx-RhlZJMi-XdGpfClg3DuydwXn30A==\",\"title\": \"britannica.com\"}},{\"web\": {\"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEOcyghAA8GOTgGxMjelz2bAQn5RrgyF6WKnKhjZ2HByNFaOM-WtZN-XtMXWLRls6quJSFyIk-flfzmfNWZYvQ1YacXTlvLHRcQ5E3IwKDrfMkGGh8hVX-Uchje967i3azI3-38CojrrRMry7FNriPmBXwym1572RM=\",\"title\": \"crittercarewildlife.org\"}}],\"groundingSupports\": [{\"segment\": {\"endIndex\": 157,\"text\": \"Otters are carnivorous mammals belonging to the subfamily Lutrinae, part of the weasel family (Mustelidae), which also includes badgers, mink, and wolverines\"},\"groundingChunkIndices\": [0,1]},{\"segment\": {\"startIndex\": 159,\"endIndex\": 278,\"text\": \"There are 13 extant species of otters, all of which are semi-aquatic, living in both freshwater and marine environments\"},\"groundingChunkIndices\": [0,2,3]},{\"segment\": {\"startIndex\": 280,\"endIndex\": 345,\"text\": \"They are found on every continent except Australia and Antarctica\"},\"groundingChunkIndices\": [2,4]},{\"segment\": {\"startIndex\": 348,\"endIndex\": 479,\"text\": \"These mammals are recognized by their long, slim bodies, short limbs, and powerful, webbed feet, which make them excellent swimmers\"},\"groundingChunkIndices\": [0,3,4]},{\"segment\": {\"startIndex\": 481,\"endIndex\": 528,\"text\": \"Most species also possess a long, muscular tail\"},\"groundingChunkIndices\": [0,3]},{\"segment\": {\"startIndex\": 530,\"endIndex\": 723,\"text\": \"Otters have incredibly dense, insulated fur, especially sea otters which have the thickest fur of any animal, helping them trap air for warmth and buoyancy in water as they lack a blubber layer\"},\"groundingChunkIndices\": [0,1,2,3,4]},{\"segment\": {\"startIndex\": 725,\"endIndex\": 812,\"text\": \"Their diet primarily consists of fish, but can also include frogs, birds, and shellfish\"},\"groundingChunkIndices\": [0,3]},{\"segment\": {\"startIndex\": 814,\"endIndex\": 918,\"text\": \"Otters are known for their playful behavior, engaging in activities like sliding and playing with stones\"},\"groundingChunkIndices\": [0]},{\"segment\": {\"startIndex\": 920,\"endIndex\": 981,\"text\": \"They typically live in dens called \\\"holts\\\" near water sources\"},\"groundingChunkIndices\": [4]}],\"webSearchQueries\": [\"otters\"]}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 218,\"totalTokenCount\": 377,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 60,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 60}],\"thoughtsTokenCount\": 87},\"modelVersion\": \"gemini-2.5-flash\",\"responseId\": \"5akxad_NHe_l7M8PxvHM6A8\"}\r\n\r\n"} +{"key": "0efcc43b", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"Hi Rens! It's nice to meet you.\\n\\nHow can I help you today?\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 6,\n \"candidatesTokenCount\": 19,\n \"totalTokenCount\": 843,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 6\n }\n ],\n \"thoughtsTokenCount\": 818\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"76kxaaWiAvufkdUPtuPAsAM\"\n}\n"} +{"key": "4b2cc0aa", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"Your name is Rens.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 30,\n \"candidatesTokenCount\": 5,\n \"totalTokenCount\": 264,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 30\n }\n ],\n \"thoughtsTokenCount\": 229\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"86kxaf3IB-ulkdUPhZqD8QM\"\n}\n"} +{"key": "f6fa33fe", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"Your name is Rens.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 43,\n \"candidatesTokenCount\": 5,\n \"totalTokenCount\": 298,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 43\n }\n ],\n \"thoughtsTokenCount\": 250\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"9qkxaY6yHO2skdUPzOjMWQ\"\n}\n"} +{"key": "5a848f30", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"This image features an adorable **puppy** peeking out from behind a bush of **purple flowers**.\\n\\nHere's a more detailed breakdown:\\n\\n* **Main Subject:** A small, fluffy puppy, likely a Cavalier King Charles Spaniel or similar breed, with white fur and large patches of reddish-brown/tan on its floppy ears and around its eyes. It's lying down on green grass, looking directly at the viewer with big, dark, expressive eyes.\\n* **Foreground/Left:** A lush green bush covered in numerous small, delicate light purple or lavender flowers. The puppy is positioned as if it's emerging or hiding behind this bush.\\n* **Ground:** The puppy is resting on vibrant green grass.\\n* **Background (Right):** The background is softly blurred, showing hints of darker, possibly wooden, structures or furniture, suggesting an outdoor garden or patio setting.\\n\\nThe overall impression is one of cuteness and natural beauty.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 264,\n \"candidatesTokenCount\": 201,\n \"totalTokenCount\": 1262,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 6\n },\n {\n \"modality\": \"IMAGE\",\n \"tokenCount\": 258\n }\n ],\n \"thoughtsTokenCount\": 797\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": \"_Kkxab7NCIapnsEPlrHggAc\"\n}\n"} +{"key": "c701342e", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"I can't spell your name because I don't know what it is!\\n\\nIf you tell me your name, I'd be happy to spell it for you.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 4,\n \"candidatesTokenCount\": 36,\n \"totalTokenCount\": 327,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 4\n }\n ],\n \"thoughtsTokenCount\": 287\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": \"_qkxaZPPBbHHnsEPkaa4sQo\"\n}\n"} +{"key": "edcea65d", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"1, 2, 3, 4, 5\"}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 5,\"candidatesTokenCount\": 13,\"totalTokenCount\": 44,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 5}],\"thoughtsTokenCount\": 26},\"modelVersion\": \"gemini-2.5-flash\",\"responseId\": \"_qkxabe9J73qkdUP3uvz6QM\"}\r\n\r\n"} +{"key": "cbed80d3", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"a\": 5,\n \"b\": 3\n }\n },\n \"thoughtSignature\": \"Eo4DCosDAXLI2nyYc7UeX4zozAPN7XvfhLBbTrNjARNB5P1UHbRmID4piz3C1ZPLZ89ZaEQQ6bd/pWp0VX9kOMZkaQPlOM1MFaPNv64ulC2Or2OsJrrs05f7yeEC31rZUSPihKQuFfEZUdoRtPJJcuJQsjrraKvCpw2FrnSjBbCKVYe9ll4E2aOgyQcv3m9UceUogK9vUct1vQ0JYACbpradiIVDOXe5PC9h9BncrKb6jHvDZySrBWmUXLntx0eer6J9NHZekHc1WwpVp55G8qYOKjLVJ1oL758eqAxkbhc4XfI7KHD6N3Nl/VtjVM/6lK62pVRde7XGbUdTuZEtL0GLC4LiGY88Mkrg7Pkshtj7Cl/eePjn9QrtwzCsGIzAkpTLt86nGZP2V+RyDI8/gEWbG2HhYCSC9kSkgRkvGUvGZ9oZJ/la1fk4H4w1/gooirn/R+QGa6euDtcE9yP1qX7U73lSHqnbWe9T4HGObYwYI+p7irMEQsIl1w6U2sAsmnHPG+kntiJdKZ6BBaBarYY=\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 96,\n \"candidatesTokenCount\": 18,\n \"totalTokenCount\": 217,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 96\n }\n ],\n \"thoughtsTokenCount\": 103\n },\n \"modelVersion\": \"gemini-3-pro-preview\",\n \"responseId\": \"BqoxacfeNYm4nsEPwtuUuQM\"\n}\n"} +{"key": "245db044", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"The result of adding 5 and 3 is **8**.\\n\\n**Explanation:**\\nI used the `simple_add` tool to calculate the sum of the two numbers provided (5 and 3), which returned the result 8.\",\n \"thoughtSignature\": \"EoEECv4DAXLI2ny8a1xvymFJ/vgLaWDo8ouX70DSjW7+1z8a/erh5uW4nsD0c6awPoTaWrAepxc33ITaIVdmMtgFuXREaf/wGRT8juRw5I179jax8UxI/MFsVCQyPdjrtkhVZT1t2NDLWxfVZdFp2YC5BZZ3uBaAaYX5hTctlnurndLcdg96a8OMeJNTjTiBuLRBwrEDgV3ie9KtFvnbiyT9q/sXGbieL45ZPqwJCCtQ0V+gXTaEIWDeABQpLbNirtsWG6g2xUF+ZHAS3EfvFUDMLyCride827t/GSl1HE7dmxMl6gFeTu6G2UHEFPUbG3E/Rbk0kswRYNrktijDxU2scVQkLT/OLGq5j6aBchW5VDwJgUJGjHJe651hZo0awMNfVRkysdjOmAm+flgfbpNkW4fz0sWtKX03gjW3fmHCi8aATXMUuBU0aiwbCJ0wtQDAKX8xkEdlkAeJSxws/stuJzOwXhop9ea7ND6AaEp3qF1ipKUx5b3W0F+0glhSiz4WUVS3ZTgdop/MT1nZxHiYRukWUCrwZEkJlxX1VCzCeu5Ilv7mqQC1qic8kVTUX2FxMpPZuu/UZxQySTyRLCRpbhyjHxY7uHCWNLLwf/YJ8cmRUNpeYKD85hdLshNquw1qY2Mnp8u23bp4C1+z902hrPRxgu+7mXnllZt1vhBcp/z1\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 167,\n \"candidatesTokenCount\": 49,\n \"totalTokenCount\": 353,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 167\n }\n ],\n \"thoughtsTokenCount\": 137\n },\n \"modelVersion\": \"gemini-3-pro-preview\",\n \"responseId\": \"DKoxaajdGv-0kdUPtM-XsQQ\"\n}\n"} +{"key": "e80caeb3", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"I will use the `simple_add` tool with the numbers 5 and 3 as arguments to get the sum.\",\n \"thoughtSignature\": \"CrsFAXLI2nxw2v7iht4pqmqx5g4WolrvxJ8BI8gw8vbU8gAs126iJNAM9Hx6B01DlVToTf6y5vYFXhrd3bP+Heau3mmrFttK1rmFqFj4yZfZHwEF0DJYdxxZxsbGQj79MWsZg3BMUEycFswl3d2FKQfeyaUTHWjcpRO+Wg1hyv2Cbf4anqV5Kk5lbxzN/Y+ywXHjZ1BgO009dYxxe7Gm+9BPEnm6L4ANJeqY6ykGk5OnE08mJDzRUpw1ckZgHJ9Cuem9EzoWIxeag6S+tZtQiuMhBPplnCUAiV8x7uX7kSr/V6QcG2nmrEbPR5CCGcN4t0yo1HRsUeJap6SdQWdeyfzIrobomALrPm4/Z2TP6ObmqtNw45OkvuwiWmvLVuYfq/BdN0N6kEiRHPQdjnq65Oy7ZUF7UwAgwN/QbwvU9nnMO0tBVDHgBabJczGKg4SU4s9L79oAPJ3nFYZQJGwrTq95j5dqUnbLtX+ck1hgLWA/LCuOZbAueWCUCrcUKHEE43SrLfp4OkwvETWUxn4xnoLmZ0XwIPeE+MqRrjKI+yrawyVmXn7XFix5FOK8QQK5fQfSaSxutQWyC8W33TtpnJhQ9o7fgUg0qCTbP5sBPteiI5Gs9uBCOsPd8LKlB/0R/AoPTNMzw+eha5O3OL7N/p+hL4Zrx00PiRruzcafqfeNXcn/Q/xTmedDMm19bUE1j6uxeUr3d0OXFWhHKvwmRS2uVKGeSJnUlH4DGZADLl8S3YGRrWq7JCFdb0yqj8PrSkrT1+PzwW3m/smxayU014dgvT5rf7hf2FY4ZlawrhurpR9iRZ7XJ0SZCVL6oPskgrEGQPumeBiHyQbAct7Wze8NYygYk7BHvODcv2ksxg9m3kHVbjaIh/ObcTCZW4h4Luvh9TA3jddf5w1sW685AVy4eMG6Vxe9SAvpq6HY\"\n },\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"a\": 5,\n \"b\": 3\n }\n }\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 85,\n \"candidatesTokenCount\": 45,\n \"totalTokenCount\": 317,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 85\n }\n ],\n \"thoughtsTokenCount\": 187\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"EKoxaaORIYytkdUP3pug4AM\"\n}\n"} +{"key": "8e9d033c", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"Based on my use of the `simple_add` tool, the sum of 5 and 3 is 8.\\n\\nI successfully used the tool with the inputs `a=5` and `b=3`, and it returned the result `8`. The goal is complete.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 209,\n \"candidatesTokenCount\": 58,\n \"totalTokenCount\": 859,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 209\n }\n ],\n \"thoughtsTokenCount\": 592\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"F6oxaY65ObmqkdUP8_Gi4AM\"\n}\n"} +{"key": "cf7ccca3", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"I used the `simple_add` tool to calculate 5 + 3. The tool returned a result of 8.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 159,\n \"candidatesTokenCount\": 26,\n \"totalTokenCount\": 295,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 159\n }\n ],\n \"thoughtsTokenCount\": 110\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": \"GaoxaeLFB4a6nsEPwc6xgAM\"\n}\n"} +{"key": "e6f1d2cd", "response": "{\"model\":\"claude-sonnet-4-5-20250929\",\"id\":\"msg_0175d9DtGkqyNWBDzcDzFFWn\",\"type\":\"message\",\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":\"## Summary\\n\\nI successfully completed the calculation using the `simple_add` tool.\\n\\n**Result:** 5 + 3 = **8**\\n\\n**Explanation:** The `simple_add` function took two parameters:\\n- `a = 5` (first operand)\\n- `b = 3` (second operand)\\n\\nThe function added these two numbers together and returned the correct result of 8.\\n\\nThe goal has been fully completed - no further work is needed.\"}],\"stop_reason\":\"end_turn\",\"stop_sequence\":null,\"usage\":{\"input_tokens\":769,\"cache_creation_input_tokens\":0,\"cache_read_input_tokens\":0,\"cache_creation\":{\"ephemeral_5m_input_tokens\":0,\"ephemeral_1h_input_tokens\":0},\"output_tokens\":108,\"service_tier\":\"standard\"}}"} +{"key": "eac54be2", "response": "{\n \"id\": \"chatcmpl-Cj5e2bM1vAFZOs2SUBSBoWDD1o0wC\",\n \"object\": \"chat.completion\",\n \"created\": 1764862494,\n \"model\": \"gpt-4.1-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I used the simple_add tool to calculate 5 + 3, and the result is 8.\\n\\nSummary:\\n- 5 + 3 = 8\\n\\nNo further work is needed, as the calculation was completed successfully. If you have more math questions or need further explanations, please let me know!\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 155,\n \"completion_tokens\": 62,\n \"total_tokens\": 217,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_433e8c8649\"\n}\n"} +{"key": "831bb29b", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"**My Reasoning on this Simple Addition Problem**\\n\\nOkay, so I'm looking at this problem and it's pretty straightforward. First, the user's asking about addition, which immediately points me towards the `simple_add` tool. Seems like a good fit for this task.\\n\\nNext, I need to understand the inputs. The user gives me two numbers: 5 and 3. I quickly see that `simple_add` takes two parameters, conveniently named `a` and `b`. I can easily map the user's \\\"5\\\" to the parameter `a` and the user's \\\"3\\\" to the parameter `b`.\\n\\nFinally, to get the answer, I need to execute the addition. I know I can call the function with the assigned parameters. So I'll just formulate the correct function call: `print(simple_add(a=5, b=3))` should do the trick. That'll give me the sum. Easy peasy.\\n\",\n \"thought\": true\n },\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"b\": 3,\n \"a\": 5\n }\n },\n \"thoughtSignature\": \"CuADAXLI2nyWO0CQDly8P3vsYo3Yc3eyV4gDjYb9yvQq7t//BS2bq6LWPXkVwXQTIju4kxHkgd4HdjaBPmAY5qxePsaO+31uqUH2jPAQBjn2pQxigo91xFkIyHXWgquKKmLCWRmr3ozZ33OHRgc82C4/KxoIFqL2VQHh9k5Gz92yk4OvQbIj+BCw0F88lG/t/Azh1VmLBJfai1tgdSP2AeXOmxqvW7do7hwqXu/THDwajeHSk4t2pb2s92QVhumfcXTEuDvcjZh25BeTDVmtxRKSSGS4rFzxJIN5e+J1F9Gw28b658P7XXjU/JAOzv397F3vePR5bdLH0m0MEZZcZBog61E3P/HW2dUUnVbRemKfFvWxg4GIzXTmEZ2PlvIEhvJyApQwbED3BakRNOznb34qwBODtT3Sni642obxsLym3+SPyzwh8qapiZmjtxfIVZJ7Y6+TTQkw3c9lCjiksxqnmaA6XTtNpShsBx8qjGLHSduJkpzoeLHJWE9jZZNeKonvuH5m78EvCu8BeDi0z/LbaERbs/XCpvOfaqtOSfrvnrCzsF1ZnTgDDIv5V/+dbPxiQZaf8hqqjEVuhPlJ/jSL98qjDdBZzogunT2It7WuB+1wxVMESmE0OjZveJt/+eMD\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 74,\n \"candidatesTokenCount\": 20,\n \"totalTokenCount\": 235,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 74\n }\n ],\n \"thoughtsTokenCount\": 141\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"JKoxaYKQFom4nsEPwtuUuQM\"\n}\n"} +{"key": "aaeacdb8", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"**Summarizing My Reasoning**\\n\\nOkay, so the user wants a summary of what just happened. Let's see... first, I need to recognize the user's implicit request for a recap. They want a concise explanation of how I arrived at the answer. Let's dig in. I see the user asked \\\"What's 5 + 3?\\\" My call to the `simple_add` tool was quite straightforward: `simple_add(a=5, b=3)`, and the tool spit back `8`.\\n\\nMy analysis is simple: the user presented an elementary math problem. I leveraged a dedicated tool, the `simple_add` function, to do the heavy lifting of the computation. That function then took the input `5` and `3`, and the output was `8`, which is the correct sum.\\n\\nNow to formulate the summary. Since this is an expert level interaction, I'm thinking the user wants a no-nonsense, technically correct summary. The core information to convey is the question, the tool used, and the result. However, the tool use isn't strictly necessary here. Given the brevity of the process, a concise statement like \\\"The sum of 5 and 3 is 8\\\" is the ideal answer. This is direct, accurate, and completely fulfills the objective. Plus, the goal was achieved, so no further explanation is necessary.\\n\",\n \"thought\": true\n },\n {\n \"text\": \"Based on my tool use, the sum of 5 and 3 is 8.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 361,\n \"candidatesTokenCount\": 18,\n \"totalTokenCount\": 837,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 361\n }\n ],\n \"thoughtsTokenCount\": 458\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"K6oxaYuoI_jc7M8P2vLGgQQ\"\n}\n"} +{"key": "0c996c2a", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"Otters are carnivorous mammals known for their playful behavior and adaptations to a semi-aquatic life. A member of the weasel family, there are 13 different species of otters found in various aquatic\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 55,\"totalTokenCount\": 308,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 93,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 93}],\"thoughtsTokenCount\": 148},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"MKoxabbaFrmqkdUP8_Gi4AM\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" habitats around the world.\\n\\nKey characteristics of otters include\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 68,\"totalTokenCount\": 321,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 93,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 93}],\"thoughtsTokenCount\": 148},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"MKoxabbaFrmqkdUP8_Gi4AM\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" their long, slender bodies, short legs with powerful webbed feet for swimming, and a strong tail that helps them move\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 92,\"totalTokenCount\": 345,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 93,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 93}],\"thoughtsTokenCount\": 148},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"MKoxabbaFrmqkdUP8_Gi4AM\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" through the water. They are also distinguished by their very dense fur, which\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 107,\"totalTokenCount\": 360,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 93,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 93}],\"thoughtsTokenCount\": 148},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"MKoxabbaFrmqkdUP8_Gi4AM\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" traps air to keep them warm and buoyant, as they lack a layer of blubber for insulation like other marine mammals. In fact, sea otters have the thickest fur of any animal.\\n\\nTheir diet consists mainly of fish, but can also include frogs, crayfish, and crabs\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 165,\"totalTokenCount\": 418,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 93,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 93}],\"thoughtsTokenCount\": 148},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"MKoxabbaFrmqkdUP8_Gi4AM\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \". Otters are a keystone species, meaning they play a critical role in their\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 182,\"totalTokenCount\": 435,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 93,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 93}],\"thoughtsTokenCount\": 148},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"MKoxabbaFrmqkdUP8_Gi4AM\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" ecosystem, such as controlling sea urchin populations which in turn protects kelp forests.\\n\\nOt\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 202,\"totalTokenCount\": 455,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 93,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 93}],\"thoughtsTokenCount\": 148},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"MKoxabbaFrmqkdUP8_Gi4AM\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"ters can be found in a variety of environments, including freshwater rivers, lakes, and marshes, as well as coastal marine habitats\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 227,\"totalTokenCount\": 480,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 93,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 93}],\"thoughtsTokenCount\": 148},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"MKoxabbaFrmqkdUP8_Gi4AM\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \". They build dens, known as holts, in riverbanks or under tree roots.\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 245,\"totalTokenCount\": 498,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 93,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 93}],\"thoughtsTokenCount\": 148},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"MKoxabbaFrmqkdUP8_Gi4AM\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" While some otter populations have faced declines, reintroduction programs have been successful in some areas.\"}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0,\"groundingMetadata\": {\"searchEntryPoint\": {\"renderedContent\": \"\\u003cstyle\\u003e\\n.container {\\n align-items: center;\\n border-radius: 8px;\\n display: flex;\\n font-family: Google Sans, Roboto, sans-serif;\\n font-size: 14px;\\n line-height: 20px;\\n padding: 8px 12px;\\n}\\n.chip {\\n display: inline-block;\\n border: solid 1px;\\n border-radius: 16px;\\n min-width: 14px;\\n padding: 5px 16px;\\n text-align: center;\\n user-select: none;\\n margin: 0 8px;\\n -webkit-tap-highlight-color: transparent;\\n}\\n.carousel {\\n overflow: auto;\\n scrollbar-width: none;\\n white-space: nowrap;\\n margin-right: -12px;\\n}\\n.headline {\\n display: flex;\\n margin-right: 4px;\\n}\\n.gradient-container {\\n position: relative;\\n}\\n.gradient {\\n position: absolute;\\n transform: translate(3px, -9px);\\n height: 36px;\\n width: 9px;\\n}\\n@media (prefers-color-scheme: light) {\\n .container {\\n background-color: #fafafa;\\n box-shadow: 0 0 0 1px #0000000f;\\n }\\n .headline-label {\\n color: #1f1f1f;\\n }\\n .chip {\\n background-color: #ffffff;\\n border-color: #d2d2d2;\\n color: #5e5e5e;\\n text-decoration: none;\\n }\\n .chip:hover {\\n background-color: #f2f2f2;\\n }\\n .chip:focus {\\n background-color: #f2f2f2;\\n }\\n .chip:active {\\n background-color: #d8d8d8;\\n border-color: #b6b6b6;\\n }\\n .logo-dark {\\n display: none;\\n }\\n .gradient {\\n background: linear-gradient(90deg, #fafafa 15%, #fafafa00 100%);\\n }\\n}\\n@media (prefers-color-scheme: dark) {\\n .container {\\n background-color: #1f1f1f;\\n box-shadow: 0 0 0 1px #ffffff26;\\n }\\n .headline-label {\\n color: #fff;\\n }\\n .chip {\\n background-color: #2c2c2c;\\n border-color: #3c4043;\\n color: #fff;\\n text-decoration: none;\\n }\\n .chip:hover {\\n background-color: #353536;\\n }\\n .chip:focus {\\n background-color: #353536;\\n }\\n .chip:active {\\n background-color: #464849;\\n border-color: #53575b;\\n }\\n .logo-light {\\n display: none;\\n }\\n .gradient {\\n background: linear-gradient(90deg, #1f1f1f 15%, #1f1f1f00 100%);\\n }\\n}\\n\\u003c/style\\u003e\\n\\u003cdiv class=\\\"container\\\"\\u003e\\n \\u003cdiv class=\\\"headline\\\"\\u003e\\n \\u003csvg class=\\\"logo-light\\\" width=\\\"18\\\" height=\\\"18\\\" viewBox=\\\"9 9 35 35\\\" fill=\\\"none\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\"\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M42.8622 27.0064C42.8622 25.7839 42.7525 24.6084 42.5487 23.4799H26.3109V30.1568H35.5897C35.1821 32.3041 33.9596 34.1222 32.1258 35.3448V39.6864H37.7213C40.9814 36.677 42.8622 32.2571 42.8622 27.0064V27.0064Z\\\" fill=\\\"#4285F4\\\"/\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M26.3109 43.8555C30.9659 43.8555 34.8687 42.3195 37.7213 39.6863L32.1258 35.3447C30.5898 36.3792 28.6306 37.0061 26.3109 37.0061C21.8282 37.0061 18.0195 33.9811 16.6559 29.906H10.9194V34.3573C13.7563 39.9841 19.5712 43.8555 26.3109 43.8555V43.8555Z\\\" fill=\\\"#34A853\\\"/\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M16.6559 29.8904C16.3111 28.8559 16.1074 27.7588 16.1074 26.6146C16.1074 25.4704 16.3111 24.3733 16.6559 23.3388V18.8875H10.9194C9.74388 21.2072 9.06992 23.8247 9.06992 26.6146C9.06992 29.4045 9.74388 32.022 10.9194 34.3417L15.3864 30.8621L16.6559 29.8904V29.8904Z\\\" fill=\\\"#FBBC05\\\"/\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M26.3109 16.2386C28.85 16.2386 31.107 17.1164 32.9095 18.8091L37.8466 13.8719C34.853 11.082 30.9659 9.3736 26.3109 9.3736C19.5712 9.3736 13.7563 13.245 10.9194 18.8875L16.6559 23.3388C18.0195 19.2636 21.8282 16.2386 26.3109 16.2386V16.2386Z\\\" fill=\\\"#EA4335\\\"/\\u003e\\n \\u003c/svg\\u003e\\n \\u003csvg class=\\\"logo-dark\\\" width=\\\"18\\\" height=\\\"18\\\" viewBox=\\\"0 0 48 48\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\"\\u003e\\n \\u003ccircle cx=\\\"24\\\" cy=\\\"23\\\" fill=\\\"#FFF\\\" r=\\\"22\\\"/\\u003e\\n \\u003cpath d=\\\"M33.76 34.26c2.75-2.56 4.49-6.37 4.49-11.26 0-.89-.08-1.84-.29-3H24.01v5.99h8.03c-.4 2.02-1.5 3.56-3.07 4.56v.75l3.91 2.97h.88z\\\" fill=\\\"#4285F4\\\"/\\u003e\\n \\u003cpath d=\\\"M15.58 25.77A8.845 8.845 0 0 0 24 31.86c1.92 0 3.62-.46 4.97-1.31l4.79 3.71C31.14 36.7 27.65 38 24 38c-5.93 0-11.01-3.4-13.45-8.36l.17-1.01 4.06-2.85h.8z\\\" fill=\\\"#34A853\\\"/\\u003e\\n \\u003cpath d=\\\"M15.59 20.21a8.864 8.864 0 0 0 0 5.58l-5.03 3.86c-.98-2-1.53-4.25-1.53-6.64 0-2.39.55-4.64 1.53-6.64l1-.22 3.81 2.98.22 1.08z\\\" fill=\\\"#FBBC05\\\"/\\u003e\\n \\u003cpath d=\\\"M24 14.14c2.11 0 4.02.75 5.52 1.98l4.36-4.36C31.22 9.43 27.81 8 24 8c-5.93 0-11.01 3.4-13.45 8.36l5.03 3.85A8.86 8.86 0 0 1 24 14.14z\\\" fill=\\\"#EA4335\\\"/\\u003e\\n \\u003c/svg\\u003e\\n \\u003cdiv class=\\\"gradient-container\\\"\\u003e\\u003cdiv class=\\\"gradient\\\"\\u003e\\u003c/div\\u003e\\u003c/div\\u003e\\n \\u003c/div\\u003e\\n \\u003cdiv class=\\\"carousel\\\"\\u003e\\n \\u003ca class=\\\"chip\\\" href=\\\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQHMd_42vfihCboCd5yHnP-qNLI_HT9IlHlmUVtfwlvdUXNXv3j3xyNb0rHCQNdBc6TIFVyfqWq7LumjIWId_Uq3jCByHcKmTEeowsrIn_4LxkGBrtfifv77ZYbyP5_LZlv7Zzyx-6XORTwcaSFvwAhssNCIXlKV65txezfhAZ8EgrQ8fdZxt7x2\\\"\\u003eotters\\u003c/a\\u003e\\n \\u003c/div\\u003e\\n\\u003c/div\\u003e\\n\"},\"groundingChunks\": [{\"web\": {\"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQGkX41Wyp8epNwOW65lhQ-7iOla1ORY5hBQSIxt0oY6IpBM9omX0XKJTLzkuEHJBvQBjlsXLISjX7m-dKXwpMC1VaYUmawxMP6fJ7RucjDEhRvxEzOvWkK9DIYMvws=\",\"title\": \"wikipedia.org\"}},{\"web\": {\"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQFeoBXVzJTeFymAW7epJ8oTiKKaYbd1lb0ePPN2wpy8bxfBwyAvggeZDSoGbNp8RMEBJU7MR3Ne1ZKfT_LRXkw2QfryYn_nt9rewLkiW-Th7tcvvUnrIrM-nRx6ezmrlGBm\",\"title\": \"britannica.com\"}},{\"web\": {\"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQH5_gxxY7d1_3GK3lPurE3-AI0487RmhHqt3hzDN1NnYk2b3DScF7_6Wi3_-O4c_HGoczctHH_VwM7_iyUjTRA7B3Dtslpi5Vz7aqB5S_M3sPStnh--hqj_dH9SL7Q8JWOgYr0MtHkSjYqxmu1NfN1tWB1PDK8vnS3doYEVah_PKao=\",\"title\": \"doi.gov\"}},{\"web\": {\"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEgQpJyBfXkdo90VP5-fU7V1OWwHf4a6hGyBGwYDNKc4BCJ0Pbmw7vr9QZDa7NzQHuno5lmNZ5BHIURVpFBHRiOg7NcHevlZaEDkfCmB2Bev4hm1T6FnLK-_dak1TaVRCU_5GwT9NjdGz953O8BB2XAJxAImu0E3GIqavw=\",\"title\": \"treehugger.com\"}},{\"web\": {\"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQHqQTGheYeURsOfADjIFXqIczhpXYC1zvv5OnfQYNGtQlwXHxG1eHC3zdc5CybwdYz5lu9uJ-jbgbZ0kf1oxxSTqO2It1mHso5uAWtOwtv5wyvyXGtAj7bp2k8gfNQh99jypyPDAsqgdTTxErPVZ-9yZ_p3PTqY7Q==\",\"title\": \"crittercarewildlife.org\"}}],\"groundingSupports\": [{\"segment\": {\"endIndex\": 103,\"text\": \"Otters are carnivorous mammals known for their playful behavior and adaptations to a semi-aquatic life.\"},\"groundingChunkIndices\": [0,1]},{\"segment\": {\"startIndex\": 104,\"endIndex\": 227,\"text\": \"A member of the weasel family, there are 13 different species of otters found in various aquatic habitats around the world.\"},\"groundingChunkIndices\": [2,3,1]},{\"segment\": {\"startIndex\": 229,\"endIndex\": 403,\"text\": \"Key characteristics of otters include their long, slender bodies, short legs with powerful webbed feet for swimming, and a strong tail that helps them move through the water.\"},\"groundingChunkIndices\": [0,1]},{\"segment\": {\"startIndex\": 404,\"endIndex\": 577,\"text\": \"They are also distinguished by their very dense fur, which traps air to keep them warm and buoyant, as they lack a layer of blubber for insulation like other marine mammals.\"},\"groundingChunkIndices\": [0,2]},{\"segment\": {\"startIndex\": 578,\"endIndex\": 634,\"text\": \"In fact, sea otters have the thickest fur of any animal.\"},\"groundingChunkIndices\": [2,3]},{\"segment\": {\"startIndex\": 636,\"endIndex\": 720,\"text\": \"Their diet consists mainly of fish, but can also include frogs, crayfish, and crabs.\"},\"groundingChunkIndices\": [0,1]},{\"segment\": {\"startIndex\": 721,\"endIndex\": 885,\"text\": \"Otters are a keystone species, meaning they play a critical role in their ecosystem, such as controlling sea urchin populations which in turn protects kelp forests.\"},\"groundingChunkIndices\": [2]},{\"segment\": {\"startIndex\": 887,\"endIndex\": 1021,\"text\": \"Otters can be found in a variety of environments, including freshwater rivers, lakes, and marshes, as well as coastal marine habitats.\"},\"groundingChunkIndices\": [4]},{\"segment\": {\"startIndex\": 1022,\"endIndex\": 1089,\"text\": \"They build dens, known as holts, in riverbanks or under tree roots.\"},\"groundingChunkIndices\": [4]},{\"segment\": {\"startIndex\": 1090,\"endIndex\": 1199,\"text\": \"While some otter populations have faced declines, reintroduction programs have been successful in some areas.\"},\"groundingChunkIndices\": [4]}],\"webSearchQueries\": [\"otters\"]}}],\"usageMetadata\": {\"promptTokenCount\": 12,\"candidatesTokenCount\": 263,\"totalTokenCount\": 516,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 12}],\"toolUsePromptTokenCount\": 93,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 93}],\"thoughtsTokenCount\": 148},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"MKoxabbaFrmqkdUP8_Gi4AM\"}\r\n\r\n"} +{"key": "ef788a94", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"b\": 3,\n \"a\": 5\n }\n },\n \"thoughtSignature\": \"Cq8JAXLI2nyI1OmWr9kdtN0PS13ZhCWAfCRxf4MjaoInzCR/bwzqgYAwIMWbSpvvcpsFOeCp0BVKgD612WkQS5UYEsLGebguKY7QxSm/sKX+hVaRfySu/INlef29rks6OJ6Jl7KyH4ux+v0h0RoRtv5mPARXfQArRpO4Z41Mp51KSqJY7f4HcYdfVt3ORp1IZPBFfZ5YTvtyT194V1FWhzRxc5fagOwt8LP22c+6ppmE8tNQMojPgX3KbaMIEisu5FRXd09A43JEUOz2hdLTijvbwvGtDtVLcujOSgJ88sIZr7q8cQBYNXV+jygwYH60XakR+zifT6vdzzYF0QQTXT4fCtriZdxRLh63alrvbVQHTTBHwhrQjz5pgMZk3IvgoFpsZbjhfBadJIUC0k5mvFCkQ/ehQdUgnNlzIy57+PchqAnpeoZ/L3ZpGdi3C3wwrcjwj5nxu9lktSWlv7A4XzC7SWMMQIfGVbMuCEOLcOaJlri2ZWqVBepG+eY5qYZ6wfiyx/XF4VyWPHhnmmqX3X97zBulVf7wRWG0yeiTHdPAbPH6s8H3qrbVdJvX1TEZdt5nMDgCxa7hXiMc9uLfE1UDMQS/h4I01Pe8gCn9feHZXizrk3GP2FMfkdO1v0vp3xjzDroGzbWe75ecqrH+dD1zpwC3dBL3kgi3e2wJsiKars+Vjnd1VRhMyylgh+q1bDo1q82NzkeXmrnRpjEE99IYxlAL8id89BVv2t2fMuBxu740uFjNW/pNlg47Yv78XM0oscqfgbj2jtm2ulBRqb7gKtoaDMeyhhzxms1medlJJ2TLSptzJuNEdrPpumKOlE0lDnYfU58QPp0fM7tGp/cApMC1nLkSsLKyra9Y76G4nTA8nTlYt4+C1a1i1rsZ3BLorfEhl0+jo/3yWxxlB2gisk5W5kwZSPZgkkFjYARadruhc8spE+VV8BOw7A548HDxvlm/kFe9AXfIx+sckdYu+r112QcJq77KL5fZAQlCGOT1h33jIW+Z0HmIlSMyIrn0eCQUwZH3s3e7unFIyKKI2DwF5huUAhFgWf8NdCqNxxuMs3wFPWw1oI6h8iMwbSUyejMc/V5gswDUU1I7Q+9LMRq5C2mrPjAQX8PdcngApiSzbODXmIPSKeXjctC0mlDZ5v622LRsgA++UDQ5L+MPpqJRzaVMXL0VsK8OQzpBpZ81dhV2QxdsHyd1vqjQ772sqF/AFmW0FBZyWooXEKMK063PBLBlTMTYT56b9C7FiA6JhqjgZkSiF+7UIuBDuAot7r/eayENYEBbI8IiDbqmvHBLqDAcnV9Wdw4iLlZzcqj/c/h4/R9NoSxOrAhaZuTwpGrZsbCaUCou7KTST4ASdvLeB1jk2h96dNLQw5ns1IHkmOVVf0kC0kVzcvIzKB3M7UpbOOCFDnsjODLNQqL3RLSwj/b+l03X6U55AViorZe9KipS59/X8YKMKQygPEY6a1d4y5EP0yLHJWPARA4Qc/iWYWA1gJAvprYchS1OoiF+2gPCFUmmTL30tbVJ1dsf7SGaGgGiPAFWvWP4/nRGG2Xu9XlwW1ZhSpU8XRMkKOVLRp30jsObCZXMkkc8a9U=\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 83,\n \"candidatesTokenCount\": 20,\n \"totalTokenCount\": 504,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 83\n }\n ],\n \"thoughtsTokenCount\": 401\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"N6oxacGlLZGznsEPs82q8Qc\"\n}\n"} +{"key": "a1959678", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"b\": 7,\n \"a\": 8\n }\n }\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 117,\n \"candidatesTokenCount\": 20,\n \"totalTokenCount\": 137,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 117\n }\n ]\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"OKoxaZjWMOi1kdUP9ZuhKA\"\n}\n"} +{"key": "daa54aae", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"b\": 11,\n \"a\": 15\n }\n }\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 152,\n \"candidatesTokenCount\": 22,\n \"totalTokenCount\": 174,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 152\n }\n ]\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"OqoxaevaDu2mnsEPpe6qSQ\"\n}\n"} +{"key": "6049a132", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"OK, let's break it down.\\n\\nFirst, we add 5 and 3, which equals 8.\\nThen, we add 7 to that, which equals 15.\\nFinally, we add 11 to that, which equals 26.\\n\\nTherefore, ((5 + 3)+7)+11 = 26.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 189,\n \"candidatesTokenCount\": 76,\n \"totalTokenCount\": 265,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 189\n }\n ]\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"PKoxab6eJ7TCnsEPuaPz-Q8\"\n}\n"} +{"key": "ce36a9f7", "response": "{\n \"id\": \"chatcmpl-Cj5eXbgbJXgISDcu7NEdu3eejg1WO\",\n \"object\": \"chat.completion\",\n \"created\": 1764862525,\n \"model\": \"gpt-4.1-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_HzJ12UBeTZDyVGCNi7GNKzZX\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"multiply\",\n \"arguments\": \"{\\\"a\\\":8,\\\"b\\\":9}\"\n }\n }\n ],\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 178,\n \"completion_tokens\": 17,\n \"total_tokens\": 195,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_09249d7c7b\"\n}\n"} +{"key": "48d1dcc8", "response": "{\n \"id\": \"chatcmpl-Cj5eZdBT5FSgvT7nzlbPNMWZCqwZe\",\n \"object\": \"chat.completion\",\n \"created\": 1764862527,\n \"model\": \"gpt-4.1-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"(5 + 3) = 8 and (7 + 2) = 9. Multiplying them together: 8 \u00d7 9 = 72. \\n\\nSo, (5 + 3) \u00d7 (7 + 2) = 72.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 203,\n \"completion_tokens\": 55,\n \"total_tokens\": 258,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_09249d7c7b\"\n}\n"} +{"key": "063e440c", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"a\": 5,\n \"b\": 3\n }\n },\n \"thoughtSignature\": \"CsoJAXLI2nyi+SWD/EIPkeNQgWjtEr0fmsi/pcJlk7Oyg+2sPpofUjK097Mg5fGTgPmllZru8StllpOcJVLLPPvWzpKkkeaGVj4ZDnRFvSfJSlVo7GMyRTWimJNRASCEAWKTUCQ5A/vyGqhYDRQQKAEnBtdcqfG/+QvhHvUr77XMLAWJTR4kgaczZV1igOnGjrhddo5d5Ti/bl2CrwGIem5FQXR1R0SOjrtsXDDD3KJFX386MecRiCNHmmBLnTL8C3hJ24jds5IxqDvKvXcxYxdTFVdF5vwN2IslkxaBrrHToNTlCj508WC0a8mRNqtkWGV6GQAfnSubqhhCMp9Et7djmDQgfl+F1fw7YfIyaPDIJgVgatLZhOE4Xyceku0fGY3Aqxmj27i702bMaLgVvOkB4BTgsmtKMPeYXxPaOapAYdgAw9Y64y2sjd/3yBvt+rX0V/rCX5z4WISavzvu0Bv5Bi0XZ91HzeqhFdR7vj/X24HBZ7peszVXdx+9V0rscIFmJGWulSVFtL1LFz0T60ig44ms1yECZQCsbn1ChJ+Eev5H6XhCbnRBvqht920GGVSo/XwHEzCg59b1vhFi59Ndt5PSwVmxIWydxSsUYT6Tfv2cFX2HhA1tyPB2Fch/Fbb2vx3pFX/CfHAWmub/SBCxhgG6zZqJ4CiXDTbKtbf636fTO8OtnC+SAbVBKjaylzx/6iRtx0nnz4wdAI9j3qcX6duXQWhTtlghHU/xBzmT6HovR/O60Oc4v2tzPig/qtWIFjKdLLorB0F5ElQQNqtvpsctZJIEJX9e1+1SXH0LdBERe2DdpamnSxX1U9yKCcCcZfwQkjNpvKDVbwCZ6gsASe2LzKhoRKzaZ+hYw1loksQdFto1Y2QHVLB+cxhhAZ+ADnBL5cdufwmpVi2kFDzM/rcUxYXPKll0f85RxNNT3uXvtpfmQoiGLvq9rY7KKVf6N0/AS/6RvIj50PSXWoPax2DzRRFp7etN1ehTF041iXAxTNwZJD7uWXkZIQ0eeBWTm3IeWyw72jhDfnxmssWzKDRUUYIFaN9sONIJP8lRvUikVvsmmh9POZNuycugYMHKM1Z/jJgOuissZGY/wwthpjTdLs9aX9PSOY8ClfjOMwDq7TpbYVNZRpej0dD9r8BJl1TVn0jM+Qsz6x+tuLtDsJnIPlx8m2MKYx9qYrDY4AimTcJ1IIPGe7xAeC494r5LtTZ7GKdD2eS+JWsn7prstD3/hxrYt/Y1VDc3L6wW11XgSZugWCK21nW800AR19aIqVw9xqycZ29qzkXXVcAlKzyIz3xW6HeL1ZSm1VehnunPIbDdj9PYrYc+aWe4op2trDesPLwd9u7Wj3azkIgNLwjhiDbbYFe6iTziEgQW4C42aYLlPOlsDO0QIb3wBWSh2hWkb65JblhQwlpNEOhFSWb4oVYR4XHNXlgX98yg13mrUx9pvSV5V0K5W52qyOg1vv8HRatQ1pERP0oPJwao0Mu5GLQTEB3V+zCKVr/uUgE/zfwIwhYsarRwVCqX9wFy+y6QxgImjBgiPkfS1BrblO+tTR2p/XSDzr/5glVy6t8nIfgvu9bTf/mrvNzS/y4ZHXIj7xvbahWcNYCK0cbVX2Fnk+6tH0HeO88KrgEBcsjafFlbendVA5x2dMVfBPV89SuXazNAD1AnvHXSrSYEPbpQMfXVqZj8es5lsjrecWk3hfUOOJCPYyCehpC6oI+l1xXC/XU/b5e53Avdtqg4+NvwIJa1FZkf2wm9jAFamvQZsOchRSAHHF/KwM4AOw+RCBwCOODRm+XESbhvA5m6p7mwclV/ell/gdl98cGaapFFUGue2mHMezPIUe2Gf7i068X1bQjm2COqmRoKXwFyyNp8Fc2fQ6ide/xeMpRt1JChE7n15+5qkf/OxwiBcmsrCELf1vqqy0ZAXTphc5al6hKSN1gz0Bc6fEvwn2BxYxEgXVjvFEP35GCKwcnef65ak7NqbfhuKdQVzb6o\"\n },\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"a\": 7,\n \"b\": 2\n }\n }\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 133,\n \"candidatesTokenCount\": 40,\n \"totalTokenCount\": 570,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 133\n }\n ],\n \"thoughtsTokenCount\": 397\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"RqoxabekBeqFkdUPp8qSiQQ\"\n}\n"} +{"key": "1388be57", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"multiply\",\n \"args\": {\n \"b\": 9,\n \"a\": 8\n }\n }\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 200,\n \"candidatesTokenCount\": 18,\n \"totalTokenCount\": 218,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 200\n }\n ]\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"SKoxafDZN8OdkdUP0Pao6Q8\"\n}\n"} +{"key": "3fa6a2f3", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"(5 + 3) * (7 + 2) = 72\\n\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 231,\n \"candidatesTokenCount\": 17,\n \"totalTokenCount\": 248,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 231\n }\n ]\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"TKoxaZ6eCoOzkdUPie-_oAQ\"\n}\n"} +{"key": "08cafe8f", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"b\": 5,\n \"a\": 10\n }\n },\n \"thoughtSignature\": \"CvsFAXLI2ny8YLvBawTd8D7gBw9VmF8EuI2U6Cjg78oPBdDYqQ6SJ9VeYHkwte8J9GJUOSj0XxmWCDSnIUCE4k1ywaBXjKvCBEULv91iDTIlWtLzfAxqoVce6byGsTL6JRCyeLWtLn8pjbvQ2dCfMEc+JbwZqmjgkA5kkRVXbMSS+clU8gJ4eZzDEM8ybvBIbbK0SpHlcKqwnpADIGlRiAH/3DXaZlEnIqSlaVZKFtQIbx4K0t3R1+BuOt2nIuss1i0Vwxa68ywc+UEOfl8eBmVcozr2VDTDb6ce5cW13yQ6gpwf3VDUMb5C7R5qFRgtUijGac4QncRTnQMreakSctMA/wdL6DxhDs82I67+9EP0edctTO3DCv/VjwIAejTUa67rH9O6tmteIcNC0Re2ylsDXPH0rZYHH+FQcNIYGBJQkXL3oDXe/AcT4YWGjCMQ1TNNPiOgyYiLwejgzCi5/mgddl75RNKf+j8m4tx9yJSi97ATkj4h6NBR5XjiMDXxZwwVCpKF89csyiP6zSqddweWcIStJ+VLZEy4PEgGjKeC+YGEqMQmr2GCYAZtsaRsixvDpZnvqy4vBbWz7h31DyTZIs7KhQPBdp1B2EklwCoUhPTId2nD3xfTTC+BEsCWylp7tpvvzDL31A5D3mxUMnxnNYhA3RW9W8Vru1LICx8ChdHFkjhkZHO3PFcAShL4SiGGMbltDUKqVVAks1m+sFheO1kX5yc5GLtA39CSel6ZWsgyIwDecufseaCRXDkW5KnoakQmA+UMHos8yNi8tsIFITWEerc5oOWnmpcZxuL2z5YnJ+ntOnFREvY6z77lzELTjkOFPmGOnqBoECiFatj2ZC1o0B8tnWtCIkXMYKDYYnK2PlhCRfwlTa7lub9qtVo+4SblW+lcyTPifPy7w7uhT2Wkv1sFwykB9LUfnpcqExS6AO8lkng3JOjBGa5w2l2uoBa+232aI0ukPI6XdyStCffI4FRp1QoDVzLj0EZtvJc+iPSeHgAeJYMsCQ==\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 196,\n \"candidatesTokenCount\": 21,\n \"totalTokenCount\": 478,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 196\n }\n ],\n \"thoughtsTokenCount\": 261\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"T6oxaYyXOY3l7M8P-8aO6QM\"\n}\n"} +{"key": "1053e420", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"b\": 1,\n \"a\": 2\n }\n },\n \"thoughtSignature\": \"CoQCAXLI2nyE+/Pk8/Z5JYl9tai+rE0NrCA+TEcxB3NU/pknbVuQttrpWaN+hjPEAbEHo1+q4wCLeIkIto5Al8x2UCrssbg0b2b03Wh0MOpIM2Y3Xf13adEbXQN8FvL5ORxt5oxWrnGq19kzf9Ln9/SRQK1JsfK4TB61yp+XI8wIWHIPpNCAhGf/+XYfxL7FmAsMMtt5F7FQAkXUgzLZwhPGeXZ1yMQCyAU1RfHvc9lBswHldffGPvdL/C2juLH0IZy85aBSyjXJFkawjWZ50qUnQP2HiYedyKXJoz1irGU45jUM5QphiLSxXW/1Jan/s+HLsuYuGhy4Wa/BwUhepxjTyu0oby4=\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 232,\n \"candidatesTokenCount\": 20,\n \"totalTokenCount\": 330,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 232\n }\n ],\n \"thoughtsTokenCount\": 78\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"U6oxabOjKaG1nsEPrJWY6Q8\"\n}\n"} +{"key": "871f5e2e", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"\\n\"\n },\n {\n \"text\": \"Of course, let's calculate the final answer step by step.\\n\\n**Step 1: Solve the first part in parentheses.**\\n* 10 + 5 = 15\\n\\n**Step 2: Solve the second part in parentheses.**\\n* 2 + 1 = 3\\n\\n**Step 3: Perform the multiplication.**\\n* Now we take the result from Step 1 and multiply it by 3:\\n* 15 * 3 = 45\\n\\n**Step 4: Perform the division.**\\n* Finally, we take the result from Step 3 and divide it by the result from Step 2:\\n* 45 / 3 = 15\\n\\n**Final Answer:** The result of the calculation is **15**.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 280,\n \"candidatesTokenCount\": 164,\n \"totalTokenCount\": 583,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 280\n }\n ],\n \"thoughtsTokenCount\": 139\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"V6oxaYi1OviskdUP1ojUwQQ\"\n}\n"} +{"key": "15a364e8", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"a\": 1,\n \"b\": 2\n }\n },\n \"thoughtSignature\": \"CrAZAXLI2nxsCoK/0tyOzlRrOECiLKSBLp3CWhK3/N9pWfw9Y7bhRFxBXiIkAnaL9XeBqezVARCHP0aq2ruMEpa+Z6S/eHA1eHPuK6bE6zsq3hB3BwhK5+Q4aAwajoPzY+CsEpSchbd2xWJBl62qB4ORhjJjfArkvAwD7D61lR2l1tFutzSQNUAqez8+09121q7uIef0bQGB402TZuxlqPe60o73KWOlRs/nlM0Wnuz63o7/uifLYdPuD1t5avRAcFhriAyJTyVp1aye8OhZj/RaWHXyJr/ArLAryUPZMPIEKhVtNn6G1M4Agw/I3/H6BEJKm/kkCSn2a5pqBeiNaE+Oa7aHra0SRLiGJzjkt3IsCjivcaOY0AX1a/SMDIHuXPjaUdL0FvlOhldQhxYCVLTmQ9XRv378iZanRf9sdmxvyGNY4pWqCxi9kPL0R2Zwju1jbr8v5XZwLS5swyyXSOsw0KT2pXiYXZyJAHcVcSTSrsNqna6rP2oZhkowIr/riO/KSBbRDkFhJPLCEBDYQ9ZsDxp8QHr8OGXljjkhIbRhQ7e64FZDLCz0MyPL7MylOYJ6rSt5Sxz3JArR4mrCGGgrWb8CQ3RhCXvkIuXxwnfyvLe5uIZivTZO7htoFLMb0y2NlOFKu2I4DnA68PtUlSrIxc4VurOGjtgf6KtgGX1s0J3On5tisA/5fQeCLQvNCjhxnDUPvVA4pCXqjcn321cy0PTynYy+/RwcDO9CVngaH1KMGyIlIgkqzSjAsVng9Pk7JFWgsaFEQsnYkcEExA/8izipHnTrGUozHoWF4nUkGg7qlAqL+1BDpWg7Al4z7k0Z8OZSWU2JcMXwXbiKyJqTJoueEhh6CgETQR5qjGTuEVzdMKPojUA9dxEAe8xhz4gTn+2xJ2+I3mQiA0r9CKL5EGpSaHv1N6O+SVkhJUXVsWfVPCfPPi0VuSsqNexYpy/wSor26ZgSTNxPcVU5BElIBw13ToaBzlHZGDnB36DetjWe7hF36Z2rCvDCny3l6lw5j0sD8O4feaE4XZ9IqsYLjsIG5TX3EgJYUvity1eJhAkUAp9ABYnK7BVP2o6P/4RUPLsqnZgtjjjrjSDX/kCu8DLkGBFUe9ISL80qH8gVs6vOCgS1a7xZZAkekf4VHWO64praVHmyinwhB1Ie9Jwdlrzf5u8YskB/gyWuSiOrHcQLlk0cQ+6jfiDVOmFOM/YTX/IjjAKJOIpVkP8z9gNbHq9ZFKOxVHccZJwBDBS2qW2/cA5GR1IJZLOTP6732449i1GBnAXqwJXC8LACe+V3beLcYXueCSPTNTJL+3+O1MFNmRLPV2ffI0Ptlo14T1ezcYwSjqACOMAE5YgleNHT6kkqFgemVquVmDGS8JiYIt9X5G6AdhJ4b4IOePLA21IPvyq8yRorgpKaEsut1RheDpTf488mUUIhEpS0/jtr6psz2PEXbLSr9gowOB8oLRmETnVtOiszLNez+pWaZ2KotA3VV22yfUhmK42Mk1tL2LrHpMcud+8lmp870spvyEm9ebPVEGB9uyVyhhyUr+zsYLBEj0ymzB8dzlLXZXcgLYzeJ0KWC5YpjJSakASONcocAiCM0JjGGUUR8kC65N4IFkVaLCNNnWX4iBNW6OELzgUwNfKjz+wSA+5TDl602bu/2A0pAHysPKrOqwiZJNkNTKXnaVNXdezCW1umFGLlS728qUcK/NYxuC3eE4D36Nf3mY6ZVA4JRwXgHaPrTPER2fUVyJiWTRGoI9G5mgBP/8bFpgZAK9QrSZJnZy0yvagpevsexrHz6JmQAsB3et6bceHYLnbGIBW4VZ2XlYsvHxMNmQb+4AN0lixMDUgUL9jMiYRPoVi1KHF8H7UFluAo2Eqj27zeXC6WO5qWEKibtpzFx8ix1K7i6IvzYtd7xulXB3YGyITzJUNWRwON/6FdzEtVyRkdlMpqX5Qzxs80qismu2RcS15cSWyIN3Ys2jyW7NDSmaNQNeZHigqRURQ5PouYplEWMy/nVLHWiTDe+Y2dxs2DcuwocfJpZGPkdJt9741mZc4O+m1AeyE80XgpPumxhsqfYdskY6hdCai0qoCGSSL1o38J1+fPZ+aDTd0iTUY5JHEa8oQcK6YTUtS713GrDz2dRIKz1Y2/vof860RRov6D8spgrKgSjt0WSnRiLl3NQ5mdaek4tqi9E467ahfjELqvbW5t99XKPI0+EMFi99+1rPcUFcdU7ZdTC8ZrE3JbmTRkxgOghTCrRYNJ4Px1Oyjv1KbiuYAfZ0+5SekjkVboOxrptGqwGNZKXs06ET2rqRnSe78WNggOQEJG0B7Z5KE3QDn14yILAIR9AC+7Mlder+7GQrxeURMF1RUR+I1mNLV+Z0QI6RXtBaDd6bmfc8yAxvMMVN/bSEWe6TpKfWGnKJnADcJo1dDZ+MyNT28ag2F3dB4hL8hYojym+XBfNdNlmIibWgAx3fHi46fLaJw22hKCIzdAbB/VTfLcOyxi9rr5rESj0uePDy6l/hionlv6sjX5eWehCK63j4DLhQh/SMpMN1pFF8cdTymVmlogEg27k1Tjc6ooq5KGGDeVonNue7FaaS3NlLyc5QPSrZfow8Ham0DTtHcYJW1PLpivTdjewNE26uLk5L/yOzFCkn3bDAlVzaQfbMKYr5twzfMLTo55gi6JGWj8x80KHA5dpuF+rdaUP/c4/q2V489ljNPCHUz07w3H77HmonmgYqU2JXPLJuVDJKt8feXiT/0T1SoCelcq5Jprt06owTZHlp5I194QMkPOU3piPqK9iDBOQKpR7HS62h2lJHtr7UicbEKC71YflKAdnE0fAKZ4M9xyfkaS1yPAE1LhnfL6lwMYX9fPn7I4sLodOjlazAC/ZJwhW/UY6sfwvuiaLy6N49iEciIfbJ7xY38bpzgxbPDna24c8LJNIa4djxp3phCuySF6+7uAe/Xj73cvX6/eQgt+iyPIaapcQafs+iVkOKectNAIrAdXQpHvVem5xUNlpA4U/GOgozXHAOM6/VrwtLeqthKp2CC9cocv2F/op2Jo8D2APcsyjTENbLs5XprADUbmZdcTvz3Rwk76XWGO+oM4tH6rVZctkYZOwz/eYyL6smFNOWzPIlXFFvtNJ3CFNRRsLqwsHzHgn0ncFfdULxx0Zy49FHWXH3Js1K5p0075SOkHd9liik9cTI919iynqcaRvsMEHf8mBd9Q0bc9Os/30Aqb38KmVYvt7f/dSldF2hZxEktLRFfBFIgYQsF4QsnmiTR1Uur1bSXG0uMKo1Rm6F38qa5F7TfclWon5EHbS058jtuPD3U9W4nL0jd5n/iJpoYEBUf1Siaw0exl7eX+ufG7celvuil5DW/29ctjrpVeajN0jlQUAxQFH0WzWcEpMsb4Y0tAbgAVq2SromFwrqSKGbdQaZxMffJcCZIb9QPVUPf55dSaw0Dj4mTVgjtO+bp1XTkcIoCHsxy8PCv+BwI7YANT/XQzKI3ZK9rnEt3RHPx3OMEiJM37S2pPg6kC5MaYtfg8g5GPYoCE6SV2/gxvICG4EUgQrlZdVWGryjVeFnrJyuQDC9LAI1CUc5GeZ+h8zlKVo+kWt+pBc/jmX9EaU1qq7PjOdDS6/QqIkJOEjVVescDmfi0HbWCsDxV0/oKb0t3Dk3plVSD0utwkHyG3uU+vqzXBNbc8ImO5lXuhmigCJf9TzcgmxOq94UrfhAvdszLWuitkibb2EZBZeRhsjD8rW14y+vXrhI+3WCkY1dERnI/Zn7G8CInDfVrKDrWzRDbMBkvAZExqNpvWaXIt34XJsKj6kTAotdjSH7hE8qO2hJ/2zXJ6c9YN28Mv4XBAqnHZ/GC03ZNAFOp+L5MU7va/vBNV6ZljG5mqAdD21o+I++MPpI2s1tN/hxEpirrCeF4vbwRO5ZnSXS9xc7MyflXDyap1SIig9fi+RTU3NAA9tLFYLxISk+XN0+zywIrCxL92XGOsEw9ZzEeH+bARMKRW08pnN4P5LQ7fcSjwdvajsO1Mld7xBlm+LwSIAOJJD0zdB2qqY1tO9uUorrhtQoX80v+JmuzeD0NFgAWfrdZaP0iRiHdQbZe25+nxh+quDO4Dv5x9WmXf5kZyhTDa6q4LA7Y3q0WM7Dtk3+gmcdjE9sAq9Ags0tDX/E9ZBxh0lSD0Svu1Hsnvbpk5wXyvpNMnHFVNY1QuEXmzJ6Hm7W0W4pK79xCTODfT0UxEkdtKAR+uyTC1aNWubvxfF6afj63KO6my7waQZl89vzV3S2o9Z9DVf9I5OxgFEM4FzYXubTCTXNaVg/Oe1itD/t2CH/HEVtETFjQ=\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 95,\n \"candidatesTokenCount\": 20,\n \"totalTokenCount\": 1030,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 95\n }\n ],\n \"thoughtsTokenCount\": 915\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"YaoxaZK2CbKhkdUPxLgM\"\n}\n"} +{"key": "be38125c", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"Based on my initial calculation, I found that:\\n\\n* 1 + 2 = 3\\n\\nHowever, I did not complete the full goal you requested. I was unable to perform the subsequent additions.\\n\\nTo complete the problem, the following steps still need to be done:\\n1. Take the result of 3 and add 2 to it.\\n2. Take that new result and add 3 to it.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 169,\n \"candidatesTokenCount\": 89,\n \"totalTokenCount\": 828,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 169\n }\n ],\n \"thoughtsTokenCount\": 570\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"Z6oxabWSPMH9nsEPoK-IyAQ\"\n}\n"} +{"key": "7975067e", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"2 + 2 = 4\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 8,\n \"candidatesTokenCount\": 7,\n \"totalTokenCount\": 225,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 8\n }\n ],\n \"thoughtsTokenCount\": 210\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"i6oxabvPNPebvdIPlf7jwQg\"\n}\n"} +{"key": "9ca9b7b8", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"async_add\",\n \"args\": {\n \"b\": 7,\n \"a\": 5\n }\n },\n \"thoughtSignature\": \"CsEGAXLI2nziimeXQJFA6Ji4jJ4nO1RDNAAKHUFLT55IihZXAWXS9ZGJT/nD3XibbDf0+MDbIhpJ+RZOLRZSvHPcdWNEscg8mINDnZu2jaWAbd3MLM63mQTLwaKIGQ0mM/JzqNwCqhPFtdldHEFz+zYXM23dr/9Epon0d3/fbFtA3cErty8xC/Y9j7sNf4ATlRYn6rm1Geb6sZajuSjkqwAsguOwdDm0r1jwHprKK1ucco1tte6w1OyB0nYZPm24847ob7haFd5hc9MTvonmBMzrywALc1h2xrOEedOO4wsd/DkeYWWkOuKBxDBwvUMbBCumZMTcA7VzxG2o2rbmprntAMPB4wO1+oWFvvQ4HehLJ03Vv19M8FOUhc2GR/ZuIvYgx4Oz+DzwXkqLyHFZkn9WwX8ZMqTGkEUTaPoyUWS2g+qCx8kRSF5NQ2zcUV/qvC45jaf34+R3WLEgk8GXdOAGsYpkjUijiio5l1PVzqsCmk8cakl4qrKHFUHSIX6LnicSxduU7K8Iqmdlpo0sqVSuPoLgh2I414nLcxGqznni5bm1sMun+UVxx4F9PVJOMq3xmD6TbGMK5Kxco9MYnkO9rSx6bipyjNElc+6h1YIahP9qumV9mbmVZdPVA5gjRbCyRycuhRPC5pqIgy07MfHXMyljz16PCzonWtYg7qeRFBxdgx/+rH3rjJZ+O/n/afwlWpKhQ78I0ozH8x/TpsTxM8qhTgOFa0KHrj3HLE9o67Ru/NMhgKH39Rz/lAculny8QsNT0NijLF1pLyiHvm9hN5Qz505sUSPnpJAoUG40YqGB0z0u0UlXoKv+VhgGWEAZmnmL4+i5b0dGEJjHpNDAFK0/SPSc4B+Q4P6Dad0fFneUJQsR8VvumUxQJR8ioRyIENa+qPQuLLCoA/uN8wanuicCJv2iNeLwvS1Y62rQNPvFokQBL2yiZMWmJFzvLk83AYTZS7t6Gi+r6qeR//rTC1fvqIdrX3nVLwxGRiFEvVMuvKh03qXOXJJ6mOd6AzVU1m0F3yV5QSEZjnT2K7vseS9Ci8b+U0QPxuYOKfWg1R5NYj0uGjHF6eqoeKHwiNiWBJTJzMbtBRhK/0l5LlzXYgA=\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 73,\n \"candidatesTokenCount\": 20,\n \"totalTokenCount\": 309,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 73\n }\n ],\n \"thoughtsTokenCount\": 216\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"j6oxafmBNqyCvdIPu4PziAE\"\n}\n"} +{"key": "e3139987", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"Based on the tool usage, I was able to complete the requested calculation.\\n\\n**Finding:** The sum of 5 and 7 is 12.\\n\\nThe goal was successfully completed, and no further work is needed.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 148,\n \"candidatesTokenCount\": 46,\n \"totalTokenCount\": 772,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 148\n }\n ],\n \"thoughtsTokenCount\": 578\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"laoxabj3NJ24xN8PyZPiuQI\"\n}\n"} +{"key": "7c8ef967", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"functionCall\": {\"name\": \"async_add\",\"args\": {\"b\": 7,\"a\": 5}},\"thoughtSignature\": \"CiIBcsjafHuwU4cK6Kpus2x9D3E4lG1q4SHrHScUPituJtHYClsBcsjafCAY92mvr+WKuite3oiaiuarKxrYI03XN5hhTV8QXGhOS5MH/nvmUfDScFnQBs2ckrofs8I+7s9HhcMMdctW4itYOTunnVDqzKdhTtkfQPcyWs0TuXgaCowBAXLI2nzyTD5Pj7Z6rZ+CFlKdgzEb4SK5ERJhIV1sh09swtVg71QOPl4GzyHNZiQeWHEawqHUgNQwijzLD6qli+I5H/lpq4vEF1DPQKhUlInKPI11YDCiNkTmTDfE2ShrlORJ94YSegbYD1aCscWugkgof3ac2CbATn06x4Fw80QLSzlxV8/+26b4C4kKhgEBcsjafLrN16P5zwelYxLqLIb+/FCcK8cFbvGNzeNSgo4wSBRmMGAR6B1Jy77dzpazmebiOiE6iOwZNQBi3tm5QIfSY7kBHY9b0ofh56GvRT+FM/FHP6k4dy+ZSM26Urc43MhsX2U8mk+owqirwIq34qVssvjd7xy3VD71vYsiXd4OKSlHQgp8AXLI2nwAdib+UJhos55OM9qf29ALav9EsDuRD9vif9+BnxgBqvCz/mIdNlpRHqAEFTBcdWkBlhseou2ToQXHReQPgJg977puIj5h5khgaiBuRfwxgMhmI5KT8Dc20KdcdsHLZ1D+fdrPyG0L2ZwQz635MX/EXe5fAvqOtgp5AXLI2nz5jn+zGrAwt7POCRxIzyapHU7HUSi7esMjbl9tf1KZwJqOLiDSEn1d/1TmFkfkXN7sQU9c3qP31CXaNQ/GPUv5mgBQZhIYUfTKO8lICMwOAoOMfHZ9a8TPtlsTMRz+dco1ZUg1AIDg5DHyPF3w4UzsK/SWtQqJAQFyyNp8l1jexvzmwnzASjp7S2QTFuWf/KEaVpqevoy4vR8wn9hVrB5YtuydA/kKEq7mdS+Icc8Yr5ADr2d+W6IV6ZFj+TS4GiGUAfrD2SZYGXp858qElYbCCE27Ixcxzlnot3UaWKNu0Bnfpi5LFChmcHkrrrEsXyrO2rckYr6YSva1ZqOqDaTlCosBAXLI2nw26y1xMFVZguIZfNAyJROO5xY7QSA0tp0DThmbgjb+D2r9mPTdHC8/1z+bje8qt4EiL0EMfF1UL6R0GoOndCoD3ovtLVAcIPJN/ZsaDfMUzp06ivdvAg34bnCBgKqEpK3pmxStU+JereMlYbJvydsWOTRpXTRF2J3LRh/T7cCIUafAJVU0rgqWAQFyyNp87zPqN7CCA5TF4CJJt97AM2pzgmKC5d3l1fTu2eHFIheLTc4IjoiuQy4yzerfrrhUJn2c9vZsX5PtdxY1bmj6Yv3c8e17MciPBs+c/Z6yjwUYdqkgMpR3RK4aqirE5GNW6D7rHKuXeBo6tAnVS8sPE0rrJ2DSlA2jHoDvxqqdn56oRYmaqPZLwQc2RTp/KsvQagpFAXLI2nynO6b2A2yWL7jxVhvDjcc/EdfciKpd2mNi+ur13yqGUeMjpIej6ah05MWCBPMdSbVrxatl83/Mz8V7GZzxr+vb\"}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0,\"finishMessage\": \"Model generated function call(s).\"}],\"usageMetadata\": {\"promptTokenCount\": 73,\"candidatesTokenCount\": 20,\"totalTokenCount\": 315,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 73}],\"thoughtsTokenCount\": 222},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"l6oxaZLSOPWfvdIP0OrWkQE\"}\r\n\r\n"} +{"key": "2738fdf9", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"Based on the previous turn, I was asked to calculate 5 + 7.\\n\\n**Findings:**\\nI used the `add\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 175,\"candidatesTokenCount\": 27,\"totalTokenCount\": 711,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 175}],\"thoughtsTokenCount\": 509},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"m6oxafmaKtK1vdIP4_-2sAY\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"` tool with the inputs `a=5` and `b=7`. The tool returned the result **12**.\\n\\n\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 175,\"candidatesTokenCount\": 52,\"totalTokenCount\": 736,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 175}],\"thoughtsTokenCount\": 509},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"m6oxafmaKtK1vdIP4_-2sAY\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"My finding is that 5 + 7 = 12.\\n\\nThe goal was successfully completed, and no further work\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 175,\"candidatesTokenCount\": 77,\"totalTokenCount\": 761,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 175}],\"thoughtsTokenCount\": 509},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"m6oxafmaKtK1vdIP4_-2sAY\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" is needed.\"}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 175,\"candidatesTokenCount\": 80,\"totalTokenCount\": 764,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 175}],\"thoughtsTokenCount\": 509},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"m6oxafmaKtK1vdIP4_-2sAY\"}\r\n\r\n"} +{"key": "9940225d", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"**Defining Efficiency**\\n\\nI'm currently dissecting the prompt, specifically focusing on the meaning of \\\"efficient.\\\" I've realized efficiency can be interpreted in several ways, and I'm mapping out different efficiency metrics like time and space complexity. This analysis is crucial for determining the optimal sorting algorithm.\\n\\n\\n\",\"thought\": true}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 24,\"totalTokenCount\": 95,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 24}],\"thoughtsTokenCount\": 71},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"oaoxae2cKJf0xN8PxNKIQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"**Prioritizing Speed**\\n\\nI'm now prioritizing the speed consideration, given the list's size. While theoretical efficiency is essential, I'm leaning toward evaluating practical implementation speeds for 1000 items. The \\\"random\\\" data also influences my approach.\\n\\n\\n\",\"thought\": true}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 24,\"totalTokenCount\": 348,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 24}],\"thoughtsTokenCount\": 324},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"oaoxae2cKJf0xN8PxNKIQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"**Assessing Real-World Speed**\\n\\nI'm now focusing on practical execution. With a list of 1000 random integers, I'm thinking that while Big O is useful, direct benchmarking might provide the most concrete answer. I'm considering running some comparative tests.\\n\\n\\n\",\"thought\": true}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 24,\"totalTokenCount\": 629,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 24}],\"thoughtsTokenCount\": 605},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"oaoxae2cKJf0xN8PxNKIQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"**Evaluating Quicksort**\\n\\nI'm now considering Quicksort's practical advantages. Given random data and a list size of 1000, Quicksort's average-case performance is excellent. Implementation is relatively straightforward, which also factors into overall efficiency.\\n\\n\\n\",\"thought\": true}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 24,\"totalTokenCount\": 702,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 24}],\"thoughtsTokenCount\": 678},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"oaoxae2cKJf0xN8PxNKIQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"For a list of 1000 random integers, the most efficient way is to use the **built-in\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 24,\"candidatesTokenCount\": 20,\"totalTokenCount\": 722,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 24}],\"thoughtsTokenCount\": 678},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"oaoxae2cKJf0xN8PxNKIQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" sorting function** provided by your programming language.\\n\\n* **Python:** `my_list.sort()` or `sorted\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 24,\"candidatesTokenCount\": 45,\"totalTokenCount\": 747,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 24}],\"thoughtsTokenCount\": 678},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"oaoxae2cKJf0xN8PxNKIQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"(my_list)`\\n* **Java:** `Arrays.sort()`\\n* **JavaScript:** `my_\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 24,\"candidatesTokenCount\": 70,\"totalTokenCount\": 772,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 24}],\"thoughtsTokenCount\": 678},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"oaoxae2cKJf0xN8PxNKIQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"array.sort((a, b) =\\u003e a - b)`\\n* **C++:** `std::sort\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 24,\"candidatesTokenCount\": 94,\"totalTokenCount\": 796,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 24}],\"thoughtsTokenCount\": 678},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"oaoxae2cKJf0xN8PxNKIQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"()`\\n\\nThese functions are highly optimized, often using a hybrid algorithm like **Introsort** (a mix of Quicks\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 24,\"candidatesTokenCount\": 119,\"totalTokenCount\": 821,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 24}],\"thoughtsTokenCount\": 678},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"oaoxae2cKJf0xN8PxNKIQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"ort, Heapsort, and Insertion Sort). For a small size like 1000, they are practically unbeatable in\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 24,\"candidatesTokenCount\": 145,\"totalTokenCount\": 847,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 24}],\"thoughtsTokenCount\": 678},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"oaoxae2cKJf0xN8PxNKIQQ\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" terms of both implementation speed and performance.\"}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 24,\"candidatesTokenCount\": 153,\"totalTokenCount\": 855,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 24}],\"thoughtsTokenCount\": 678},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"oaoxae2cKJf0xN8PxNKIQQ\"}\r\n\r\n"} +{"key": "f34e7838", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"Of course, let's break this down. First, we'll evaluate the expressions in the parentheses, `(10\",\"thoughtSignature\": \"CiQBcsjafJQlNwBAipdwk+lu6FgC7mogxFxRVQwJHHAhGuQQAywKSwFyyNp8XLsYbiBol7J7iTw0wCnYpccn8U/hmAK3Amj0xtI4kfqysRkYFCaAYzBZp6jgDrpTFvQBPH7yg3NYzVq01v40uZhN59Jj8wqGAQFyyNp8Rq3A5kTSJtwfe6hzwGqb83cWpipCiE+krhxwGBR0sYI1Iwwg8a5Wq9CYoBakaws4i4IpRlhgRKjfKIRsDV/xzktmrTAgP2yucLEJoXDdDhHiF1LkTpWWSYiYkYzIbaFcg2WIOQzdzU/qJJ6O5bn5LS3ZvZr/qME0XPtA+8h8W1fMCm4BcsjafJZcuCJEtioPx1UqShidvvpH+uomYgP/7YLCtZwU367I3R7sgWMHN2R8Xp0GFPEdfr1ORTB1NiukCykeSLCZWgyYiazTEawjuJdUpWuSfmArOovPsHERMlNj9qQ46PmcXLYfLLKsF81fGwpMAXLI2nyVrYYyA3vDDbJHxk20/e+/GDqHvcEEMOl/3+d8C4z9yoKndzOmv0nQ7dDFyQi5qxLQ6sRZwru22z64ReBBdktzODQfsdZIBgpyAXLI2nxqfpLHPWxcuo/YNQLKKv7DepoSXcYuPkEEDzNMmQXlkrRhttZeHMX/ncCSl6C0uppp0Ry+RE9TUujl4UcWHAZKU/IMxtVOBhdHxlED6rvvzTKPylHtVwyfXwFt7/DcODSAT392/hK05biZMyoVCmoBcsjafGxigk1Z4siAv1uC/Iw/Lt2K0M8lkq/38OO+yta1t6llLb0NVg6ErtNnChrNbdy8PK4/zLupFsV9O/9lSgudxRGFfQwds1AGzaqZ07fKuKsZEUo937fkiAGpiekI32Ydj49hwY+WCoEBAXLI2nxxrQC9yLP6iAFCoMiMqwKlsjrMU1AAwVTOt96TpLr0/OnPTpBGIYvnkWtBOm+lwupZa4S+RB9AaS73E8tsr4tySHlKPfA8bX8Fns2RQglg1dKR3vatKHHBmA8I1FnIPncr+4HlHP+oyZ2XYiRmFJEmon3D8NCc1xzioqOBCnQBcsjafK4q7c+8gP4owTOuQnzC+NjKIblZXNlW4WEgnKNejI1Pi42lhrITmqdv9AW1Fq+T7ERJbN2Ycms9/rkLh8w8DH7a4JfhQ91KcOmvIJm53t+UKLb9GiVOkK5pnSCu4eoF9EjaYdwoseI/RhuYsQsRAApbAXLI2nxZyEAtEoLuzUuCfHCEtIU4EGFT8F1nmLalxrBjJgA2IZjPclqQS/NRe1mIqWUBineJCLR/WJ4IgwaZHiCMbb03Osw6Ux6Fcs2vAO7JICIzowHk7OK3XQpgAXLI2nzwpJO46iMnk51jCNISyhfWFuvqHNiCVWPHVZ00WpA8r/rpHx7jRhZwQfTKigu5p9TYVjkp/9zbsC2yCM0cIlikTlItYEofwoRTNoDWuUwwzg+3sg7K0WyXtAHxClUBcsjafEoQ50SSh/sLk66D7fYvMPsWheNyIbQboQ/DlNyM5PrHE7Dbezk4CseWW521bhq2YCt4f/rvVzoEI+yuTxRxZHdrWA5q7PzAEC2STPentq2nClwBcsjafP8FV9Yt5hftaOkoDcUObgfHj26aCRzEt2gQKiwHo4lauIP4F0WyJkBA1/o068/9Jqv2g5P1rUDfUALkt90Wb0DVzlC4HrhE7ZcECM74rlBhZi2GZJsxZQqBAQFyyNp87Hj8LBw097kqQw4jLg2CO2ZPL65TBnz4LZrClhDplnYpiyPspNgnIkcqp0zWJm1r0DdvlACRZ/Lwe2zKBCzreyx/kjBZcokk8TtN/Mio9GAmm6KAvWr+lwzNgQvzvXC68R18ScOTVApcXNKQJVvamPRAlI1UoderCGirGApxAXLI2nwdBF+kTb9/B6cx6d7M0HORI9CpCfpJs0zvJm/lVfO/Zzj2/K/5/L2whc34Tx3S/CLhduXFLHOGCB7/CQHAnwEmLv4sAEjqyhQL6eZS/TuVdQNEcvpEOWXy4XzGWL6rG47GeH12VD+F1UnCqbcKVwFyyNp8Y9bj83cPqOMS4a2cZRrMfdKzvZA1YKQNADas6nKvA48Jx61Ycr4VNCkhOcpKZ4BvBgtT0f+QlADWA0RmhKkRpe2kegbQCTGZq8T6RkKxKxSkngpcAXLI2nxiYxDA5Ha3T6n3vxXptwftXV9mEjnVIj7/8NfSXxawWLTIrTNZY9ednicKfV/aVEqS1uJNdFmGanHjG/Dgi1xbP4AppbXlpwZH8Eqyob1Bn8acXh+LCwYKdQFyyNp8aVkcUKRJ41Cd9zjfYTJ4hQwZ9WVDGV8h9+4rLfg8rhuM18dkrdRHpRXLKQ4FbAQzB9v0OK9QCYuHihX7iMdyZfeqrm1BZJ+jySjYm2cC+U+ywfzo05JYql6zsxdjBTquFnYdRS9zbFPkWqp58ROh6AplAXLI2nznltNM70JFfyoBw1EvaVFrsXI6wrFYuDy8PWUGdZxq/+wvDt15n7E1ceCv/8wnyTdAkGfI6xMcpAp39Zvosv1xsHP63+ZfHSNtoj2li/zlgnuz8ryFV0p2jBd3fmYEtkkKXgFyyNp8YiibfSZ2IDMFHx8APwj3TvR4gUFO6UnLcglq6DAoMB0riYGyKE+w+1msErnv/Wm5W5LYTqKhJC36ZZ0Yyq5F7B3GiaeIssD82lqzsTYhGl46ig4nSKEU7O0KdgFyyNp8jcdBf0EKg98Jd5Kvx+jPKGy5LVhck8gAx7n9rfJeH/t4YXVf2E20y7jH6u47Dt3kDQF0ywLIGFumdwUtKAqysXpE/IjbCAqm6ecr3pk0gSI/xDjA6u/AqcJm00DqvaV/rhQ4AfsjNJWhvdaHoWkmLTYKfgFyyNp80M5crSIbQChvJhdAM1xhRQFcA5Gmz5TNL9JPhnm2JAMgVXHQ370H8FSPTp6MQ5WDWp38cXA5LL5OvrRQ59UBGZZTJ7susWoXLgSlaICExOeQjmkw2bRO78hlnd8ntfQOgvPqZGjioN87bbTn6unoY74ljS+qKCf0sQpUAXLI2nz6DEKwK/JvzOHCU1xWut/PdoA/8+qii5oa9cUA1d1OfN6AL1fZSVaGnWadFGiU6gtRqYQU2Pj3zCrqDKWXbT5FoZz6Ad30DSvT5JJgt8m/CnkBcsjafP3Qg0xdkXkt953gAjveroeqoUpRafd+kgxXs3IiuHESFraJqGonH9W+lLXVXq2iyIz4oMn3OJYMAkC+izuWTbplvgoQKVKjxS22jJD1ujj5QJtPq4D24Wf9RfLv+9/xXT4uNQa/nZtAcQ9rljluEdk0R5p8CmwBcsjafJm5QlagL2VtSzhC2sF5/nB1s5fq8k4YnpKalj1ABsSBicCkfLywdd5n2G+VdLnDajNaVnUk6wevhkSk2aRAZgsHhIp6v08DWreTAJ9HuAT2+PnvoQgHD8Wanh3ApqgguBCRoPXAwa0KXAFyyNp8mdS4DFxP8cdsKRvRPY/QzReUHZMBHAYFdR5VAa8kg1hyoGDdmzptMd3SuoxbRJQoeECFASpIW/2ohARxWseb2hPz/x5o607iKphpktVZZtNHahETFTtGCmcBcsjafJQnI0DHfOEyg8OHAcJoyKXFw6KNjG/R9ewMWf9hI+1RHYJ30diNLLSOmd/PWkxDznvYKGoE5vk5GctEt5ywkm5YmilXETHzUYX63Z/aA6N0ujdqabISbM1yW3+JircHiVADCkcBcsjafJorg78ZuZLfPWqa3GQsDe4LeGcK2iG8vC16VoYxWMty89yYSQqMZsiNZv8kBNXXOvYu/t/3GxnBEJ1wrd1+cnmvIQ==\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 206,\"candidatesTokenCount\": 26,\"totalTokenCount\": 889,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 206}],\"thoughtsTokenCount\": 657},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"qqoxacLcKYm4vdIPgK6SkAE\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" + 5)` and `(2 + 1)`, in parallel.\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 206,\"candidatesTokenCount\": 41,\"totalTokenCount\": 904,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 206}],\"thoughtsTokenCount\": 657},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"qqoxacLcKYm4vdIPgK6SkAE\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"functionCall\": {\"name\": \"simple_add\",\"args\": {\"a\": 10,\"b\": 5}}}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 206,\"candidatesTokenCount\": 62,\"totalTokenCount\": 925,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 206}],\"thoughtsTokenCount\": 657},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"qqoxacLcKYm4vdIPgK6SkAE\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"functionCall\": {\"name\": \"simple_add\",\"args\": {\"a\": 2,\"b\": 1}}}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 206,\"candidatesTokenCount\": 82,\"totalTokenCount\": 945,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 206}],\"thoughtsTokenCount\": 657},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"qqoxacLcKYm4vdIPgK6SkAE\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 206,\"candidatesTokenCount\": 82,\"totalTokenCount\": 945,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 206}],\"thoughtsTokenCount\": 657},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"qqoxacLcKYm4vdIPgK6SkAE\"}\r\n\r\n"} +{"key": "0ab62b2c", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"Now\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 347,\"candidatesTokenCount\": 1,\"totalTokenCount\": 348,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 347}]},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"sqoxaebyHJXnxN8P5uXs2AY\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" that we have the results for the expressions in the parentheses, we can proceed.\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 347,\"candidatesTokenCount\": 17,\"totalTokenCount\": 364,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 347}]},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"sqoxaebyHJXnxN8P5uXs2AY\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" We have simplified the expression to `15 * 3 / 3`. Next, we will perform the multiplication and division\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 347,\"candidatesTokenCount\": 42,\"totalTokenCount\": 389,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 347}]},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"sqoxaebyHJXnxN8P5uXs2AY\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" in order from left to right. So, we'll first multiply 15 by 3.\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 347,\"candidatesTokenCount\": 63,\"totalTokenCount\": 410,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 347}]},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"sqoxaebyHJXnxN8P5uXs2AY\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"functionCall\": {\"name\": \"multiply\",\"args\": {\"b\": 3,\"a\": 15}}}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0,\"finishMessage\": \"Model generated function call(s).\"}],\"usageMetadata\": {\"promptTokenCount\": 347,\"candidatesTokenCount\": 82,\"totalTokenCount\": 429,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 347}]},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"sqoxaebyHJXnxN8P5uXs2AY\"}\r\n\r\n"} +{"key": "0dfd5305", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"\\n\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 456,\"totalTokenCount\": 456,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 456}]},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"taoxabzcDLSNvdIPx-zO0QY\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"We have now calculated `15 * 3` and have the result `\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 456,\"candidatesTokenCount\": 16,\"totalTokenCount\": 472,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 456}]},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"taoxabzcDLSNvdIPx-zO0QY\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"45`. The expression is now `45 / 3`. The final step is to perform the division.\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 456,\"candidatesTokenCount\": 39,\"totalTokenCount\": 495,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 456}]},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"taoxabzcDLSNvdIPx-zO0QY\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"functionCall\": {\"name\": \"divide\",\"args\": {\"b\": 3,\"a\": 45}}}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0,\"finishMessage\": \"Model generated function call(s).\"}],\"usageMetadata\": {\"promptTokenCount\": 456,\"candidatesTokenCount\": 58,\"totalTokenCount\": 514,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 456}]},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"taoxabzcDLSNvdIPx-zO0QY\"}\r\n\r\n"} +{"key": "9ec6facb", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"\\nWe\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 543,\"candidatesTokenCount\": 1,\"totalTokenCount\": 544,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 543}]},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"t6oxaaaOAu_Vxs0PqMDkuQE\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" have now completed the final step of the calculation. The expression `45 / \"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 543,\"candidatesTokenCount\": 17,\"totalTokenCount\": 560,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 543}]},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"t6oxaaaOAu_Vxs0PqMDkuQE\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"3` evaluates to `15.0`. Therefore, the final answer is 15.0.\\n\"}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 543,\"candidatesTokenCount\": 39,\"totalTokenCount\": 582,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 543}]},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"t6oxaaaOAu_Vxs0PqMDkuQE\"}\r\n\r\n"} +{"key": "3bb166b3", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"We just solved the mathematical expression `(10 + 5) * (2 + 1) / 3` step-by-step using the available tools.\\n\\nHere's a breakdown of the process:\\n\\n1. **Parentheses First:** We first calculated the expressions inside the parentheses:\\n * `10 + 5` was solved using `simple_add(a=10, b=5)`, which gave us `15`.\\n * `2 + 1` was solved using `simple_add(a=2, b=1)`, which gave us `3`.\\n\\n2. **Multiplication:** Next, we multiplied the results from the first step:\\n * `15 * 3` was solved using `multiply(a=15, b=3)`, resulting in `45`.\\n\\n3. **Division:** Finally, we performed the division:\\n * `45 / 3` was solved using `divide(a=45, b=3)`, which gave us the final answer.\\n\\nThe final result of the entire calculation was **15.0**.\",\n \"thoughtSignature\": \"CpMNAXLI2nxhyWYOomgLZEeg1IWdj12zg1vWH+yImYTOlAsRp2iloi8QTebI/hLUiy1lHtY9EmXpHfoWmF+M7iezHjr5t2BWCFzNQrOdLOnOQ4A0hPMdqzMNmWwa7P96Hu4HFN3XwiFUXvU8zqFwMKyshb0uPMO4kqAaEOoG5oeGxA2sHdPx+1z2YQD1NT/vhi7di//Z5rsZRBm97M2z+tSn4LfU0b2vEiv/holF6TdhZe/WmUHppS+oHolG9tAbP80EXXE1eWcc2KbaorV2Lqmzp5HiB349jzRqoiOt1lhVjSfjCL3QlXK/j1cD/jUyMSwx4L8jadW5vm0FIRDhd4K7Xa2M1xI403xjVfJGmz/oUuiraKzXDI+/plL7I+kpO3lt0BGL9hpIXt2O/nzX7BvGzY5gm1B4P1AUDVjAJgtKiWnvjkLQTozPDvBa5yyk5BQ9yN1P/5Ud94n3VKdiUaNm4OO2ZHp1ud9tsE5HNiUb2UQDgdz0L09/Ap8zuLAtRx15TON/bvRrz0T1HB4X98mE7GeYm+eTOaujzNYUizZpAEX8cQ/vZyzFWTi8A/2FCAfDAKAGXYVfFaRTvrncUZo6A574RKYijDrlj//mEr0phKDPR6qTr3PBJigU7iS4dSLZnbSTgmdedLNqzsNOx4U81TXZCA6ZuDbLsqk58qGJKpJzrkpR1Dymrx8brsV/n96SxpOPvYPFi1wvZo3c+bbYJ+/i49XWs/ZWZI8btCYL1faK/nFlJDuTee2avx2qFjpf4uU/R4EQJpOwnqtholvxfGMH3X/30QWwhN7DXEtZE0S+UJhZ7HlKTFO/0B9gA6AiqwOM9KpBFzZvMDXvoZJaLTi8+TjT6rQ2LnhNbQemHMPYlElQX/tSyeNexFmMlMQ47qMOQF3sbvBs1EUX6qhELyZTs5OMrRaznvEp/ycx/FS36m22Z6PTyixuqUf3MRFOqAWKsktUYupHxqDDZt1HV9JkrVLt1C6YuGanCvBoJB/k8R9RC7tFruIagnId2tEERFBFihVIRnpiVCwoY8v9ObsTp32QQZQKZVgMaWMOkMvCVP5rz2zQegBKYjZ8gamNNuVXPlsqgdqGceROnyBZlU9GEAijFZiejwdg2BdsM/RLnWIVzNGMTfaGlt2Hn5uIkryV5OB++PaTiJZoCDxORzzSK5kw/VRw6aWEC0H6u6GqJ8bzF0jIdF6cAPn2g3K1ofIepNvEStD/N3DoByPwouNG/fjwatUxwgQJCrmCKL0mUn5ir9tnyF+aWrgMAVP5gILBVQwWokKZACwFMMz3Nd8r76DsUGi+s08bHVw58oe9qZ5TaMb3J0OHYh2+1TByVkV3fdkbWcNEkCJr5z9HD5cCkgDUHTFzvVDHs1MCGZGZvMYkxOo31L0OmLDGIiXw9SCVrI0z2Dusdx1BZyydoN5ivOvKNHNoK5cR2KxZxFhB6oph9XhVirn815OajMk1eoyQHVHCyU/ROPEYqrxJf0JsshihPp6Z095o3u+IyY+6kpYBBiX2GnCp//i4EEgjvzwHOMwsd4Q5u95TBMqu6p+qJZqNxCNMm/Mra11zb+ovw9W2Oyext+8v0Y69vQInKxMwH19yDzasagdN9dfs6GPbsDvzwiIsTGoDf5FnWWNYkqoXs/y4jJwvJqLIcyHpI5nyav0FqbJm7JHX7RRQ8TKC5ekD8x6WJdfogCtHnzLpKeBGDAdxlYpYhkLp0ngVY2/oWOQbbb3VxckfwAE87wc82hlsqHANQ4HEOpj6kFuHt+FMAqlwM5Fx/boeaQ8hU8042bQPPrV/GvmhBDb5tMzeWpT5G4T/GdUvJPxvZl9JnxoK/kDiHKWiYZOJAEXl5aAR7Zk99cd8FslTvmJajRaVR8Ae7kTWH7zIlc0AAKqx+vYh+dhNg/WfoBoSbMyzsnuzK6r8u5MIjS5VDJGBosqpuu7e0Rjn+zUXf/o1F35hqgLlJx1HuIG0gR/xUozksx5vkhN61sdohHUbrtOzx0o8g7WHe2IQtDBtPxOBkpk5Ww0NIGth2MVamFh5s8M0tkNiLhFknEd/E0GdhcQ4QJTg2Vr1PcnxD3v52YcPzLlLcHcklddRR3jGNeQkekhPeKLM9MLeWimSgHhBRzzet1sa3rVo+iQuxyeU7QGvCdv/IEaP2aXsoAVFlmG4A5oAjAad3A92HNopWLmODW9+pXNDtIexO9fqCd44FCAv6iCSu9tM2NzCbyx/7pyEsp15i55S\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 501,\n \"candidatesTokenCount\": 243,\n \"totalTokenCount\": 1213,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 501\n }\n ],\n \"thoughtsTokenCount\": 469\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"vqoxaaD0KfLWxN8PwuyWsQY\"\n}\n"} +{"key": "96f2aacf", "response": "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"### Weather in New York City: Chilly with a Chance of Snow Showers\\n\\n**New York, NY** -\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 6,\"candidatesTokenCount\": 38,\"totalTokenCount\": 288,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 6}],\"toolUsePromptTokenCount\": 87,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 87}],\"thoughtsTokenCount\": 157},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"w6oxafzvKI7WvdIP3vmBuAk\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" This morning in New York City is currently cloudy with a temperature of 38\u00b0F (3\u00b0C), though it\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 6,\"candidatesTokenCount\": 63,\"totalTokenCount\": 313,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 6}],\"toolUsePromptTokenCount\": 87,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 87}],\"thoughtsTokenCount\": 157},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"w6oxafzvKI7WvdIP3vmBuAk\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" feels more like 33\u00b0F (1\u00b0C). There is a very low chance of snow\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 6,\"candidatesTokenCount\": 84,\"totalTokenCount\": 334,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 6}],\"toolUsePromptTokenCount\": 87,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 87}],\"thoughtsTokenCount\": 157},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"w6oxafzvKI7WvdIP3vmBuAk\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \". The humidity is currently at 54%.\\n\\nToday\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 6,\"candidatesTokenCount\": 96,\"totalTokenCount\": 346,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 6}],\"toolUsePromptTokenCount\": 87,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 87}],\"thoughtsTokenCount\": 157},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"w6oxafzvKI7WvdIP3vmBuAk\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"'s forecast shows a high of around 41\u00b0F (5\u00b0C) and a low of 21\u00b0F\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 6,\"candidatesTokenCount\": 123,\"totalTokenCount\": 373,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 6}],\"toolUsePromptTokenCount\": 87,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 87}],\"thoughtsTokenCount\": 157},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"w6oxafzvKI7WvdIP3vmBuAk\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" (-6\u00b0C). Skies will be mostly cloudy, gradually becoming sunny.\\n\\nLooking ahead, Friday is expected to bring snow showers with temperatures ranging from 30\u00b0F to \"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 6,\"candidatesTokenCount\": 159,\"totalTokenCount\": 409,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 6}],\"toolUsePromptTokenCount\": 87,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 87}],\"thoughtsTokenCount\": 157},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"w6oxafzvKI7WvdIP3vmBuAk\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"31\u00b0F (-1\u00b0C). The weekend forecast shows partly sunny skies with a high of\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 6,\"candidatesTokenCount\": 179,\"totalTokenCount\": 429,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 6}],\"toolUsePromptTokenCount\": 87,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 87}],\"thoughtsTokenCount\": 157},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"w6oxafzvKI7WvdIP3vmBuAk\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" 41\u00b0F (5\u00b0C) on Saturday and 40\u00b0F (4\u00b0C) on Sunday.\\n\\nThe extended forecast indicates a mix of sun and clouds with a potential for light\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 6,\"candidatesTokenCount\": 220,\"totalTokenCount\": 470,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 6}],\"toolUsePromptTokenCount\": 87,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 87}],\"thoughtsTokenCount\": 157},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"w6oxafzvKI7WvdIP3vmBuAk\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" rain early next week and a chance of a rain and snow mix by next Thursday. A heavy\"}],\"role\": \"model\"},\"index\": 0,\"groundingMetadata\": {}}],\"usageMetadata\": {\"promptTokenCount\": 6,\"candidatesTokenCount\": 239,\"totalTokenCount\": 489,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 6}],\"toolUsePromptTokenCount\": 87,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 87}],\"thoughtsTokenCount\": 157},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"w6oxafzvKI7WvdIP3vmBuAk\"}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" snowstorm is possible next Sunday.\"}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0,\"groundingMetadata\": {\"searchEntryPoint\": {\"renderedContent\": \"\\u003cstyle\\u003e\\n.container {\\n align-items: center;\\n border-radius: 8px;\\n display: flex;\\n font-family: Google Sans, Roboto, sans-serif;\\n font-size: 14px;\\n line-height: 20px;\\n padding: 8px 12px;\\n}\\n.chip {\\n display: inline-block;\\n border: solid 1px;\\n border-radius: 16px;\\n min-width: 14px;\\n padding: 5px 16px;\\n text-align: center;\\n user-select: none;\\n margin: 0 8px;\\n -webkit-tap-highlight-color: transparent;\\n}\\n.carousel {\\n overflow: auto;\\n scrollbar-width: none;\\n white-space: nowrap;\\n margin-right: -12px;\\n}\\n.headline {\\n display: flex;\\n margin-right: 4px;\\n}\\n.gradient-container {\\n position: relative;\\n}\\n.gradient {\\n position: absolute;\\n transform: translate(3px, -9px);\\n height: 36px;\\n width: 9px;\\n}\\n@media (prefers-color-scheme: light) {\\n .container {\\n background-color: #fafafa;\\n box-shadow: 0 0 0 1px #0000000f;\\n }\\n .headline-label {\\n color: #1f1f1f;\\n }\\n .chip {\\n background-color: #ffffff;\\n border-color: #d2d2d2;\\n color: #5e5e5e;\\n text-decoration: none;\\n }\\n .chip:hover {\\n background-color: #f2f2f2;\\n }\\n .chip:focus {\\n background-color: #f2f2f2;\\n }\\n .chip:active {\\n background-color: #d8d8d8;\\n border-color: #b6b6b6;\\n }\\n .logo-dark {\\n display: none;\\n }\\n .gradient {\\n background: linear-gradient(90deg, #fafafa 15%, #fafafa00 100%);\\n }\\n}\\n@media (prefers-color-scheme: dark) {\\n .container {\\n background-color: #1f1f1f;\\n box-shadow: 0 0 0 1px #ffffff26;\\n }\\n .headline-label {\\n color: #fff;\\n }\\n .chip {\\n background-color: #2c2c2c;\\n border-color: #3c4043;\\n color: #fff;\\n text-decoration: none;\\n }\\n .chip:hover {\\n background-color: #353536;\\n }\\n .chip:focus {\\n background-color: #353536;\\n }\\n .chip:active {\\n background-color: #464849;\\n border-color: #53575b;\\n }\\n .logo-light {\\n display: none;\\n }\\n .gradient {\\n background: linear-gradient(90deg, #1f1f1f 15%, #1f1f1f00 100%);\\n }\\n}\\n\\u003c/style\\u003e\\n\\u003cdiv class=\\\"container\\\"\\u003e\\n \\u003cdiv class=\\\"headline\\\"\\u003e\\n \\u003csvg class=\\\"logo-light\\\" width=\\\"18\\\" height=\\\"18\\\" viewBox=\\\"9 9 35 35\\\" fill=\\\"none\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\"\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M42.8622 27.0064C42.8622 25.7839 42.7525 24.6084 42.5487 23.4799H26.3109V30.1568H35.5897C35.1821 32.3041 33.9596 34.1222 32.1258 35.3448V39.6864H37.7213C40.9814 36.677 42.8622 32.2571 42.8622 27.0064V27.0064Z\\\" fill=\\\"#4285F4\\\"/\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M26.3109 43.8555C30.9659 43.8555 34.8687 42.3195 37.7213 39.6863L32.1258 35.3447C30.5898 36.3792 28.6306 37.0061 26.3109 37.0061C21.8282 37.0061 18.0195 33.9811 16.6559 29.906H10.9194V34.3573C13.7563 39.9841 19.5712 43.8555 26.3109 43.8555V43.8555Z\\\" fill=\\\"#34A853\\\"/\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M16.6559 29.8904C16.3111 28.8559 16.1074 27.7588 16.1074 26.6146C16.1074 25.4704 16.3111 24.3733 16.6559 23.3388V18.8875H10.9194C9.74388 21.2072 9.06992 23.8247 9.06992 26.6146C9.06992 29.4045 9.74388 32.022 10.9194 34.3417L15.3864 30.8621L16.6559 29.8904V29.8904Z\\\" fill=\\\"#FBBC05\\\"/\\u003e\\n \\u003cpath fill-rule=\\\"evenodd\\\" clip-rule=\\\"evenodd\\\" d=\\\"M26.3109 16.2386C28.85 16.2386 31.107 17.1164 32.9095 18.8091L37.8466 13.8719C34.853 11.082 30.9659 9.3736 26.3109 9.3736C19.5712 9.3736 13.7563 13.245 10.9194 18.8875L16.6559 23.3388C18.0195 19.2636 21.8282 16.2386 26.3109 16.2386V16.2386Z\\\" fill=\\\"#EA4335\\\"/\\u003e\\n \\u003c/svg\\u003e\\n \\u003csvg class=\\\"logo-dark\\\" width=\\\"18\\\" height=\\\"18\\\" viewBox=\\\"0 0 48 48\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\"\\u003e\\n \\u003ccircle cx=\\\"24\\\" cy=\\\"23\\\" fill=\\\"#FFF\\\" r=\\\"22\\\"/\\u003e\\n \\u003cpath d=\\\"M33.76 34.26c2.75-2.56 4.49-6.37 4.49-11.26 0-.89-.08-1.84-.29-3H24.01v5.99h8.03c-.4 2.02-1.5 3.56-3.07 4.56v.75l3.91 2.97h.88z\\\" fill=\\\"#4285F4\\\"/\\u003e\\n \\u003cpath d=\\\"M15.58 25.77A8.845 8.845 0 0 0 24 31.86c1.92 0 3.62-.46 4.97-1.31l4.79 3.71C31.14 36.7 27.65 38 24 38c-5.93 0-11.01-3.4-13.45-8.36l.17-1.01 4.06-2.85h.8z\\\" fill=\\\"#34A853\\\"/\\u003e\\n \\u003cpath d=\\\"M15.59 20.21a8.864 8.864 0 0 0 0 5.58l-5.03 3.86c-.98-2-1.53-4.25-1.53-6.64 0-2.39.55-4.64 1.53-6.64l1-.22 3.81 2.98.22 1.08z\\\" fill=\\\"#FBBC05\\\"/\\u003e\\n \\u003cpath d=\\\"M24 14.14c2.11 0 4.02.75 5.52 1.98l4.36-4.36C31.22 9.43 27.81 8 24 8c-5.93 0-11.01 3.4-13.45 8.36l5.03 3.85A8.86 8.86 0 0 1 24 14.14z\\\" fill=\\\"#EA4335\\\"/\\u003e\\n \\u003c/svg\\u003e\\n \\u003cdiv class=\\\"gradient-container\\\"\\u003e\\u003cdiv class=\\\"gradient\\\"\\u003e\\u003c/div\\u003e\\u003c/div\\u003e\\n \\u003c/div\\u003e\\n \\u003cdiv class=\\\"carousel\\\"\\u003e\\n \\u003ca class=\\\"chip\\\" href=\\\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF8I9Yw2RNvD7xuJGC-PpHjNCCK0d0Tq_cFaS4NaW-KP7t0cyocvR4TXv1F3kTlBCezuU_uZTaY-4CjYWjNE3QiSiy3r4Bxfc_n3pyKvZvMn47NVliZweX259vgN1lnPBpok4z6oQMFxF-7NzYOFg8YLRwyNatbXha0VlxLCkQpr_eoHS-LlwJHiASeOxOnzXE0\\\"\\u003eweather in NYC\\u003c/a\\u003e\\n \\u003c/div\\u003e\\n\\u003c/div\\u003e\\n\"},\"groundingChunks\": [{\"web\": {\"uri\": \"https://www.google.com/search?q=weather+in+New York, NY,+US\",\"title\": \"Weather information for New York, NY, US\"}},{\"web\": {\"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQFinFHpruftELWXdhySOBKU6A_P3RgBeSy7oRK98au8oUTxa1-6gaqhQ3IbVv9dunPKn3osHc0LFvf4QjvwANvZXemdb8yqLeWyJvPUiDrhU7Ko3qNih1L_SldoWV_n-vFLx-tZIsU5NsANDt9cfYZYWWUDV-Vf4C-p-qcq4bmLd-IITw==\",\"title\": \"accuweather.com\"}},{\"web\": {\"uri\": \"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQGH8EgvP-TQI_B02rYhMRwpi9xdI7AkKi-fzuHaeaS6sKN8NfX4t1nrye2IuaH6dvt4BmwUJeVftToIaT_84PTa2aUOZDnoLc_P8mfQFntKBCf-_Hoz9WerKMtEEGR73FHV7KsE1oXUqpDbCKTEyb5IIPDOJLa295nSaJTtow==\",\"title\": \"weather.gov\"}}],\"groundingSupports\": [{\"segment\": {\"startIndex\": 68,\"endIndex\": 212,\"text\": \"**New York, NY** - This morning in New York City is currently cloudy with a temperature of 38\u00b0F (3\u00b0C), though it feels more like 33\u00b0F (1\u00b0C).\"},\"groundingChunkIndices\": [0]},{\"segment\": {\"startIndex\": 213,\"endIndex\": 248,\"text\": \"There is a very low chance of snow.\"},\"groundingChunkIndices\": [0]},{\"segment\": {\"startIndex\": 249,\"endIndex\": 282,\"text\": \"The humidity is currently at 54%.\"},\"groundingChunkIndices\": [0]},{\"segment\": {\"startIndex\": 284,\"endIndex\": 364,\"text\": \"Today's forecast shows a high of around 41\u00b0F (5\u00b0C) and a low of 21\u00b0F (-6\u00b0C).\"},\"groundingChunkIndices\": [1]},{\"segment\": {\"startIndex\": 365,\"endIndex\": 419,\"text\": \"Skies will be mostly cloudy, gradually becoming sunny.\"},\"groundingChunkIndices\": [2]},{\"segment\": {\"startIndex\": 421,\"endIndex\": 531,\"text\": \"Looking ahead, Friday is expected to bring snow showers with temperatures ranging from 30\u00b0F to 31\u00b0F (-1\u00b0C).\"},\"groundingChunkIndices\": [0]},{\"segment\": {\"startIndex\": 532,\"endIndex\": 645,\"text\": \"The weekend forecast shows partly sunny skies with a high of 41\u00b0F (5\u00b0C) on Saturday and 40\u00b0F (4\u00b0C) on Sunday.\"},\"groundingChunkIndices\": [0,1]},{\"segment\": {\"startIndex\": 647,\"endIndex\": 804,\"text\": \"The extended forecast indicates a mix of sun and clouds with a potential for light rain early next week and a chance of a rain and snow mix by next Thursday.\"},\"groundingChunkIndices\": [0]},{\"segment\": {\"startIndex\": 805,\"endIndex\": 847,\"text\": \"A heavy snowstorm is possible next Sunday.\"},\"groundingChunkIndices\": [0]}],\"webSearchQueries\": [\"weather in NYC\"]}}],\"usageMetadata\": {\"promptTokenCount\": 6,\"candidatesTokenCount\": 246,\"totalTokenCount\": 496,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 6}],\"toolUsePromptTokenCount\": 87,\"toolUsePromptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 87}],\"thoughtsTokenCount\": 157},\"modelVersion\": \"gemini-2.5-pro\",\"responseId\": \"w6oxafzvKI7WvdIP3vmBuAk\"}\r\n\r\n"} +{"key": "d2d2dbf9", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"President\",\n \"args\": {\n \"last\": \"Jefferson\",\n \"birth_year\": 1743,\n \"birthplace\": \"Shadwell\",\n \"years_in_office\": \"1801-1809\",\n \"spouse\": \"Martha Jefferson\",\n \"first\": \"Thomas\"\n }\n },\n \"thoughtSignature\": \"CtMPAXLI2nw0GH3wHk/kiHrsdgiPgC7RDAURdVjufxnpYFUq7Qidy39v+ClHJONfATXtiYGKCgIawMA+VMz4YLDD7Xhy45IkjxX1SgTIVkAno6YtuCwydgjx8ygMJpJeH5Ke8fvKFYWexSt+sC/5IQoE4k/Hchm+/ndE1k4cW2VTSAkEggMnRAyson2rXVbeMFHtGu7gyu3uCuoCGO4tS9Lyga77PxTIOvsvWAsOH+UpMCWasPCpkKTY3J1BVfAbuQX5DVLQS3KM5V/9UV7OF+XwVbI2Mq7NLIslNnl+NKuVpTkTqTnsAkLlI4ko9+V6d5yYshe+D6GQxuok3oING/c5XONVknpACPOZ870AL/JcjyqQoskzzAXua9gN+OrkGtwJYpjQQiyYeSVmyMUQ1k+yC/9o3UmJoc0uswLUV+NwSPGV1wmLmqvNjuPkrpHj98mPufzputHtVFFLUVgsYhcGeJrXB7DZU90za2TCCobzWwRv+LNFwxYOhTk7JiYVFIzL68FE1piazbi+wXqc8G9KunGrcXCMmlVJM/kdBycIEaU+9SK7L6YJHa46xSSrywaBiMIJktDFJVJS4zE/RJ5zEnj3wLOx4/0JevdbY6MEqAV6+n+/AXhp6bMcL1Ki79kueGPKGd8L348k6OD0TlC+Y+wyqvu/gb0M3YSyYzyZcUSC89w/ZFg8clGdsV8+NFs972vc970evuhhjUo1gWHvGndt8nwf8EoYyAGtFtz4I7cTuzuvkVneYcG1fh7ao2Pr2N/3F9thvdZLO3p8w1QmMy5+1k7T2hkmrC3HY8pLPAiBDfx9KLM3v4SxNi40T7yK4vZlETnlILx5jbNBXIZ1XoLONI7vqOgB6MOC3dImxyyE+nJl9jZkGiEcYm+2iFoeTecpWmez42LdHBXsxZO6RSzGLMcF6Mi9Gdvq36sl/4ZXn+ttY9wRP1x8LQgY9Dz1NKcrb78tP+lcuW9fLo0aQtLxbnbYxxI9c74+PxRy8uIVJ0pSh3a/vMTfo0ewouB221cyVbY8DON07VGCPGsA4W/sW1qFHm36LrZZm5BEiOG5wmI2tMNQY1RWySJMwsVJ+xSqWHybXjwL1bPVmvYtyJzyJRwWUul+lNQURGiNK46jEuDfTa+JOQxMzJdC2B3UMksqeUrd31C/YF9CkY9EhcHD7gpzD4Akcbd/LC0+2R+T15NwvAH2MrD3EI9ho7Y/nMhEZbXFnY3YQaXdlsIiKzhkd29ERbeJ9G1jPPuZO6ezR5mciJ+cYDQLJtYJFZbB/QN5Qn/p1cOQYfqkNsECTrQgh5Fuxx8JdCl45x4GO9btUXuwDIV3z9bICpy7sLtgL6AR0Jhene4D0wNatQZcFO5/tOi61uS83wsJVjr33vU+hwuPf8w7Bznjx+BFzYkcykt6/M2Dwn3ejQ5PzR+UyLDyO7XBcF2Fva0uo9at0fnDtQYKgWWzSSgAcDLxU9ML1jkJYjuitQdIIchKUeEMpr07NiT4T9+Gin8vdQiD26IMDW4UYIOCAGDFvw/p8dEH0lJbVf/5VO80LlF8Etglq8UXqTaPFk3b2yVR4sSAHwdjME2nG5cRgNQOqORnHGvAUYT694KGsUroAQ/I81DHsAexxn7en7vt4+FrTWf3l/kgCzX2VJ/xFiKy5tSHRaaeNisyIbfru+w54al8U07D2zJU5/2Av+DwJQFSE8m/zaXmoKL11eLIpMFVbRQcuKpOj5TVqGqPQ0SGEkjQJFqhhBfexZ2wFIroTRJQy6d7F1W+FWTRounD4PU8kO01jQrfx9vKtFakoLso+SzHvADGAmp3LfhGYOYyK+tnkAIhMoLDwGP5JLA/7P08v6fUKSGBogMYSOLbEwvTylAh+R2HV6Lv7/aO8ntE403KZIHWRgDu83gBugMHKboviwXOAtxvg3rZZ/uFbmODAu0+hhR5+upBqDRrnFWJQ/497HfSW72aEA+m6NMcFTIgbsM+QBQ1ehFJpo0kEMD97ccFrksQJEKfCZ3L10QE1PRe3HunyIPuvn92mh08ewUlLlhezDhtef7umOI3p20U5Per6RZdV1gZTs3EHtjEtg/zQhBZNWF299r2rxUXlHmOSpO030CIkObA4AsBeHAHSvmewPh7L9urLod7b69x5CyYZULg/RU/x/wSYEFB/QvAsE/5qh7bZQdo9h/fkH3Birp/0C+zb+nJxlxwvLdcoybMTR14TOJbAVbznykN1ITqK6cC1KBXW3ueP0B3b/t0Y4p04Gg8hRWquTUnTA0TLfDlpysB03FA+VbEkrTESPmdGtaY9rRYpEmEx9KqiQNioKZg9xFsgHfMY8quM+FX++P5B2TW5nNXQlHH8glVIcE+pnWgs4NobW/YmuxkM9JdTkpTvb8N2SNGeuWZyTzf6h+XnJnmS0BECZRpu1igCB/vcL2/b+02bn/QbQxEW/R1kJmgNI142Q8ATY9M2JbbvH43XHSOjwjdridhaRmXQ/VKoPBOtdXUFrTV81/GaxmIN/U9sGeWuvAddD/2z2btlxYDx/F2SvFHiLQP7NO5dPvaw/FPZ92+8T72YC8uJ5xxCqSLOiCRlcDqZE9OjLIrKJ7VNdIg9WaypdH5DBunB2ZYkDEoyfpbR1Ga614+ldZ7sA2ewXQckts2otXL4vSneDtfwtEsRzvZPgY=\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 143,\n \"candidatesTokenCount\": 58,\n \"totalTokenCount\": 637,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 143\n }\n ],\n \"thoughtsTokenCount\": 436\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"O5A2aaGoCq2mkdUP0daJ6Q8\"\n}\n"} +{"key": "7ccfaa26", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"a\": 5,\n \"b\": 3\n }\n },\n \"thoughtSignature\": \"Es0DCsoDAXLI2nwC1X1+1iHT2KXF1tVXC5QMTyhSXGiOZe+UD2rNCFId2MQnUC9FeItLsu8Pjr1dyJT4KgmDIKxb90rCnmogXPrgpllviTX0mNb8RgU2hNvc2B4MRy23jP2NU1CYWVYRkgY6dOSH0ImUKx4SPv6kfGL/DQnIRV6IWZ1h6CahmH9K0ebX8an8ZUUVNIBce/0lZn/Buj+Ikdh6CHTQEE+6UqC9ou7Zj/+UDTfa7lasA+pe9SiITR9dUHBPqqCwYlxag1cB7P7uTr0pvR/Q/2ZF15xYcezEzP1AYawPEUbpq3gi6xIynouP8wJvCP1EHY7TWo7JB5ahfS5pNP05xlxCo+lSaXGl5dTDM0ZmVRX3ESIxNeMAu1t+bIHWXu46953Wr9kfUlL4cCrdUGwg+MWby0M8fDwQ5sabcU3n9ApAp0LaFxNvHm5GGpFLdHate9SmI34pBoT4w47MYFOgMWqERmArFaAHqJ8Mjo5OfCdmnuXEW23oMUSTmXZ4TyzrK6pX5Cl025b0wsiQnRVX6u9xWkdA5ANeAuozoaRcLAdsxPzE/sKQCfSpeSf8SoGVAVb5UxK1OoebI4kdZm4upuS+Zk2yLI7nudM=\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 97,\n \"candidatesTokenCount\": 18,\n \"totalTokenCount\": 239,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 97\n }\n ],\n \"thoughtsTokenCount\": 124\n },\n \"modelVersion\": \"gemini-3-pro-preview\",\n \"responseId\": \"zpE2aZqmDrrmnsEPponD0Aw\"\n}\n"} +{"key": "36546e54", "response": "{\n \"error\": {\n \"code\": 503,\n \"message\": \"The model is overloaded. Please try again later.\",\n \"status\": \"UNAVAILABLE\"\n }\n}\n"} +{"key": "08f4b9f7", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"I will use the `simple_add` tool with `a=5` and `b=3` to calculate the sum.\",\n \"thoughtSignature\": \"CqMIAXLI2nxdrH/GQWpUzgnzhgRsCneVib/9hf7Ftaf0SthquSrSL98esVvqp9pvFibRRjDkomVgBzBvJPdJUtFznqwCtv5ZwzJ5Jx/xGuxBprt9jGsn/8ReEZSTH++7yuJ2LdHlLviiApoaq9iuCQQ393mhLt11NvmV85SW0QtzourpPy0FF2w9SAo6UosYGr/2DTkVR/vABYtCC12RlPgt4e+q3G6QrqhKW6IF3V7FJnbRl5YVU6+4Bu9OQ/huFh7f9GXKG5zfXVscIWqbrXo9qHgjBD46t5rvJTpODe4W/+qLYEqRZltlMjAMgcvOOP23eys97U/2Q/TlkOu8Wxzbxgab6HROdhDMydexkp/Pz6eEYc9MwK00RmVIIabVEHCyCDe9CuVWjbx40aGboVDKcOVr0z/5mdB6KgOG3fh/xVCVr/ZF32gAhwVyvDv77qv6LVc4bmL6tEimyVHv+hahaFTY5yGvAk6zNSqBnSZCjels7nH5wGEmGKqajleIJj73ICOmCRWl8mMdewfyHQE79zkTCl2zS5pJJ8eDu5G09jCR6HOmCpuh5nVW9LDnndX2N7wRvFCfGPWIlWhZcivkXklaGsBsSE+2TQ2P+lTG0E6WOQcnsRKAeQzPyopykVTgXQuBVKTScgtPZWaMHwjEIIRvpbUtamQVqGRkAac6suMe25MH1BuqAVjoJ//enEs0pacw7oN+TUFYPFRQyHRZidXmK/LBVb9cF4NigdRLdwiUIew3Kn3AP7YIiaMW4Pl6fbs1BChoQT9EDtmxVZE4mOIypwwgQOEF8UqH1B6aoIQWO4VX8KlhNHbC6Q9nU7c6yjMvXROtBMMB1JL7UWvTbJKow25mniTEeYSpgf6B5MrGz/Tzt5gN3LStXA7ljlcrs8lyPWmZyeZbDHNL1XHYnTgurPr2lZMRXfe2Iu3pMb57ha8vFbbWaXdOZcbcS56Q4Wy/sI/GRDG+FRqOODFqx/u/mPl8qTrNZuY37oma9S7Xcqe2026R41rTg/QZ6VzrYGMj3hFpsJ3yEz4tF1PACuBp5lnXSrImIo7dQZnN3Td+jr9Nmq0dTpMbL6WT3YefuYjxdms8YiisyjqhUT0CM23z/7xUblUuZLeUjONZh1un6LExTsOaeMvw6irx1qMgxYsqW96gUJnwiQErq4zCmywQkSzfgy//nh8TvJ8uTSbq1XRPmvzUQ2MdWKoL6wCjeqtOcNSzNrSB6l3OTYBYN2OwW9qe9ez0JAYtfs4UJ18LSxBzjAhxMsEnSEzQB3VsYZXIswE77qMLa4Suwx2+J90jBHBypmbCEF39CudsNnJqkEYQlqLEKKE/WGDllITHey65UsOV49VzFX+fQbWHKFTjQVuQqEoXUlqNjhrFh2wqEP339Kwi7TO5B9+OBS+2Y3Xd\"\n },\n {\n \"functionCall\": {\n \"name\": \"simple_add\",\n \"args\": {\n \"a\": 5,\n \"b\": 3\n }\n }\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 86,\n \"candidatesTokenCount\": 47,\n \"totalTokenCount\": 427,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 86\n }\n ],\n \"thoughtsTokenCount\": 294\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"GpI2aaOuEr60kdUPpLrU4Q8\"\n}\n"} +{"key": "eb70b1fb", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"I have successfully completed the goal.\\n\\nTo find the sum of 5 + 3, I used the `simple_add` tool. I provided the inputs `a=5` and `b=3`. The tool processed these numbers and returned the result, which is 8.\\n\\nTherefore, 5 + 3 = 8.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 214,\n \"candidatesTokenCount\": 71,\n \"totalTokenCount\": 1001,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 214\n }\n ],\n \"thoughtsTokenCount\": 716\n },\n \"modelVersion\": \"gemini-2.5-pro\",\n \"responseId\": \"IpI2ae_LBvXNkdUPksi40A8\"\n}\n"} +{"key": "f05c62bc", "response": "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"text\": \"I used the `simple_add` tool to calculate 5 + 3.\\nThe tool returned the result 8.\\n\\nTherefore, 5 + 3 = 8.\"\n }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 160,\n \"candidatesTokenCount\": 38,\n \"totalTokenCount\": 293,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 160\n }\n ],\n \"thoughtsTokenCount\": 95\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": \"I5I2aZ-wEbvunsEP6YT22Qw\"\n}\n"} +{"key": "3ec9f8ba", "response": "{\"model\":\"claude-sonnet-4-5-20250929\",\"id\":\"msg_01AJvGV8nLWfqqvFFV7cX2RG\",\"type\":\"message\",\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":\"## Summary\\n\\nI successfully completed the calculation using the `simple_add` tool.\\n\\n**Result: 5 + 3 = 8**\\n\\n**Explanation:**\\nThe `simple_add` function took two parameters:\\n- `a = 5` (the first operand)\\n- `b = 3` (the second operand)\\n\\nThe function performed the addition operation and returned **8** as the result.\\n\\nThe goal has been fully completed - no further work is needed.\"}],\"stop_reason\":\"end_turn\",\"stop_sequence\":null,\"usage\":{\"input_tokens\":770,\"cache_creation_input_tokens\":0,\"cache_read_input_tokens\":0,\"cache_creation\":{\"ephemeral_5m_input_tokens\":0,\"ephemeral_1h_input_tokens\":0},\"output_tokens\":110,\"service_tier\":\"standard\"}}"} +{"key": "5f0d7f06", "response": "{\n \"id\": \"chatcmpl-CkRIIQWbKX5CwFmJveVutdTe31o76\",\n \"object\": \"chat.completion\",\n \"created\": 1765184042,\n \"model\": \"gpt-4.1-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I used the simple_add tool to calculate 5 + 3, and the result is 8.\\n\\nSummary:\\n- I successfully completed the calculation using the tool.\\n- 5 + 3 = 8.\\n\\nNo further work is needed for this task. If you have more calculations or questions, please let me know!\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 156,\n \"completion_tokens\": 65,\n \"total_tokens\": 221,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_09249d7c7b\"\n}\n"} +{"key": "ab8f4dfb", "response": "{\"model\":\"claude-sonnet-4-20250514\",\"id\":\"msg_011PsdPTshDaz29s1KHqKTEV\",\"type\":\"message\",\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":\"I successfully completed the calculation using the add_numbers tool. The result of 47 + 23 is **70**.\\n\\nThe goal was fully accomplished - I used the requested tool to perform the addition and provided you with the correct answer.\"}],\"stop_reason\":\"end_turn\",\"stop_sequence\":null,\"usage\":{\"input_tokens\":578,\"cache_creation_input_tokens\":0,\"cache_read_input_tokens\":0,\"cache_creation\":{\"ephemeral_5m_input_tokens\":0,\"ephemeral_1h_input_tokens\":0},\"output_tokens\":53,\"service_tier\":\"standard\"}}"} +{"key": "7c9331e2", "response": "{\"model\":\"claude-sonnet-4-20250514\",\"id\":\"msg_01DQ5ainCHCUjftqFqNPn7RC\",\"type\":\"message\",\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":\"I successfully completed the calculation using the add_numbers tool. Here's a summary of my findings:\\n\\n**Goal**: Calculate 47 + 23 + 59\\n**Result**: 129\\n\\n**Process**:\\n1. First, I added 47 + 23 = 70\\n2. Then, I added 70 + 59 = 129\\n\\nThe calculation is complete. The answer to 47 + 23 + 59 is **129**.\"}],\"stop_reason\":\"end_turn\",\"stop_sequence\":null,\"usage\":{\"input_tokens\":707,\"cache_creation_input_tokens\":0,\"cache_read_input_tokens\":0,\"cache_creation\":{\"ephemeral_5m_input_tokens\":0,\"ephemeral_1h_input_tokens\":0},\"output_tokens\":107,\"service_tier\":\"standard\"}}"} +{"key": "c285da91", "response": "event: message_start\ndata: {\"type\":\"message_start\",\"message\":{\"model\":\"claude-sonnet-4-20250514\",\"id\":\"msg_018PMhYkMepyVyHbdXTXfjmX\",\"type\":\"message\",\"role\":\"assistant\",\"content\":[],\"stop_reason\":null,\"stop_sequence\":null,\"usage\":{\"input_tokens\":1139,\"cache_creation_input_tokens\":0,\"cache_read_input_tokens\":0,\"cache_creation\":{\"ephemeral_5m_input_tokens\":0,\"ephemeral_1h_input_tokens\":0},\"output_tokens\":2,\"service_tier\":\"standard\"}} }\n\nevent: content_block_start\ndata: {\"type\":\"content_block_start\",\"index\":0,\"content_block\":{\"type\":\"text\",\"text\":\"\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\"I don\"}}\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\"'t have web\"} }\n\nevent: ping\ndata: {\"type\": \"ping\"}\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" search capabilities,\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" so I couldn\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\"'t search\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" for\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" the elephant\"}}\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" weights. To complete\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" your\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" request\"}}\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\", you\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" would need to:\\n\\n1. Search\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" for average\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" weight\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" of male African eleph\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\"ants in\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" kg\\n2. Search for average weight\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" of male Asian elephants in kg\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\"\\n3.\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" Provide\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" those\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" two\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" numbers\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" so\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" I can ad\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\"d them together\\n\\nOnce you have\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" those weights\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\", I can use\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" the add_numbers function to calculate\"} }\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\" the total.\"} }\n\nevent: content_block_stop\ndata: {\"type\":\"content_block_stop\",\"index\":0 }\n\nevent: message_delta\ndata: {\"type\":\"message_delta\",\"delta\":{\"stop_reason\":\"end_turn\",\"stop_sequence\":null},\"usage\":{\"input_tokens\":1139,\"cache_creation_input_tokens\":0,\"cache_read_input_tokens\":0,\"output_tokens\":93} }\n\nevent: message_stop\ndata: {\"type\":\"message_stop\" }\n\n"} diff --git a/lisette/_modidx.py b/lisette/_modidx.py index bf98f9d..0afab30 100644 --- a/lisette/_modidx.py +++ b/lisette/_modidx.py @@ -43,13 +43,8 @@ 'lisette/core.py'), 'lisette.core.mk_msg': ('core.html#mk_msg', 'lisette/core.py'), 'lisette.core.mk_msgs': ('core.html#mk_msgs', 'lisette/core.py'), - 'lisette.core.mk_tc': ('core.html#mk_tc', 'lisette/core.py'), - 'lisette.core.mk_tc_req': ('core.html#mk_tc_req', 'lisette/core.py'), - 'lisette.core.mk_tc_result': ('core.html#mk_tc_result', 'lisette/core.py'), - 'lisette.core.mk_tc_results': ('core.html#mk_tc_results', 'lisette/core.py'), 'lisette.core.mk_tr_details': ('core.html#mk_tr_details', 'lisette/core.py'), 'lisette.core.patch_litellm': ('core.html#patch_litellm', 'lisette/core.py'), - 'lisette.core.random_tool_id': ('core.html#random_tool_id', 'lisette/core.py'), 'lisette.core.remove_cache_ckpts': ('core.html#remove_cache_ckpts', 'lisette/core.py'), 'lisette.core.stream_with_complete': ('core.html#stream_with_complete', 'lisette/core.py'), 'lisette.core.structured': ('core.html#structured', 'lisette/core.py')}, diff --git a/lisette/core.py b/lisette/core.py index f401d9d..dd8d24b 100644 --- a/lisette/core.py +++ b/lisette/core.py @@ -5,8 +5,8 @@ # %% auto 0 __all__ = ['sonn45', 'opus45', 'detls_tag', 're_tools', 'effort', 'patch_litellm', 'remove_cache_ckpts', 'contents', 'mk_msg', 'fmt2hist', 'mk_msgs', 'stream_with_complete', 'lite_mk_func', 'ToolResponse', 'structured', 'cite_footnote', - 'cite_footnotes', 'Chat', 'random_tool_id', 'mk_tc', 'mk_tc_req', 'mk_tc_result', 'mk_tc_results', - 'astream_with_complete', 'AsyncChat', 'mk_tr_details', 'AsyncStreamFormatter', 'adisplay_stream'] + 'cite_footnotes', 'Chat', 'astream_with_complete', 'AsyncChat', 'mk_tr_details', 'AsyncStreamFormatter', + 'adisplay_stream'] # %% ../nbs/00_core.ipynb import asyncio, base64, json, litellm, mimetypes, random, string @@ -81,10 +81,12 @@ def _repr_markdown_(self: litellm.ModelResponse): # %% ../nbs/00_core.ipynb def _bytes2content(data): - "Convert bytes to litellm content dict (image or pdf)" - mtype = 'application/pdf' if data[:4] == b'%PDF' else mimetypes.types_map.get(f'.{imghdr.what(None, h=data)}') - if not mtype: raise ValueError(f'Data must be image or PDF bytes, got {data[:10]}') - return {'type': 'image_url', 'image_url': f'data:{mtype};base64,{base64.b64encode(data).decode("utf-8")}'} + "Convert bytes to litellm content dict (image, pdf, audio, video)" + mtype = detect_mime(data) + if not mtype: raise ValueError(f'Data must be a supported file type, got {data[:10]}') + encoded = base64.b64encode(data).decode("utf-8") + if mtype.startswith('image/'): return {'type': 'image_url', 'image_url': f'data:{mtype};base64,{encoded}'} + return {'type': 'file', 'file': {'file_data': f'data:{mtype};base64,{encoded}'}} # %% ../nbs/00_core.ipynb def _add_cache_control(msg, # LiteLLM formatted msg @@ -250,7 +252,7 @@ def cite_footnotes(stream_list): def _mk_prefill(pf): return ModelResponseStream([StreamingChoices(delta=Delta(content=pf,role='assistant'))]) # %% ../nbs/00_core.ipynb -_final_prompt = "You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed." +_final_prompt = dict(role="user", content="You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed.") # %% ../nbs/00_core.ipynb class Chat: @@ -285,7 +287,7 @@ def _prep_msg(self, msg=None, prefill=None): cache_idxs = L(self.cache_idxs).filter().map(lambda o: o-1 if o>0 else o) else: cache_idxs = self.cache_idxs - if msg: self.hist = mk_msgs(self.hist+[msg], self.cache, cache_idxs, self.ttl) + if msg: self.hist = mk_msgs(self.hist+[msg], self.cache and 'claude' in self.model, cache_idxs, self.ttl) pf = [{"role":"assistant","content":prefill}] if prefill else [] return sp + self.hist + pf @@ -306,6 +308,7 @@ def _call(self, msg=None, prefill=None, temp=None, think=None, search=None, stre tools=self.tool_schemas, reasoning_effort = effort.get(think), tool_choice=tool_choice, # temperature is not supported when reasoning temperature=None if think else ifnone(temp,self.temp), + caching=self.cache and 'claude' not in self.model, **kwargs) if stream: if prefill: yield _mk_prefill(prefill) @@ -348,29 +351,6 @@ def print_hist(self:Chat): "Print each message on a different line" for r in self.hist: print(r, end='\n\n') -# %% ../nbs/00_core.ipynb -def random_tool_id(): - "Generate a random tool ID with 'toolu_' prefix" - random_part = ''.join(random.choices(string.ascii_letters + string.digits, k=25)) - return f'toolu_{random_part}' - -# %% ../nbs/00_core.ipynb -def mk_tc(func, args, tcid=None, idx=1): - if not tcid: tcid = random_tool_id() - return {'index': idx, 'function': {'arguments': args, 'name': func}, 'id': tcid, 'type': 'function'} - -# %% ../nbs/00_core.ipynb -def mk_tc_req(content, tcs): - msg = Message(content=content, role='assistant', tool_calls=tcs, function_call=None) - msg.tool_calls = [{**dict(tc), 'function': dict(tc['function'])} for tc in msg.tool_calls] - return msg - -# %% ../nbs/00_core.ipynb -def mk_tc_result(tc, result): return {'tool_call_id': tc['id'], 'role': 'tool', 'name': tc['function']['name'], 'content': result} - -# %% ../nbs/00_core.ipynb -def mk_tc_results(tcq, results): return [mk_tc_result(a,b) for a,b in zip(tcq.tool_calls, results)] - # %% ../nbs/00_core.ipynb async def _alite_call_func(tc, ns, raise_on_err=True): try: fargs = json.loads(tc.function.arguments) @@ -401,6 +381,7 @@ async def _call(self, msg=None, prefill=None, temp=None, think=None, search=None tools=self.tool_schemas, reasoning_effort=effort.get(think), tool_choice=tool_choice, # temperature is not supported when reasoning temperature=None if think else ifnone(temp,self.temp), + caching=self.cache and 'claude' not in self.model, **kwargs) if stream: if prefill: yield _mk_prefill(prefill) @@ -460,20 +441,18 @@ def mk_tr_details(tr, tc, mx=2000): # %% ../nbs/00_core.ipynb class AsyncStreamFormatter: def __init__(self, include_usage=False, mx=2000): - self.outp,self.tcs,self.include_usage,self.think,self.mx = '',{},include_usage,False,mx + self.outp,self.tcs,self.include_usage,self.mx = '',{},include_usage,mx def format_item(self, o): "Format a single item from the response stream." res = '' if isinstance(o, ModelResponseStream): d = o.choices[0].delta - if nested_idx(d, 'reasoning_content'): - self.think = True - res += '🧠' - elif self.think: - self.think = False - res += '\n\n' - if c:=d.content: res+=c + if nested_idx(d, 'reasoning_content') and d['reasoning_content']!='{"text": ""}': + res+= '🧠' if not self.outp or self.outp[-1]=='🧠' else '\n\n🧠' # gemini can interleave reasoning + elif self.outp and self.outp[-1] == '🧠': res+= '\n\n' + if c:=d.content: # gemini has text content in last reasoning chunk + res+=f"\n\n{c}" if res and res[-1] == '🧠' else c elif isinstance(o, ModelResponse): if self.include_usage: res += f"\nUsage: {o.usage}" if c:=getattr(contents(o),'tool_calls',None): diff --git a/lisette/usage.py b/lisette/usage.py index 39a41fe..ad68a4a 100644 --- a/lisette/usage.py +++ b/lisette/usage.py @@ -26,9 +26,17 @@ def log_success_event(self, kwargs, response_obj, start_time, end_time): def _log_usage(self, response_obj, response_cost, start_time, end_time): usage = response_obj.usage ptd = usage.prompt_tokens_details - self.usage.insert(Usage(timestamp=time.time(), model=response_obj.model, user_id=self.user_id_fn(), prompt_tokens=usage.prompt_tokens, completion_tokens=usage.completion_tokens, - total_tokens=usage.total_tokens, cached_tokens=ptd.cached_tokens if ptd else 0, cache_creation_tokens=usage.cache_creation_input_tokens, - cache_read_tokens=usage.cache_read_input_tokens, web_search_requests=nested_idx(usage, 'server_tool_use', 'web_search_requests'), response_cost=response_cost)) + self.usage.insert(Usage(timestamp=time.time(), + model=response_obj.model, + user_id=self.user_id_fn(), + prompt_tokens=usage.prompt_tokens, + completion_tokens=usage.completion_tokens, + total_tokens=usage.total_tokens, + cached_tokens=ptd.cached_tokens if ptd else 0, # used by gemini (read tokens) + cache_creation_tokens=nested_idx(usage, 'cache_creation_input_tokens'), + cache_read_tokens=nested_idx(usage, 'cache_read_input_tokens'), # used by anthropic + web_search_requests=nested_idx(usage, 'server_tool_use', 'web_search_requests'), + response_cost=response_cost)) def user_id_fn(self): raise NotImplementedError('Please implement `LisetteUsageLogger.user_id_fn` before initializing, e.g using fastcore.patch.') diff --git a/nbs/00_core.ipynb b/nbs/00_core.ipynb index 40e5bfc..96d0604 100644 --- a/nbs/00_core.ipynb +++ b/nbs/00_core.ipynb @@ -72,9 +72,8 @@ "source": [ "#| hide\n", "from fastcore.test import *\n", - "from IPython.display import Markdown, Image\n", - "from fastcore.xtras import SaveReturn\n", - "from fastcore.test import *" + "from IPython.display import Markdown, Image, Audio, Video\n", + "import httpx" ] }, { @@ -227,6 +226,74 @@ "id": "25a6f62b", "metadata": {}, "outputs": [ + { + "data": { + "text/markdown": [ + "**gemini/gemini-3-pro-preview:**" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Hello! How can I help you today?\n", + "\n", + "Whether you need help with a creative project, have a burning question, or just want to chat, I'm all ears. What's on your mind?\n", + "\n", + "
\n", + "\n", + "- id: `chatcmpl-xxx`\n", + "- model: `gemini-3-pro-preview`\n", + "- finish_reason: `stop`\n", + "- usage: `Usage(completion_tokens=163, prompt_tokens=4, total_tokens=167, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=120, rejected_prediction_tokens=None, text_tokens=43, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=4, image_tokens=None))`\n", + "\n", + "
" + ], + "text/plain": [ + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-3-pro-preview', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"Hello! How can I help you today?\\n\\nWhether you need help with a creative project, have a burning question, or just want to chat, I'm all ears. What's on your mind?\", role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[{'type': 'thinking', 'thinking': '{\"text\": \"Hello! How can I help you today?\\\\n\\\\nWhether you need help with a creative project, have a burning question, or just want to chat, I\\'m all ears. What\\'s on your mind?\"}', 'signature': 'EvoDCvcDAXLI2nwStssJVsA/41VhNGBtTYcO/AB6v51G/mWckRl8dTv7yc9sBdsfL/ZyZS46ncXHwgzkascEc6y3GrwXfb3U6h7+DjZQmYIx1ezMlYqP4rsYcpVU6laTtU8IpcGLj66SigR5a+JW9bAU3res5GL+3yXZcnCY7cSzGVG134A7FkFqaVsRjiU06e9UihOGiK6C+t54GrA4ITdLnMFo1RvReBArJFYjPhj5oXuvITyfhlkdKc4nH36eT55766GHpzjHn+l+T6/MwPiwr+kxqDI72O8IgxjBUyTCToQQm9Av5FE4syBSWcCGqJAGOjMe4aNIakMh5E/f5FTyJD7GJJANiKINDe9rjDFcgF7bwJQmeLCOY3DlseMXCfDegZtYupK1Jm948xza7GkUZzp5BxdjqsRdvbD8UeBcCIg5cxbjZAV2bRl97lrNhHGZw7wWxlOek3nhiqDdkyuz2UpsEKJ3wCaDXdH7EHoSirsLP5dyAezmVh+N8lcVpvqrNZmnVemF31X+R++BWmLyaS8vDyZWQ4SKKEiY171zvP8rZvO5eNNVwmhzLlyDiJJrB1TYdwt7L4utcJWFjTirdw96Ju+I7vjgmBetQiIWBqfvHRuy7o3z7jGHY1C6nW3/STJK3y0Q4WKbYi9ehoq0B1ii2NdMMyxrTgQ='}], provider_specific_fields=None))], usage=Usage(completion_tokens=163, prompt_tokens=4, total_tokens=167, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=120, rejected_prediction_tokens=None, text_tokens=43, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=4, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "**gemini/gemini-2.5-pro:**" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Hey there! How can I help you today?\n", + "\n", + "
\n", + "\n", + "- id: `chatcmpl-xxx`\n", + "- model: `gemini-2.5-pro`\n", + "- finish_reason: `stop`\n", + "- usage: `Usage(completion_tokens=1051, prompt_tokens=4, total_tokens=1055, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=1041, rejected_prediction_tokens=None, text_tokens=10, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=4, image_tokens=None))`\n", + "\n", + "
" + ], + "text/plain": [ + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Hey there! How can I help you today?', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=1051, prompt_tokens=4, total_tokens=1055, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=1041, rejected_prediction_tokens=None, text_tokens=10, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=4, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/markdown": [ @@ -328,7 +395,7 @@ } ], "source": [ - "ms = [\"gemini/gemini-2.5-flash\", \"claude-sonnet-4-5\", \"openai/gpt-4.1\"]\n", + "ms = [\"gemini/gemini-3-pro-preview\", \"gemini/gemini-2.5-pro\", \"gemini/gemini-2.5-flash\", \"claude-sonnet-4-5\", \"openai/gpt-4.1\"]\n", "msg = [{'role':'user','content':'Hey there!', 'cache_control': {'type': 'ephemeral'}}]\n", "for m in ms:\n", " display(Markdown(f'**{m}:**'))\n", @@ -354,16 +421,19 @@ { "cell_type": "code", "execution_count": null, - "id": "17911e6a", + "id": "d4c8b8f2", "metadata": {}, "outputs": [], "source": [ + "#| export\n", "#| export\n", "def _bytes2content(data):\n", - " \"Convert bytes to litellm content dict (image or pdf)\"\n", - " mtype = 'application/pdf' if data[:4] == b'%PDF' else mimetypes.types_map.get(f'.{imghdr.what(None, h=data)}')\n", - " if not mtype: raise ValueError(f'Data must be image or PDF bytes, got {data[:10]}')\n", - " return {'type': 'image_url', 'image_url': f'data:{mtype};base64,{base64.b64encode(data).decode(\"utf-8\")}'}" + " \"Convert bytes to litellm content dict (image, pdf, audio, video)\"\n", + " mtype = detect_mime(data)\n", + " if not mtype: raise ValueError(f'Data must be a supported file type, got {data[:10]}')\n", + " encoded = base64.b64encode(data).decode(\"utf-8\") \n", + " if mtype.startswith('image/'): return {'type': 'image_url', 'image_url': f'data:{mtype};base64,{encoded}'}\n", + " return {'type': 'file', 'file': {'file_data': f'data:{mtype};base64,{encoded}'}}" ] }, { @@ -474,7 +544,7 @@ "metadata": {}, "outputs": [], "source": [ - "model = ms[1]" + "model = ms[1] # use 2.5-pro, 3-pro is very slow even to run tests as of making" ] }, { @@ -486,19 +556,19 @@ { "data": { "text/markdown": [ - "Hey! How's it going? What's on your mind?\n", + "Hey there! How can I help you today?\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=16, prompt_tokens=8, total_tokens=24, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=767, prompt_tokens=2, total_tokens=769, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=757, rejected_prediction_tokens=None, text_tokens=10, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=2, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"Hey! How's it going? What's on your mind?\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=16, prompt_tokens=8, total_tokens=24, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Hey there! How can I help you today?', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=767, prompt_tokens=2, total_tokens=769, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=757, rejected_prediction_tokens=None, text_tokens=10, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=2, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -526,9 +596,9 @@ "metadata": {}, "outputs": [], "source": [ - "def c(msgs, **kw):\n", + "def c(msgs, m=model, **kw):\n", " msgs = [msgs] if isinstance(msgs,dict) else listify(msgs)\n", - " return completion(model, msgs, **kw)" + " return completion(m, msgs, **kw)" ] }, { @@ -540,19 +610,19 @@ { "data": { "text/markdown": [ - "Hey! How's it going? What's on your mind?\n", + "Hey there! How can I help you today?\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=16, prompt_tokens=8, total_tokens=24, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=767, prompt_tokens=2, total_tokens=769, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=757, rejected_prediction_tokens=None, text_tokens=10, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=2, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"Hey! How's it going? What's on your mind?\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=16, prompt_tokens=8, total_tokens=24, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Hey there! How can I help you today?', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=767, prompt_tokens=2, total_tokens=769, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=757, rejected_prediction_tokens=None, text_tokens=10, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=2, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -656,26 +726,28 @@ { "data": { "text/markdown": [ - "This image shows an adorable **Cavalier King Charles Spaniel puppy**! The puppy has the breed's characteristic features:\n", + "This is an absolutely adorable picture of a puppy!\n", "\n", - "- **Coloring**: Beautiful brown (chestnut) and white markings\n", - "- **Big, expressive dark eyes** that are very soulful\n", - "- **Long, fluffy ears** with wavy fur\n", - "- **Sweet, gentle expression**\n", + "Here's a breakdown of what's in the image:\n", "\n", - "The puppy is lying on grass with **purple flowers** (possibly asters or similar) in the background, making for a very charming portrait. The pup looks quite young, probably just a few months old, and has that irresistibly cute puppy face that Cavaliers are known for!\n", + "* **The Puppy:** The main subject is a very young puppy, most likely a **Cavalier King Charles Spaniel**. It has the breed's characteristic features: large, dark, expressive eyes, long, floppy ears with silky, wavy fur, and a sweet expression. The coloring, white with chestnut or reddish-brown patches, is known as \"Blenheim\" in this breed.\n", + "* **The Pose:** The puppy is lying down in the green grass, peeking out from behind a bush of flowers. It's looking directly at the camera with a curious and gentle gaze.\n", + "* **The Flowers:** To the left of the puppy is a cluster of small, delicate purple or lavender-colored flowers, which look like asters or a similar daisy-like flower.\n", + "* **The Setting:** The scene is outdoors, likely in a garden or yard. The focus is sharp on the puppy, while the background is softly blurred, which makes the puppy stand out as the main subject.\n", + "\n", + "Overall, it's a very charming and heartwarming photograph capturing the innocence and cuteness of a young puppy.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=152, prompt_tokens=104, total_tokens=256, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=1332, prompt_tokens=265, total_tokens=1597, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=1075, rejected_prediction_tokens=None, text_tokens=257, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=7, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"This image shows an adorable **Cavalier King Charles Spaniel puppy**! The puppy has the breed's characteristic features:\\n\\n- **Coloring**: Beautiful brown (chestnut) and white markings\\n- **Big, expressive dark eyes** that are very soulful\\n- **Long, fluffy ears** with wavy fur\\n- **Sweet, gentle expression**\\n\\nThe puppy is lying on grass with **purple flowers** (possibly asters or similar) in the background, making for a very charming portrait. The pup looks quite young, probably just a few months old, and has that irresistibly cute puppy face that Cavaliers are known for!\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=152, prompt_tokens=104, total_tokens=256, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='This is an absolutely adorable picture of a puppy!\\n\\nHere\\'s a breakdown of what\\'s in the image:\\n\\n* **The Puppy:** The main subject is a very young puppy, most likely a **Cavalier King Charles Spaniel**. It has the breed\\'s characteristic features: large, dark, expressive eyes, long, floppy ears with silky, wavy fur, and a sweet expression. The coloring, white with chestnut or reddish-brown patches, is known as \"Blenheim\" in this breed.\\n* **The Pose:** The puppy is lying down in the green grass, peeking out from behind a bush of flowers. It\\'s looking directly at the camera with a curious and gentle gaze.\\n* **The Flowers:** To the left of the puppy is a cluster of small, delicate purple or lavender-colored flowers, which look like asters or a similar daisy-like flower.\\n* **The Setting:** The scene is outdoors, likely in a garden or yard. The focus is sharp on the puppy, while the background is softly blurred, which makes the puppy stand out as the main subject.\\n\\nOverall, it\\'s a very charming and heartwarming photograph capturing the innocence and cuteness of a young puppy.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=1332, prompt_tokens=265, total_tokens=1597, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=1075, rejected_prediction_tokens=None, text_tokens=257, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=7, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -710,63 +782,45 @@ }, { "cell_type": "markdown", - "id": "3dc01faf", - "metadata": {}, - "source": [ - "### Caching" - ] - }, - { - "cell_type": "markdown", - "id": "23176bd8", - "metadata": {}, - "source": [ - "Some providers such as Anthropic require manually opting into caching. Let's try it:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e42ad26b", + "id": "e0e5e1e2", "metadata": {}, - "outputs": [], "source": [ - "def cpr(i): return f'{i} '*1024 + 'This is a caching test. Report back only what number you see repeated above.'" + "Some models like Gemini support audio and video:" ] }, { "cell_type": "code", "execution_count": null, - "id": "70b7f481", + "id": "38313733", "metadata": {}, "outputs": [], "source": [ - "#| eval: false\n", - "disable_cachy()" + "wav_data = httpx.get(\"https://openaiassets.blob.core.windows.net/$web/API/docs/audio/alloy.wav\").content\n", + "# Audio(wav_data) # uncomment to preview" ] }, { "cell_type": "code", "execution_count": null, - "id": "84103a63", + "id": "d75a81fa", "metadata": {}, "outputs": [ { "data": { "text/markdown": [ - "1\n", + "The audio says: \"The sun rises in the east and sets in the west. This simple fact has been observed by humans for thousands of years.\"\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=5, prompt_tokens=2073, total_tokens=2078, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=2070, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=2070)`\n", + "- usage: `Usage(completion_tokens=351, prompt_tokens=230, total_tokens=581, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=321, rejected_prediction_tokens=None, text_tokens=30, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=223, cached_tokens=None, text_tokens=7, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='1', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=5, prompt_tokens=2073, total_tokens=2078, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=2070, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=2070))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='The audio says: \"The sun rises in the east and sets in the west. This simple fact has been observed by humans for thousands of years.\"', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=351, prompt_tokens=230, total_tokens=581, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=321, rejected_prediction_tokens=None, text_tokens=30, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=223, cached_tokens=None, text_tokens=7, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -775,41 +829,42 @@ } ], "source": [ - "msg = mk_msg(cpr(1), cache=True)\n", - "res = c(msg)\n", - "res" + "msg = mk_msg(['What is this audio saying?', wav_data])\n", + "completion(ms[1], [msg])" ] }, { - "cell_type": "markdown", - "id": "05e39380", + "cell_type": "code", + "execution_count": null, + "id": "10b32b23", "metadata": {}, + "outputs": [], "source": [ - "Anthropic has a maximum of 4 cache checkpoints, so we remove previous ones as we go:" + "vid_data = httpx.get(\"https://storage.googleapis.com/github-repo/img/gemini/multimodality_usecases_overview/pixel8.mp4\").content" ] }, { "cell_type": "code", "execution_count": null, - "id": "220ab7b9", + "id": "e904bf9c", "metadata": {}, "outputs": [ { "data": { "text/markdown": [ - "2\n", + "This video is an advertisement for the Google Pixel 8 Pro smartphone, featuring a photographer named Saeka Shimada. She walks through Tokyo at night, demonstrating the phone's new \"Video Boost\" feature, which uses \"Night Sight\" to capture high-quality, vibrant video in low-light conditions. She is visibly impressed by the clarity and beauty of the footage she records in the city's atmospheric alleys.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=5, prompt_tokens=4147, total_tokens=4152, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=4144, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=4144)`\n", + "- usage: `Usage(completion_tokens=402, prompt_tokens=17402, total_tokens=17804, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=318, rejected_prediction_tokens=None, text_tokens=84, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=1873, cached_tokens=None, text_tokens=12, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='2', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=5, prompt_tokens=4147, total_tokens=4152, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=4144, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=4144))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='This video is an advertisement for the Google Pixel 8 Pro smartphone, featuring a photographer named Saeka Shimada. She walks through Tokyo at night, demonstrating the phone\\'s new \"Video Boost\" feature, which uses \"Night Sight\" to capture high-quality, vibrant video in low-light conditions. She is visibly impressed by the clarity and beauty of the footage she records in the city\\'s atmospheric alleys.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=402, prompt_tokens=17402, total_tokens=17804, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=318, rejected_prediction_tokens=None, text_tokens=84, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=1873, cached_tokens=None, text_tokens=12, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -818,8 +873,76 @@ } ], "source": [ - "res = c([remove_cache_ckpts(msg), mk_msg(res), mk_msg(cpr(2), cache=True)])\n", - "res" + "msg = mk_msg(['Concisely, what is happening in this video?', vid_data])\n", + "completion(ms[1], [msg])" + ] + }, + { + "cell_type": "markdown", + "id": "3dc01faf", + "metadata": {}, + "source": [ + "### Caching" + ] + }, + { + "cell_type": "markdown", + "id": "23176bd8", + "metadata": {}, + "source": [ + "Some providers such as Anthropic require manually opting into caching. Let's try it:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e42ad26b", + "metadata": {}, + "outputs": [], + "source": [ + "def cpr(i): return f'{i} '*1024 + 'This is a caching test. Report back only what number you see repeated above.'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "70b7f481", + "metadata": {}, + "outputs": [], + "source": [ + "#| eval: false\n", + "disable_cachy()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "84103a63", + "metadata": {}, + "outputs": [], + "source": [ + "# msg = mk_msg(cpr(1), cache=True)\n", + "# res = c(msg, ms[2])\n", + "# res" + ] + }, + { + "cell_type": "markdown", + "id": "05e39380", + "metadata": {}, + "source": [ + "Anthropic has a maximum of 4 cache checkpoints, so we remove previous ones as we go:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "220ab7b9", + "metadata": {}, + "outputs": [], + "source": [ + "# res = c([remove_cache_ckpts(msg), mk_msg(res), mk_msg(cpr(2), cache=True)], ms[2])\n", + "# res" ] }, { @@ -835,20 +958,9 @@ "execution_count": null, "id": "e1c7e395", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=4144, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0))" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "res.usage.prompt_tokens_details" + "# res.usage.prompt_tokens_details" ] }, { @@ -1526,7 +1638,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Hey! How's it going? What can I help you with today?" + "Hey there! How can I help you today?" ] } ], @@ -1545,19 +1657,19 @@ { "data": { "text/markdown": [ - "Hey! How's it going? What can I help you with today?\n", + "Hey there! How can I help you today?\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=18, prompt_tokens=9, total_tokens=27, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", + "- usage: `Usage(completion_tokens=818, prompt_tokens=3, total_tokens=821, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"Hey! How's it going? What can I help you with today?\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None))], usage=Usage(completion_tokens=18, prompt_tokens=9, total_tokens=27, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Hey there! How can I help you today?', role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None))], usage=Usage(completion_tokens=818, prompt_tokens=3, total_tokens=821, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" ] }, "execution_count": null, @@ -1653,32 +1765,26 @@ { "data": { "text/markdown": [ - "I'll help you calculate both of those sums using the addition tool.\n", - "\n", - "Let me break down what I'll do:\n", - "1. First calculation: 5478954793 + 547982745\n", - "2. Second calculation: 5479749754 + 9875438979\n", - "\n", - "Since these two calculations are independent of each other, I'll perform both in a single response.\n", + "I will use the `simple_add` tool to perform the two requested additions. First, I'll add 5478954793 and 547982745. Then, I'll add 5479749754 and 9875438979.\n", "\n", "🔧 simple_add({\"a\": 5478954793, \"b\": 547982745})\n", "\n", "\n", "\n", - "🔧 simple_add({\"a\": 5479749754, \"b\": 9875438979})\n", + "🔧 simple_add({\"b\": 9875438979, \"a\": 5479749754})\n", "\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `tool_calls`\n", - "- usage: `Usage(completion_tokens=215, prompt_tokens=659, total_tokens=874, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=674, prompt_tokens=149, total_tokens=823, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=523, rejected_prediction_tokens=None, text_tokens=151, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=149, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=\"I'll help you calculate both of those sums using the addition tool.\\n\\nLet me break down what I'll do:\\n1. First calculation: 5478954793 + 547982745\\n2. Second calculation: 5479749754 + 9875438979\\n\\nSince these two calculations are independent of each other, I'll perform both in a single response.\", role='assistant', tool_calls=[ChatCompletionMessageToolCall(index=1, function=Function(arguments='{\"a\": 5478954793, \"b\": 547982745}', name='simple_add'), id='toolu_012k9WZ73j5h76gzDsAagbRq', type='function'), ChatCompletionMessageToolCall(index=2, function=Function(arguments='{\"a\": 5479749754, \"b\": 9875438979}', name='simple_add'), id='toolu_01TDEYgEXUEsxksK46opA6AQ', type='function')], function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=215, prompt_tokens=659, total_tokens=874, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=\"I will use the `simple_add` tool to perform the two requested additions. First, I'll add 5478954793 and 547982745. Then, I'll add 5479749754 and 9875438979.\", role='assistant', tool_calls=[ChatCompletionMessageToolCall(index=0, function=Function(arguments='{\"a\": 5478954793, \"b\": 547982745}', name='simple_add'), id='call_f759601e0acb4b0fa1bebbef3910', type='function'), ChatCompletionMessageToolCall(index=1, function=Function(arguments='{\"b\": 9875438979, \"a\": 5479749754}', name='simple_add'), id='call_ae1448f74140459fa37361265fa9', type='function')], function_call=None, images=[], thinking_blocks=[{'type': 'thinking', 'thinking': '{\"text\": \"I will use the `simple_add` tool to perform the two requested additions. First, I\\'ll add 5478954793 and 547982745. Then, I\\'ll add 5479749754 and 9875438979.\"}', 'signature': 'CsYNAXLI2nxQDg2H7IQFKRfAB++XIjr6+QF5y7Bm79+vB52Ig/hIU2JzHavfsUJq/hLHh24tKaj4NtwCC5IEytQW4hWc53exlWACJevkSQGxCyUIefD4KF6hhSLXcltG7PzHdLUdTAfX6Vc2ocluBKEAGIpzhKL13gwHACM7GPldMgA3YYuFJ88qOIEk9TVE4Pzt4DI6/PT3Ztm18dNA6lF8Px28dfB6N1EVmWtWfcKN69tiOK/aEbrg+1yvzqugWCOS8/X+/x/IToz5hHlNpG3t2MYec82v/5wmFFicYc6+AtzjTFIcXKgGXMBY9RtLpYdn0yRPseNbJoi9zbHAfYIB/uJKSfU3xs9jSLOTLwIcj4BPJxo59t6JKEnwqKA2f/ZCkqu5c9L09uRfJ6CV/5qRKhqDQ502zqkC0VlYvTH6dFWBaOz0xiUSd6G2BpBBaYaCQY4ieI4l7U7N6zv1K8RW248A4xyN4oZXOjtFplcvxLaqdXkc1qFYDVnM9dsk1QGOmXQg6uPJkZQplSIF5LaG8N5FdEf2qPyGrYgqreeyxlU0oJit43HP70FS44q9it92ugCtUtgOcAVK86KlAe8wRlvbcUq8Fq+W/2fMTF1btc1oXHYaoMS8uwZ8YBPKmkQ9v/bCm2CQNp9Hqmbcs889Q9CIoL4KpDf1O2ULHHUKjAayb89/xV6lYD0qZv8+5asM0pGrod/FUsZJI6UKdsV/zIqbbJ9t5TxHR8LubJqZuo2l5q4RrHWICRMXTcGPPftFw/GK6cyfD4j0bXmXMIqx17OxIrBBIYAfMatGNIN9Sk5fzjkg+ah9BfaSdXxft6qWE9t1ETips3N1eFiVb9W1c3N6CUZFzRGtWo/X2LWi4/ZEwnIUpPN5jPMwlLGvcrnlPJZXivYbnOfI76iE9A4kunfQV6CM+QPofhwO033js+jvpbab1hb3a9QeZRPm2h8PQAiGRfhHzTkDnpUUOYu7AUg/vYWY75dUbD4v2srWNYXdqJ1JChUYV5Wf40L05GPLsUqyIXYJsGfLcICbK3YEla/5laSmV3aO7b8KNMLgZ/oatCnFd9p3kWyC/dEAgjIe2WzWSAXt/BIRYF7z6hMHltp3q6KQH3ov3/tTmQ7xltlt4cJANTzu8puzkzRYZU6tLYe9bej/Y2R7Yd1jLscQ6bRhXSIYo4M4+q7XRPZGHPa2eSCgVC9+ZiWVGKEFcd0dAwtLNA99FFTvtqtgsJgcgMjZXvxh5ewbu9jl3JHGB1IAayj6D6E8MPd38+5LVsWCSF8R4+urLiK62yru2RPL/NKcpb4FCXErGoPx9ywJXR0x3SvW5HXg0sNPhuUCrFpxmcCq/KJzWydzXnhCR37W6HgPzGvD3mMlwDHk59LFPQZ4q253tu5nOu+afphP6FlPwL7d8lrNT67Q4i2FMTk9X13IHJED4t6qAcGH0pBhuuLK25EZVVnJowc3V3pebGtAg4tpIoUVJ100YJ2b0g9pZZ+V9fgWJsGoYCWrZw3ZmZv1CLAufnpJKnC6J+jxqCgRwjjeq8y3Rqi7iDKNOWqu0AmyT1gmHgvgxbrC/LSakUTQR8lwPQq8ab/LKPRM6EvyiuxrsssjONiXiglxbUUzODzZF5wHvqmZ12BbtrndAFmOsI+P8IooYzJXeOrpF54+A5+1blqOY8rcL3Ur9rJjwqNpieW7QVKYGmURCGMbYngJINfbXr9Qm1N5GLUmWNLSA5nPrFXnzHBxBF7hKGdRT993UQBaWtYf4letLeeTTd1bClcD//lIA9guzVx0hL5PAZZmgV51T6/iOVCSETD5WE7ljlw74haDch9l8ZtLFw1BS9xSqmG6pK5M62NHxcEQvGcXAZvEbXrfUTqj6QEpUJQnntoExDczr3pGdJqYpIC6wuvW2wHaFVIisnveA13u/WtZ1rFy8fDZjo2oNVtBvuCuRUblAo+BadCk8T0wh52ZykRs0EZhkvo82YQFjZmEvv8AYBSPmYz/69H1Cqbbtu3TSaCmIIeqxZF9abMATtc6MfNE1bTST0m4/C/x+IWJJSCRLCdcJSiz0++FvhWCLeNI5zTem85LNPx0cB+JlvdfsJBt1KAPBXDV6XJWZZkc7e0L+amCRFR9PBfkoMORDrfswlwOCWyHc7uKO3rU8E4hh1lgKl9rWJCMT7SY8QSlB88tt92sr/wKVaSkqvCqU0WtCqTLO33aQyWYyfKxdCp57NN87AVk2BGwapaFMd/gSirJAhNmZ/lcHG1RnN/c2/85+iu2shLmeeEFBOh4tDA6vjOBhH5npM8g6C3qz+r2AB+PVaku'}], provider_specific_fields=None))], usage=Usage(completion_tokens=674, prompt_tokens=149, total_tokens=823, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=523, rejected_prediction_tokens=None, text_tokens=151, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=149, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -1736,11 +1842,11 @@ { "data": { "text/plain": [ - "[{'tool_call_id': 'toolu_012k9WZ73j5h76gzDsAagbRq',\n", + "[{'tool_call_id': 'call_f759601e0acb4b0fa1bebbef3910',\n", " 'role': 'tool',\n", " 'name': 'simple_add',\n", " 'content': '6026937538'},\n", - " {'tool_call_id': 'toolu_01TDEYgEXUEsxksK46opA6AQ',\n", + " {'tool_call_id': 'call_ae1448f74140459fa37361265fa9',\n", " 'role': 'tool',\n", " 'name': 'simple_add',\n", " 'content': '15355188733'}]" @@ -1787,16 +1893,9 @@ "name": "stdout", "output_type": "stream", "text": [ - "I'll help you calculate both of those sums using the addition tool.\n", - "\n", - "Let me break down what I need to do:\n", - "1. Calculate 5478954793 + 547982745\n", - "2. Calculate 5479749754 + 9875438979\n", - "\n", - "Since these two calculations are independent of each other, I'll perform both additions in a single set of tool calls.\n", + "I will add the two pairs of numbers for you. First, I'll add 5478954793 and 547982745. Then, I'll add 5479749754 and 9875438979.\n", "🔧 simple_add\n", - "\n", - "🔧 simple_add\n" + "call:simple_add{a:5479749754,b:9875" ] } ], @@ -1815,32 +1914,22 @@ { "data": { "text/markdown": [ - "I'll help you calculate both of those sums using the addition tool.\n", - "\n", - "Let me break down what I need to do:\n", - "1. Calculate 5478954793 + 547982745\n", - "2. Calculate 5479749754 + 9875438979\n", - "\n", - "Since these two calculations are independent of each other, I'll perform both additions in a single set of tool calls.\n", + "I will add the two pairs of numbers for you. First, I'll add 5478954793 and 547982745. Then, I'll add 5479749754 and 9875438979.call:simple_add{a:5479749754,b:9875\n", "\n", "🔧 simple_add({\"a\": 5478954793, \"b\": 547982745})\n", "\n", "\n", - "\n", - "🔧 simple_add({\"a\": 5479749754, \"b\": 9875438979})\n", - "\n", - "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5`\n", - "- finish_reason: `tool_calls`\n", - "- usage: `Usage(completion_tokens=216, prompt_tokens=659, total_tokens=875, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", + "- model: `gemini-2.5-pro`\n", + "- finish_reason: `stop`\n", + "- usage: `Usage(completion_tokens=288, prompt_tokens=149, total_tokens=437, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=25, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=\"I'll help you calculate both of those sums using the addition tool.\\n\\nLet me break down what I need to do:\\n1. Calculate 5478954793 + 547982745\\n2. Calculate 5479749754 + 9875438979\\n\\nSince these two calculations are independent of each other, I'll perform both additions in a single set of tool calls.\", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{\"a\": 5478954793, \"b\": 547982745}', name='simple_add'), id='toolu_017KZv5hZLu9cs2tKZYapJ9g', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{\"a\": 5479749754, \"b\": 9875438979}', name='simple_add'), id='toolu_01HXAc8CXXZVufMNxdJ33n9N', type='function')], function_call=None, provider_specific_fields=None))], usage=Usage(completion_tokens=216, prompt_tokens=659, total_tokens=875, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"I will add the two pairs of numbers for you. First, I'll add 5478954793 and 547982745. Then, I'll add 5479749754 and 9875438979.call:simple_add{a:5479749754,b:9875\", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{\"a\": 5478954793, \"b\": 547982745}', name='simple_add'), id='call_ad72fd4a072548e6a4b21078632a', type='function')], function_call=None, provider_specific_fields=None, reasoning_content='{\"text\": \"I will add the two pairs of numbers for you. First, I\\'ll add 547895479\"}'))], usage=Usage(completion_tokens=288, prompt_tokens=149, total_tokens=437, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=25, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" ] }, "execution_count": null, @@ -1855,32 +1944,74 @@ { "cell_type": "code", "execution_count": null, - "id": "f355ecbe", + "id": "50bca992", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠\n", + "🧠🧠🧠🧠Of course! While it might seem complex at first, this is a classic calculus problem that becomes quite simple once you know the rules.\n", + "\n", + "The derivative of **x³ + 2x² - 5x + 1** is:\n", + "\n", + "**3x² + 4x - 5**\n", + "\n", + "---\n", + "\n", + "### Step-by-Step Solution:\n", + "\n", + "To solve this, we use a few fundamental rules of differentiation. The main idea is that we can take the derivative of each part of the expression (each term) separately and then add them together.\n", + "\n", + "The function is: `f(x) = x³ + 2x² - 5x + 1`\n", + "\n", + "Let's break it down term by term.\n", + "\n", + "#### 1. The Power Rule\n", + "The most important rule we'll use is the **Power Rule**, which states:\n", + "The derivative of `xⁿ` is `n * xⁿ⁻¹`\n", + "(In simple terms: bring the exponent down to the front as a multiplier, then subtract one from the original exponent).\n", + "\n", + "---\n", + "\n", + "**Term 1: `x³`**\n", + "* Using the Power Rule, `n = 3`.\n", + "* Bring the `3` to the front and subtract 1 from the exponent.\n", + "* Derivative = `3 * x³" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "⁻¹` = **3x²**\n", + "\n", + "**Term 2: `2x²`**\n", + "* We use the Power Rule on `x²` and keep the constant `2` as a multiplier.\n", + "* The derivative of `x²` is `2 * x²⁻¹` = `2x`.\n", + "* Now, multiply by the constant `2`: `2 * (2x)` = **4x**\n", "\n", + "**Term 3: `-5x`**\n", + "* You can think of `x` as `x¹`.\n", + "* Using the Power Rule, the derivative of `x¹` is `1 * x¹⁻¹` = `1 * x⁰`.\n", + "* Anything to the power of 0 is 1, so the derivative is `1`.\n", + "* Now, multiply by the constant `-5`: `-5 * 1` = **-5**\n", "\n", + "**Term 4: `+1`**\n", + "* There is a rule for constants: **The derivative of any constant number is always 0.**\n", + "* The derivative of `1` is **0**.\n", "\n", - "# Solution\n", + "---\n", "\n", - "To find the derivative of **f(x) = x³ + 2x² - 5x + 1**, I'll apply the power rule to each term.\n", + "### Putting It All Together:\n", "\n", - "## Power Rule: d/dx(xⁿ) = n·xⁿ⁻¹\n", + "Now, we just combine the derivatives of each term:\n", "\n", - "**Term by term:**\n", + "**3x² + 4x - 5 + 0**\n", "\n", - "1. d/dx(x³) = 3x²\n", - "2. d/dx(2x²) = 4x\n", - "3. d/dx(-5x) = -5\n", - "4. d/dx(1) = 0\n", + "Which simplifies to our final answer:\n", "\n", - "## Final Answer:\n", - "**f'(x) = 3x² + 4x - 5**" + "### **3x² + 4x - 5**" ] } ], @@ -1900,33 +2031,72 @@ { "data": { "text/markdown": [ - "# Solution\n", + "Of course! While it might seem complex at first, this is a classic calculus problem that becomes quite simple once you know the rules.\n", "\n", - "To find the derivative of **f(x) = x³ + 2x² - 5x + 1**, I'll apply the power rule to each term.\n", + "The derivative of **x³ + 2x² - 5x + 1** is:\n", "\n", - "## Power Rule: d/dx(xⁿ) = n·xⁿ⁻¹\n", + "**3x² + 4x - 5**\n", "\n", - "**Term by term:**\n", + "---\n", "\n", - "1. d/dx(x³) = 3x²\n", - "2. d/dx(2x²) = 4x\n", - "3. d/dx(-5x) = -5\n", - "4. d/dx(1) = 0\n", + "### Step-by-Step Solution:\n", "\n", - "## Final Answer:\n", - "**f'(x) = 3x² + 4x - 5**\n", + "To solve this, we use a few fundamental rules of differentiation. The main idea is that we can take the derivative of each part of the expression (each term) separately and then add them together.\n", + "\n", + "The function is: `f(x) = x³ + 2x² - 5x + 1`\n", + "\n", + "Let's break it down term by term.\n", + "\n", + "#### 1. The Power Rule\n", + "The most important rule we'll use is the **Power Rule**, which states:\n", + "The derivative of `xⁿ` is `n * xⁿ⁻¹`\n", + "(In simple terms: bring the exponent down to the front as a multiplier, then subtract one from the original exponent).\n", + "\n", + "---\n", + "\n", + "**Term 1: `x³`**\n", + "* Using the Power Rule, `n = 3`.\n", + "* Bring the `3` to the front and subtract 1 from the exponent.\n", + "* Derivative = `3 * x³⁻¹` = **3x²**\n", + "\n", + "**Term 2: `2x²`**\n", + "* We use the Power Rule on `x²` and keep the constant `2` as a multiplier.\n", + "* The derivative of `x²` is `2 * x²⁻¹` = `2x`.\n", + "* Now, multiply by the constant `2`: `2 * (2x)` = **4x**\n", + "\n", + "**Term 3: `-5x`**\n", + "* You can think of `x` as `x¹`.\n", + "* Using the Power Rule, the derivative of `x¹` is `1 * x¹⁻¹` = `1 * x⁰`.\n", + "* Anything to the power of 0 is 1, so the derivative is `1`.\n", + "* Now, multiply by the constant `-5`: `-5 * 1` = **-5**\n", + "\n", + "**Term 4: `+1`**\n", + "* There is a rule for constants: **The derivative of any constant number is always 0.**\n", + "* The derivative of `1` is **0**.\n", + "\n", + "---\n", + "\n", + "### Putting It All Together:\n", + "\n", + "Now, we just combine the derivatives of each term:\n", + "\n", + "**3x² + 4x - 5 + 0**\n", + "\n", + "Which simplifies to our final answer:\n", + "\n", + "### **3x² + 4x - 5**\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=329, prompt_tokens=67, total_tokens=396, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=149, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", + "- usage: `Usage(completion_tokens=1465, prompt_tokens=29, total_tokens=1494, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=333, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"# Solution\\n\\nTo find the derivative of **f(x) = x³ + 2x² - 5x + 1**, I'll apply the power rule to each term.\\n\\n## Power Rule: d/dx(xⁿ) = n·xⁿ⁻¹\\n\\n**Term by term:**\\n\\n1. d/dx(x³) = 3x²\\n2. d/dx(2x²) = 4x\\n3. d/dx(-5x) = -5\\n4. d/dx(1) = 0\\n\\n## Final Answer:\\n**f'(x) = 3x² + 4x - 5**\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None, thinking_blocks=[{'type': 'thinking', 'thinking': \"This is a straightforward calculus problem asking for the derivative of a polynomial function.\\n\\nThe function is: f(x) = x³ + 2x² - 5x + 1\\n\\nUsing the power rule for derivatives: d/dx(xⁿ) = n·xⁿ⁻¹\\n\\nLet me find the derivative of each term:\\n- d/dx(x³) = 3x²\\n- d/dx(2x²) = 2·2x = 4x\\n- d/dx(-5x) = -5\\n- d/dx(1) = 0\\n\\nTherefore, the derivative is: f'(x) = 3x² + 4x - 5\", 'signature': 'EpoECkYIChgCKkChyjMWXlJo9+nW7XTZab5yybYseG/D2YWLq7UDRGFFgksy2aQxjxYk4ZhHkUkUZ/SMJ83vk8Rpyv6piaqNeXXFEgyNAYHrylsWmkmjYH0aDHevG6TmKo5z6T56eSIwIgkXQpZ8T8VJ48Gn+OpjFI497qy8LXRAGl4E0y+yfKX+Zpygptyq2QwPbj8utMwEKoEDecsl2A7AB9kUx44wWuHQ1syBNXvIBSN5dFE89B99i1gle/8Qa5dIaNW0nM3d9O+aBgrNzKY11d9kFdjAEhuzUdU1JHRVUu2ITQz4f/NLhZTNQmTXWrGil7v3+x9LCBSwA6AVfIOJGNkmpdlEozDARbTX6Q+typAwZpDb8DTda22nOabbswqz+q3PasHUlxHuE+BnDB44VppqtPRHuGLdLHKhzrdgr4UgXU/n5VMjga10qIgh9alShhfdQaFLUxRmru2Uv8SM3Hqd2nyI1QAU4dXWSy/tyK/fdnwAv8JIp4SzxXU8WuNd3g+7LZzG+LUGBYYdoBx+MjWMOTyOHM8Jsd4KFLdyRUQu+4XYvfrJtazRmTsHSXOSdo5v3CgvnXx80gTA4oTtShwn9VQH2mYsMlzdtfQhjM9IxGF4EL3kLZTeIpOIAex1eXQMawrEf8yC7G1zlAeBSglP1FtW9TQ78ceBSGUmMhclapBcmx0uWjWKfWGh475lJt1UKoOq3Qn7lBgB'}], reasoning_content=\"This is a straightforward calculus problem asking for the derivative of a polynomial function.\\n\\nThe function is: f(x) = x³ + 2x² - 5x + 1\\n\\nUsing the power rule for derivatives: d/dx(xⁿ) = n·xⁿ⁻¹\\n\\nLet me find the derivative of each term:\\n- d/dx(x³) = 3x²\\n- d/dx(2x²) = 2·2x = 4x\\n- d/dx(-5x) = -5\\n- d/dx(1) = 0\\n\\nTherefore, the derivative is: f'(x) = 3x² + 4x - 5\"))], usage=Usage(completion_tokens=329, prompt_tokens=67, total_tokens=396, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=149, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"Of course! While it might seem complex at first, this is a classic calculus problem that becomes quite simple once you know the rules.\\n\\nThe derivative of **x³ + 2x² - 5x + 1** is:\\n\\n**3x² + 4x - 5**\\n\\n---\\n\\n### Step-by-Step Solution:\\n\\nTo solve this, we use a few fundamental rules of differentiation. The main idea is that we can take the derivative of each part of the expression (each term) separately and then add them together.\\n\\nThe function is: `f(x) = x³ + 2x² - 5x + 1`\\n\\nLet's break it down term by term.\\n\\n#### 1. The Power Rule\\nThe most important rule we'll use is the **Power Rule**, which states:\\nThe derivative of `xⁿ` is `n * xⁿ⁻¹`\\n(In simple terms: bring the exponent down to the front as a multiplier, then subtract one from the original exponent).\\n\\n---\\n\\n**Term 1: `x³`**\\n* Using the Power Rule, `n = 3`.\\n* Bring the `3` to the front and subtract 1 from the exponent.\\n* Derivative = `3 * x³⁻¹` = **3x²**\\n\\n**Term 2: `2x²`**\\n* We use the Power Rule on `x²` and keep the constant `2` as a multiplier.\\n* The derivative of `x²` is `2 * x²⁻¹` = `2x`.\\n* Now, multiply by the constant `2`: `2 * (2x)` = **4x**\\n\\n**Term 3: `-5x`**\\n* You can think of `x` as `x¹`.\\n* Using the Power Rule, the derivative of `x¹` is `1 * x¹⁻¹` = `1 * x⁰`.\\n* Anything to the power of 0 is 1, so the derivative is `1`.\\n* Now, multiply by the constant `-5`: `-5 * 1` = **-5**\\n\\n**Term 4: `+1`**\\n* There is a rule for constants: **The derivative of any constant number is always 0.**\\n* The derivative of `1` is **0**.\\n\\n---\\n\\n### Putting It All Together:\\n\\nNow, we just combine the derivatives of each term:\\n\\n**3x² + 4x - 5 + 0**\\n\\nWhich simplifies to our final answer:\\n\\n### **3x² + 4x - 5**\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None, reasoning_content=\"**Initiating Derivative Exploration**\\n\\nI'm now fully immersed in the process of formulating a clear explanation of the derivative for the given polynomial. I've broken down the user's request and am carefully considering the most accessible way to present this information, ensuring it's comprehensive and understandable. The focus is on clarity and step-by-step breakdown.\\n\\n\\n**Decomposing the Polynomial**\\n\\nI've just finished the preliminary analysis of the problem. I'm focusing on dissecting the user's need for a comprehensive, step-by-step breakdown due to their perception of complexity. I've begun to list down the initial steps and have zeroed in on the polynomial expression. Now, I'm identifying the necessary calculus rules to accurately solve this derivative problem. I'm prioritizing the power and constant multiple rules as the key components for a concise and accessible derivation.\\n\\n\\n**Constructing the Solution**\\n\\nI've structured my approach to address the user's perception of complexity. I'll provide both the immediate answer and a detailed, encouraging explanation. The core of my explanation revolves around applying the power, constant multiple, sum/difference, and constant rules step-by-step. I will ensure each step is clear, concise, and easy to follow. I plan to use the original expression in the response to enhance clarity.\\n\\n\\n**Detailing the Power Rule**\\n\\nI've just focused on refining the explanation of the power rule, emphasizing how it simplifies finding derivatives of polynomial terms. The breakdown will directly address how to calculate the derivative for each term in the polynomial, ensuring an easily understandable and comprehensive tutorial. The structure is now designed to start with the fundamental rules and apply them step-by-step to the given problem.\\n\\n\\n\"))], usage=Usage(completion_tokens=1465, prompt_tokens=29, total_tokens=1494, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=333, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" ] }, "execution_count": null, @@ -2000,7 +2170,7 @@ { "data": { "text/plain": [ - "President(first='Thomas', last='Jefferson', spouse='Martha Wayles Skelton Jefferson', years_in_office='1801-1809', birthplace='Shadwell', birth_year=1743)" + "President(first='Thomas', last='Jefferson', spouse='Martha Jefferson', years_in_office='1801-1809', birthplace='Shadwell', birth_year=1743)" ] }, "execution_count": null, @@ -2053,6 +2223,8 @@ "name": "stdout", "output_type": "stream", "text": [ + "gemini/gemini-3-pro-preview True\n", + "gemini/gemini-2.5-pro True\n", "gemini/gemini-2.5-flash True\n", "claude-sonnet-4-5 True\n", "openai/gpt-4.1 False\n" @@ -2080,25 +2252,25 @@ { "data": { "text/markdown": [ - "Otters are carnivorous mammals in the subfamily Lutrinae, with 14 extant species that are all semiaquatic. They're found on every continent except Australia and Antarctica.\n", + "Otters are carnivorous mammals known for their playful behavior and adaptations to a semi-aquatic life. There are 14 known species of otters, which are part of the weasel family.\n", "\n", - "Otters are distinguished by their long, slim bodies, powerful webbed feet for swimming, and their dense fur, which keeps them warm and buoyant in water. In fact, otters have the densest fur of any animal—as many as a million hairs per square inch in places.\n", + "**Physical Characteristics:** Otters typically have long, slender bodies with short legs and powerful webbed feet perfect for swimming. They possess dense, waterproof fur that keeps them warm. Their size varies by species, ranging from about 2 to 6 feet in length and weighing between 6 and 100 pounds.\n", "\n", - "They are playful animals, engaging in activities like sliding into water on natural slides and playing with stones. All otters are expert hunters that eat fish, crustaceans, and other critters. Sea otters are particularly clever, using rocks to help them open mussels or other shellfish by placing a rock on their chests and smashing the shellfish against it.\n", + "**Habitat and Diet:** Most otters live in and around freshwater rivers, lakes, and wetlands, while two species are marine. Their diet is primarily carnivorous and consists of fish, crayfish, crabs, and other aquatic invertebrates. Some species are adept at using tools, such as rocks, to break open shellfish.\n", "\n", - "They can live up to 16 years, and include species ranging from the Asian small-clawed otter (the smallest) to the giant otter and sea otter (the largest).\n", + "**Behavior and Social Structure:** Otters are known for their playful nature, often seen sliding down riverbanks. Their social structure varies; some species are mostly solitary, while others live in groups. They communicate through a variety of sounds, including whistles and chirps. Otters can live up to 16 years in the wild.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=440, prompt_tokens=16651, total_tokens=17091, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), server_tool_use=ServerToolUse(web_search_requests=1), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=465, prompt_tokens=12, total_tokens=578, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=194, rejected_prediction_tokens=None, text_tokens=271, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=12, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"Otters are carnivorous mammals in the subfamily Lutrinae, with 14 extant species that are all semiaquatic. They're found on every continent except Australia and Antarctica.\\n\\nOtters are distinguished by their long, slim bodies, powerful webbed feet for swimming, and their dense fur, which keeps them warm and buoyant in water. In fact, otters have the densest fur of any animal—as many as a million hairs per square inch in places.\\n\\nThey are playful animals, engaging in activities like sliding into water on natural slides and playing with stones. All otters are expert hunters that eat fish, crustaceans, and other critters. Sea otters are particularly clever, using rocks to help them open mussels or other shellfish by placing a rock on their chests and smashing the shellfish against it.\\n\\nThey can live up to 16 years, and include species ranging from the Asian small-clawed otter (the smallest) to the giant otter and sea otter (the largest).\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': [[{'type': 'web_search_result_location', 'cited_text': 'Otters are carnivorous mammals in the subfamily Lutrinae. The 14 extant otter species are all semiaquatic, both freshwater and marine. ', 'url': 'https://en.wikipedia.org/wiki/Otter', 'title': 'Otter - Wikipedia', 'encrypted_index': 'EpEBCioIChgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDPhp4wuMLBPpkzvdqhoMt61kca3Ci3Bk2vucIjA5/jSHWV1T6RVvm5w1DBqUVD+imn3dwb8iWgw2n7Em4LDyVLWwrshxvOCFLZAUm2IqFeSyRH5fJmyP5sI1l0DGXFHn7KdaDBgE', 'supported_text': 'carnivorous mammals in the subfamily Lutrinae, with 14 extant species that are all semiaquatic'}], [{'type': 'web_search_result_location', 'cited_text': 'The charismatic otter, a member of the weasel family, is found on every continent except Australia and Antarctica. ', 'url': 'https://www.nationalgeographic.com/animals/mammals/facts/otters-1', 'title': 'Otters, facts and information | National Geographic', 'encrypted_index': 'EpABCioIChgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDA1XI2UXozbN61Cu3xoMUesO93x/sVb1TrrfIjBD8Pb78ap+6Nb7yA9a7DZDahM3tWpdo3+J+kZpR1mfmQJabxHOOKNgY6ZrtluRzH8qFBBHjBNEtEiKMJt+/CX7jd/HCfklGAQ=', 'supported_text': \"They're found on every continent except Australia and Antarctica\"}], [{'type': 'web_search_result_location', 'cited_text': 'Otters are distinguished by their long, slim bodies, powerful webbed feet for swimming, and their dense fur, which keeps them warm and buoyant in wate...', 'url': 'https://en.wikipedia.org/wiki/Otter', 'title': 'Otter - Wikipedia', 'encrypted_index': 'Eo8BCioIChgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDH4C3U9j67rgb2OJnRoMVSp3beZDlABjsadbIjDx8kg3gPXNDrG/sqYVaTuYGwg1XBnMrhIpUM/fascBtWzAUl+dnGkSmbuwL98nHwQqE4Ga4NtF/jHWwxihz4izmHFUVLsYBA==', 'supported_text': 'Otters are distinguished by their long, slim bodies, powerful webbed feet for swimming, and their dense fur, which keeps them warm and buoyant in water'}], [{'type': 'web_search_result_location', 'cited_text': 'Otters have the densest fur of any animal—as many as a million hairs per square inch in places. ', 'url': 'https://www.nationalgeographic.com/animals/mammals/facts/otters-1', 'title': 'Otters, facts and information | National Geographic', 'encrypted_index': 'Eo8BCioIChgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDI62gcDwHg78HuXbaxoM4rhLa2DGWz8szqUlIjCISFNW3mcsltnwpuAm+CJyV/KVYv1scrIHjO4MspJ712erlTwjfkh+ZrZMIt174PYqE8vhi4A8SnQnFJbOj2PeS9DN+fgYBA==', 'supported_text': 'otters have the densest fur of any animal—as many as a million hairs per square inch in places'}], [{'type': 'web_search_result_location', 'cited_text': 'They are playful animals, engaging in activities like sliding into water on natural slides and playing with stones. ', 'url': 'https://en.wikipedia.org/wiki/Otter', 'title': 'Otter - Wikipedia', 'encrypted_index': 'Eo8BCioIChgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDEpb2h5a8/VfZfYOfxoMmWgUANcS8vEvhJIfIjCM+fyLY0nIxWgQSfHLp3LOC0GOvFR2zFhCMuwUCUJ3lF678joxXm8I6ndG4Ci9EHoqE7PM3505iDXe8moG5jbrHD9lofEYBA==', 'supported_text': 'They are playful animals, engaging in activities like sliding into water on natural slides and playing with stones'}], [{'type': 'web_search_result_location', 'cited_text': 'All otters are expert hunters that eat fish, crustaceans, and other critters. ', 'url': 'https://www.nationalgeographic.com/animals/mammals/facts/otters-1', 'title': 'Otters, facts and information | National Geographic', 'encrypted_index': 'Eo8BCioIChgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDHRNubA3bWcDTrprwBoM0w+toFRofDMv8DuXIjCbTzryO5DLeis7YNuhyU1NGoBGlS4ekUyNpRYTCvck4q8FqkRu2ZliS99RBSJMm1MqE4sRFlVCsEVMZpp6cqLE/iy4I+0YBA==', 'supported_text': 'All otters are expert hunters that eat fish, crustaceans, and other critters'}], [{'type': 'web_search_result_location', 'cited_text': 'While floating on their backs, sea otters not only nap, but also use rocks to help them open mussels or other shellfish. Otters place a rock on their ...', 'url': 'https://kids.nationalgeographic.com/animals/mammals/facts/sea-otter', 'title': 'Sea Otter - Animal profile, pictures, facts, range map | National Geographic Kids', 'encrypted_index': 'EpEBCioIChgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDOTyPgsWh2F9LqUoXxoMbuY5w/8I7z8D/Y/wIjDGk/Gr5tQgDf+XhIdMAUCb9FvWeJSVfYA3UZcLj9NJfDrTqNaITIT5kxavUbbpWxIqFf+dSrqr1QW7UnIYdVrSZx1aL9BX6xgE', 'supported_text': 'using rocks to help them open mussels or other shellfish by placing a rock on their chests and smashing the shellfish against it'}], [{'type': 'web_search_result_location', 'cited_text': 'They can live up to 16 years, with their diet mainly consisting of fish and sometimes frogs, birds, or shellfish, depending on the species. ', 'url': 'https://en.wikipedia.org/wiki/Otter', 'title': 'Otter - Wikipedia', 'encrypted_index': 'EpABCioIChgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDK6WD0xh3RfEvioJMBoMcPwllW35WihbE85fIjDQbeZEhHScByRsjF41WrPE6tccdGrvRIRLVmC4GH/L42Yu9AviomOUH4ZPD8eGNwIqFBvHtpsbrbcPDxCuAtsdGodTPjQaGAQ=', 'supported_text': 'They can live up to 16 years'}], [{'type': 'web_search_result_location', 'cited_text': 'The Asian small-clawed otter is the smallest otter species and the giant otter and sea otter are the largest. ', 'url': 'https://en.wikipedia.org/wiki/Otter', 'title': 'Otter - Wikipedia', 'encrypted_index': 'EpABCioIChgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDPL9K3pNyyr1xEckOBoMau8BVGDthVaCxv7QIjBKtI794vfUecRrYPPwZkcCfi/JFCWcXZnEO/2uWjjJL8LUWqq0rYZHsLeCU4oYpWsqFCgyvSy+8U5nh9Yt+9ohNXWeJQNoGAQ=', 'supported_text': 'the Asian small-clawed otter (the smallest) to the giant otter and sea otter (the largest)'}]], 'thinking_blocks': None}))], usage=Usage(completion_tokens=440, prompt_tokens=16651, total_tokens=17091, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), server_tool_use=ServerToolUse(web_search_requests=1), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Otters are carnivorous mammals known for their playful behavior and adaptations to a semi-aquatic life. There are 14 known species of otters, which are part of the weasel family.\\n\\n**Physical Characteristics:** Otters typically have long, slender bodies with short legs and powerful webbed feet perfect for swimming. They possess dense, waterproof fur that keeps them warm. Their size varies by species, ranging from about 2 to 6 feet in length and weighing between 6 and 100 pounds.\\n\\n**Habitat and Diet:** Most otters live in and around freshwater rivers, lakes, and wetlands, while two species are marine. Their diet is primarily carnivorous and consists of fish, crayfish, crabs, and other aquatic invertebrates. Some species are adept at using tools, such as rocks, to break open shellfish.\\n\\n**Behavior and Social Structure:** Otters are known for their playful nature, often seen sliding down riverbanks. Their social structure varies; some species are mostly solitary, while others live in groups. They communicate through a variety of sounds, including whistles and chirps. Otters can live up to 16 years in the wild.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None, annotations=[{'type': 'url_citation', 'url_citation': {'end_index': 178, 'start_index': 104, 'title': 'wikipedia.org', 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF2pn0bF_6oFB3sdMwWq6AhdM4zuO-Xps0_S1bLBeXmWli7HPTvNASqRFBdloU1Si-pU2Guj-4yGn8t2lY3znWVGeG1ZI8R93cajVmHVmeytR74QRLYVH77UwL_hiPz'}}, {'type': 'url_citation', 'url_citation': {'end_index': 315, 'start_index': 180, 'title': 'wikipedia.org', 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF2pn0bF_6oFB3sdMwWq6AhdM4zuO-Xps0_S1bLBeXmWli7HPTvNASqRFBdloU1Si-pU2Guj-4yGn8t2lY3znWVGeG1ZI8R93cajVmHVmeytR74QRLYVH77UwL_hiPz'}}, {'type': 'url_citation', 'url_citation': {'end_index': 372, 'start_index': 316, 'title': 'wikipedia.org', 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF2pn0bF_6oFB3sdMwWq6AhdM4zuO-Xps0_S1bLBeXmWli7HPTvNASqRFBdloU1Si-pU2Guj-4yGn8t2lY3znWVGeG1ZI8R93cajVmHVmeytR74QRLYVH77UwL_hiPz'}}, {'type': 'url_citation', 'url_citation': {'end_index': 482, 'start_index': 373, 'title': 'study.com', 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQFFQo0zU7gp9wrrxaxTWtv8TcphvYKvxSL0ejmYdfsEAIJ9Dmy5pQwTOxqQbqW-sLKrQoPE4T1yQ9hl4oYgD5pc__Fd-lWZn4bfLFUgMdnRXKNpoaO8ZymBoGLtzTOqJg5lVnwtVKvNTNqCTWwdCI_U5pMgerGQNZqV6MB6U3N8VLVeGho='}}, {'type': 'url_citation', 'url_citation': {'end_index': 606, 'start_index': 484, 'title': 'wikipedia.org', 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF2pn0bF_6oFB3sdMwWq6AhdM4zuO-Xps0_S1bLBeXmWli7HPTvNASqRFBdloU1Si-pU2Guj-4yGn8t2lY3znWVGeG1ZI8R93cajVmHVmeytR74QRLYVH77UwL_hiPz'}}, {'type': 'url_citation', 'url_citation': {'end_index': 714, 'start_index': 607, 'title': 'crittercontrol.com', 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQHFQi65Rnx7jTizXrLDnx9N61o8IEohpWVC-j22j9M5V2gDyfSLclxAvBrmbohJuXvgMiCSSeGF19YqgezmNicOEyDGdXuSN7BtDvqCQ8MeYqQldnPzK_oNNW8ta0tnw2LQmf0brWZHmclXib9JVj7JN6faMg=='}}, {'type': 'url_citation', 'url_citation': {'end_index': 793, 'start_index': 715, 'title': 'wikipedia.org', 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF2pn0bF_6oFB3sdMwWq6AhdM4zuO-Xps0_S1bLBeXmWli7HPTvNASqRFBdloU1Si-pU2Guj-4yGn8t2lY3znWVGeG1ZI8R93cajVmHVmeytR74QRLYVH77UwL_hiPz'}}, {'type': 'url_citation', 'url_citation': {'end_index': 908, 'start_index': 795, 'title': 'wikipedia.org', 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF2pn0bF_6oFB3sdMwWq6AhdM4zuO-Xps0_S1bLBeXmWli7HPTvNASqRFBdloU1Si-pU2Guj-4yGn8t2lY3znWVGeG1ZI8R93cajVmHVmeytR74QRLYVH77UwL_hiPz'}}, {'type': 'url_citation', 'url_citation': {'end_index': 1002, 'start_index': 909, 'title': 'wikipedia.org', 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF2pn0bF_6oFB3sdMwWq6AhdM4zuO-Xps0_S1bLBeXmWli7HPTvNASqRFBdloU1Si-pU2Guj-4yGn8t2lY3znWVGeG1ZI8R93cajVmHVmeytR74QRLYVH77UwL_hiPz'}}, {'type': 'url_citation', 'url_citation': {'end_index': 1079, 'start_index': 1003, 'title': 'ukwildottertrust.org', 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQFRF0VW6hoXzWmAVA2B0Jmxy9-NCSur9G5Hhq8Nw6374Hcs01Keya_v6SCRSw-eSXk74OlD-BD4tvVj1tPGJkGQeCQdvNdc3z51uUssEeYzJquEt4YbWGQEZZvNHjUjow7tpOg='}}, {'type': 'url_citation', 'url_citation': {'end_index': 1123, 'start_index': 1080, 'title': 'wikipedia.org', 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF2pn0bF_6oFB3sdMwWq6AhdM4zuO-Xps0_S1bLBeXmWli7HPTvNASqRFBdloU1Si-pU2Guj-4yGn8t2lY3znWVGeG1ZI8R93cajVmHVmeytR74QRLYVH77UwL_hiPz'}}]))], usage=Usage(completion_tokens=465, prompt_tokens=12, total_tokens=578, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=194, rejected_prediction_tokens=None, text_tokens=271, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=12, image_tokens=None)), vertex_ai_grounding_metadata=[{'searchEntryPoint': {'renderedContent': '\\n
\\n
\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n
\\n
\\n \\n
\\n'}, 'groundingChunks': [{'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF2pn0bF_6oFB3sdMwWq6AhdM4zuO-Xps0_S1bLBeXmWli7HPTvNASqRFBdloU1Si-pU2Guj-4yGn8t2lY3znWVGeG1ZI8R93cajVmHVmeytR74QRLYVH77UwL_hiPz', 'title': 'wikipedia.org'}}, {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEodF4gXW5x0gckfC-61dKg4HrDDzH4Gmg5CYOFmSPmJAXVDu9Im4Hr6kIQCPXhkHas81DGHb9zOGZib_HCmBoR2P0YX2848NHuivTqntd0FcuMOEBWEXvvztxrJNEfH9c2QQ==', 'title': 'britannica.com'}}, {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQFFQo0zU7gp9wrrxaxTWtv8TcphvYKvxSL0ejmYdfsEAIJ9Dmy5pQwTOxqQbqW-sLKrQoPE4T1yQ9hl4oYgD5pc__Fd-lWZn4bfLFUgMdnRXKNpoaO8ZymBoGLtzTOqJg5lVnwtVKvNTNqCTWwdCI_U5pMgerGQNZqV6MB6U3N8VLVeGho=', 'title': 'study.com'}}, {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQFRF0VW6hoXzWmAVA2B0Jmxy9-NCSur9G5Hhq8Nw6374Hcs01Keya_v6SCRSw-eSXk74OlD-BD4tvVj1tPGJkGQeCQdvNdc3z51uUssEeYzJquEt4YbWGQEZZvNHjUjow7tpOg=', 'title': 'ukwildottertrust.org'}}, {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQHFQi65Rnx7jTizXrLDnx9N61o8IEohpWVC-j22j9M5V2gDyfSLclxAvBrmbohJuXvgMiCSSeGF19YqgezmNicOEyDGdXuSN7BtDvqCQ8MeYqQldnPzK_oNNW8ta0tnw2LQmf0brWZHmclXib9JVj7JN6faMg==', 'title': 'crittercontrol.com'}}, {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQHikH4zJLdkJQg80KmE30onD0iv0fWZ_owkdyLRLD5mJPT1Ntrt_J68utfO2omyUxMfcvbTn02AbqZiqrjWA3rcgIFitCZe9MouiXYAhploBRH3QTX_w__weveMt6jMqIZvCdOdlYADZhwC3UFYNg==', 'title': 'seaworld.org'}}, {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQE_8ckWYXXHCvYLvK4fCKFn7rmuEsGlSG7Nocws9XT3LOHTrXmpE2K-IhEO94yPnji1KaVDmwpCkHoYJDUiJQ7AqU4_GrHw5U4EufghQffGg46OYooNPFlIJikjahDUs8MjwJiOSGbDqRJ4W-Mg_jDluZ8Nl59tTtYhqLolJWFwja8SGDBFJv65hPI3Or-LxPYZiekQmo39QagEzIpwEgIrL19KPwy6wqRZKvZTZ7H3ePZA-rXZAdqU4v7GblD8-QSM46rkqQv5we8oyOXt4w==', 'title': 'bluereefaquarium.co.uk'}}, {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQFmhaLUT5FXSCe9JLketaUGHHz9D7eR0bXXCWq31gf6RBIw-p6_c9dd8CVsodWOf0e1b01wQQpqFaE8fnN62mNIXtNf-8z0dhhe7FOaRsATeuOsJEPK_UUaHTdgA0zFHRxPDEL13e2JhPnL4EJilGHgCzp56Vxr1Ff7UCfleETlclhbkhCau-LhItpI', 'title': 'woodlandtrust.org.uk'}}, {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQE3NCd5oA17ZUlfbQ9XbxXkx380QemvE4qYk479kDjOKNHHKzjwcTdVE5zmRx-BUGjgIkgLaXLGU6vKU8ax7F44_dZKMJXlELIeB7iVRvUUjUT2PqTqzB0WJsv0wLScaa-FCsUyu_lhct_UYWUvbJan', 'title': 'seaworld.com'}}, {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQGJ9ARniOp2Bq8pMJfgt0EQc0NfxbSNyKcKyh2NYu2qUv5-kSk545BuzRV1PZoEIaO7zpEvZ1jWSb39WidJGYGvjHOeNn9kRihYwOsV334tFa7fv7Ne7fTlctHBmrq7RPM9rKWdiwP275qlswCsrNPiovl7y66QBw==', 'title': 'petscare.com'}}, {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQETEv5OTvoUF8IiONMTdsfVYwAiOccFc0pvTYJWBRgAebHXihwUBZawAhuRd_F9GIrieqokMtARA3y6uoLjHg4JCCrBaiKw29iTsJm_G5sZqBJh9YC4O0bFPtbKJakXPLAw_oBdWxR-YWxBXnmGFp1PsYfFybS7eBw=', 'title': 'si.edu'}}, {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQE_5vdCEqYBtISJo0eWBUEojafrDzqiRq8p5N40Jq2yvJYs1GbAQQhULKc1fOHyAjBYyUYMfMKitLtwyLIWeYE0GK3KW1Mvs6n0K6MPikCeBoUtMKrwayDgC8XB2DWx9rKju8w4rvCWJLPp9PplTFZuxCw=', 'title': 'seaworld.org'}}], 'groundingSupports': [{'segment': {'endIndex': 103, 'text': 'Otters are carnivorous mammals known for their playful behavior and adaptations to a semi-aquatic life.'}, 'groundingChunkIndices': [0, 1]}, {'segment': {'startIndex': 104, 'endIndex': 178, 'text': 'There are 14 known species of otters, which are part of the weasel family.'}, 'groundingChunkIndices': [0]}, {'segment': {'startIndex': 180, 'endIndex': 315, 'text': '**Physical Characteristics:** Otters typically have long, slender bodies with short legs and powerful webbed feet perfect for swimming.'}, 'groundingChunkIndices': [0, 1, 2]}, {'segment': {'startIndex': 316, 'endIndex': 372, 'text': 'They possess dense, waterproof fur that keeps them warm.'}, 'groundingChunkIndices': [0, 3]}, {'segment': {'startIndex': 373, 'endIndex': 482, 'text': 'Their size varies by species, ranging from about 2 to 6 feet in length and weighing between 6 and 100 pounds.'}, 'groundingChunkIndices': [2]}, {'segment': {'startIndex': 484, 'endIndex': 606, 'text': '**Habitat and Diet:** Most otters live in and around freshwater rivers, lakes, and wetlands, while two species are marine.'}, 'groundingChunkIndices': [0, 1]}, {'segment': {'startIndex': 607, 'endIndex': 714, 'text': 'Their diet is primarily carnivorous and consists of fish, crayfish, crabs, and other aquatic invertebrates.'}, 'groundingChunkIndices': [4, 0, 5, 6, 7]}, {'segment': {'startIndex': 715, 'endIndex': 793, 'text': 'Some species are adept at using tools, such as rocks, to break open shellfish.'}, 'groundingChunkIndices': [0, 8]}, {'segment': {'startIndex': 795, 'endIndex': 908, 'text': '**Behavior and Social Structure:** Otters are known for their playful nature, often seen sliding down riverbanks.'}, 'groundingChunkIndices': [0, 9, 10]}, {'segment': {'startIndex': 909, 'endIndex': 1002, 'text': 'Their social structure varies; some species are mostly solitary, while others live in groups.'}, 'groundingChunkIndices': [0, 3, 11]}, {'segment': {'startIndex': 1003, 'endIndex': 1079, 'text': 'They communicate through a variety of sounds, including whistles and chirps.'}, 'groundingChunkIndices': [3]}, {'segment': {'startIndex': 1080, 'endIndex': 1123, 'text': 'Otters can live up to 16 years in the wild.'}, 'groundingChunkIndices': [0]}], 'webSearchQueries': ['otter overview', 'what are the characteristics of otters', 'what do otters eat', 'otter behavior']}], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -2133,18 +2305,13 @@ { "cell_type": "code", "execution_count": null, - "id": "cf84f8c8", + "id": "45e2a7cf", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[{'type': 'web_search_result_location',\n", - " 'cited_text': 'Otters are carnivorous mammals in the subfamily Lutrinae. The 14 extant otter species are all semiaquatic, both freshwater and marine. ',\n", - " 'url': 'https://en.wikipedia.org/wiki/Otter',\n", - " 'title': 'Otter - Wikipedia',\n", - " 'encrypted_index': 'EpEBCioIChgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDPhp4wuMLBPpkzvdqhoMt61kca3Ci3Bk2vucIjA5/jSHWV1T6RVvm5w1DBqUVD+imn3dwb8iWgw2n7Em4LDyVLWwrshxvOCFLZAUm2IqFeSyRH5fJmyP5sI1l0DGXFHn7KdaDBgE',\n", - " 'supported_text': 'carnivorous mammals in the subfamily Lutrinae, with 14 extant species that are all semiaquatic'}]" + "dict_keys(['searchEntryPoint', 'groundingChunks', 'groundingSupports', 'webSearchQueries'])" ] }, "execution_count": null, @@ -2153,63 +2320,158 @@ } ], "source": [ - "r.choices[0].message.provider_specific_fields['citations'][0]" + "r['vertex_ai_grounding_metadata'][0].keys()" ] }, { - "cell_type": "markdown", - "id": "4f2a8559", + "cell_type": "code", + "execution_count": null, + "id": "da64e713", "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['otter overview',\n", + " 'what are the characteristics of otters',\n", + " 'what do otters eat',\n", + " 'otter behavior']" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "However, when streaming the results are not captured this way.\n", - "Instead, we provide this helper function that adds the citation to the `content` field in markdown format:" + "r['vertex_ai_grounding_metadata'][0]['webSearchQueries']" ] }, { - "cell_type": "code", - "execution_count": null, - "id": "fc341e7e", + "cell_type": "markdown", + "id": "689cdf9d", "metadata": {}, - "outputs": [], "source": [ - "#| export\n", - "def cite_footnote(msg):\n", - " if not (delta:=nested_idx(msg, 'choices', 0, 'delta')): return\n", - " if citation:= nested_idx(delta, 'provider_specific_fields', 'citation'):\n", - " title = citation['title'].replace('\"', '\\\\\"')\n", - " delta.content = f'[*]({citation[\"url\"]} \"{title}\") '\n", - " \n", - "def cite_footnotes(stream_list):\n", - " \"Add markdown footnote citations to stream deltas\"\n", - " for msg in stream_list: cite_footnote(msg)" + "Web search results:" ] }, { "cell_type": "code", "execution_count": null, - "id": "c2150365", + "id": "030d17a5", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQF2pn0bF_6oFB3sdMwWq6AhdM4zuO-Xps0_S1bLBeXmWli7HPTvNASqRFBdloU1Si-pU2Guj-4yGn8t2lY3znWVGeG1ZI8R93cajVmHVmeytR74QRLYVH77UwL_hiPz',\n", + " 'title': 'wikipedia.org'}},\n", + " {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEodF4gXW5x0gckfC-61dKg4HrDDzH4Gmg5CYOFmSPmJAXVDu9Im4Hr6kIQCPXhkHas81DGHb9zOGZib_HCmBoR2P0YX2848NHuivTqntd0FcuMOEBWEXvvztxrJNEfH9c2QQ==',\n", + " 'title': 'britannica.com'}},\n", + " {'web': {'uri': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQFFQo0zU7gp9wrrxaxTWtv8TcphvYKvxSL0ejmYdfsEAIJ9Dmy5pQwTOxqQbqW-sLKrQoPE4T1yQ9hl4oYgD5pc__Fd-lWZn4bfLFUgMdnRXKNpoaO8ZymBoGLtzTOqJg5lVnwtVKvNTNqCTWwdCI_U5pMgerGQNZqV6MB6U3N8VLVeGho=',\n", + " 'title': 'study.com'}}]" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "r['vertex_ai_grounding_metadata'][0]['groundingChunks'][:3]" + ] + }, + { + "cell_type": "markdown", + "id": "8971dc08", + "metadata": {}, + "source": [ + "Citations in gemini: " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eb79aef5", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'segment': {'endIndex': 103,\n", + " 'text': 'Otters are carnivorous mammals known for their playful behavior and adaptations to a semi-aquatic life.'},\n", + " 'groundingChunkIndices': [0, 1]},\n", + " {'segment': {'startIndex': 104,\n", + " 'endIndex': 178,\n", + " 'text': 'There are 14 known species of otters, which are part of the weasel family.'},\n", + " 'groundingChunkIndices': [0]},\n", + " {'segment': {'startIndex': 180,\n", + " 'endIndex': 315,\n", + " 'text': '**Physical Characteristics:** Otters typically have long, slender bodies with short legs and powerful webbed feet perfect for swimming.'},\n", + " 'groundingChunkIndices': [0, 1, 2]}]" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "r['vertex_ai_grounding_metadata'][0]['groundingSupports'][:3]" + ] + }, + { + "cell_type": "markdown", + "id": "4f2a8559", + "metadata": {}, + "source": [ + "However, when streaming the results are not captured this way.\n", + "Instead, we provide this helper function that adds the citation to the `content` field in markdown format:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fc341e7e", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "def cite_footnote(msg):\n", + " if not (delta:=nested_idx(msg, 'choices', 0, 'delta')): return\n", + " if citation:= nested_idx(delta, 'provider_specific_fields', 'citation'):\n", + " title = citation['title'].replace('\"', '\\\\\"')\n", + " delta.content = f'[*]({citation[\"url\"]} \"{title}\") '\n", + " \n", + "def cite_footnotes(stream_list):\n", + " \"Add markdown footnote citations to stream deltas\"\n", + " for msg in stream_list: cite_footnote(msg)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c2150365", "metadata": {}, "outputs": [ { "data": { "text/markdown": [ - "Otters are [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") carnivorous mammals in the subfamily Lutrinae, part of [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") the Mustelidae family, which includes weasels, badgers, mink, and wolverines. [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") The 14 extant otter species are all semiaquatic, both freshwater and marine.\n", - "\n", - "They have [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") long, slim bodies and relatively short limbs, with [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") powerful webbed feet used to swim, and seal-like abilities for holding breath underwater. [*](https://www.nationalgeographic.com/animals/mammals/facts/otters-1 \"Otters, facts and information | National Geographic\") Otters have the densest fur of any animal—as many as a million hairs per square inch in places.\n", + "Otters are carnivorous mammals belonging to the subfamily Lutrinae, part of the weasel family (Mustelidae), which also includes badgers, mink, and wolverines. There are 13 extant species of otters, all of which are semi-aquatic, living in both freshwater and marine environments. They are found on every continent except Australia and Antarctica.\n", "\n", - "[*](https://www.nationalgeographic.com/animals/mammals/facts/otters-1 \"Otters, facts and information | National Geographic\") All otters are expert hunters that eat fish, crustaceans, and other critters. They're known for being [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") playful animals, engaging in activities like sliding into water on natural slides and playing with stones. [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") Otters live up to 16 years, and [*](https://www.nationalgeographic.com/animals/mammals/facts/otters-1 \"Otters, facts and information | National Geographic\") baby otters, called pups or kittens, stay with their mothers until they're up to a year old.\n", + "These mammals are recognized by their long, slim bodies, short limbs, and powerful, webbed feet, which make them excellent swimmers. Most species also possess a long, muscular tail. Otters have incredibly dense, insulated fur, especially sea otters which have the thickest fur of any animal, helping them trap air for warmth and buoyancy in water as they lack a blubber layer. Their diet primarily consists of fish, but can also include frogs, birds, and shellfish. Otters are known for their playful behavior, engaging in activities like sliding and playing with stones. They typically live in dens called \"holts\" near water sources.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5`\n", + "- model: `gemini-2.5-flash`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=424, prompt_tokens=13772, total_tokens=14196, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", + "- usage: `Usage(completion_tokens=305, prompt_tokens=12, total_tokens=317, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Otters are [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") carnivorous mammals in the subfamily Lutrinae, part of [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") the Mustelidae family, which includes weasels, badgers, mink, and wolverines. [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") The 14 extant otter species are all semiaquatic, both freshwater and marine.\\n\\nThey have [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") long, slim bodies and relatively short limbs, with [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") powerful webbed feet used to swim, and seal-like abilities for holding breath underwater. [*](https://www.nationalgeographic.com/animals/mammals/facts/otters-1 \"Otters, facts and information | National Geographic\") Otters have the densest fur of any animal—as many as a million hairs per square inch in places.\\n\\n[*](https://www.nationalgeographic.com/animals/mammals/facts/otters-1 \"Otters, facts and information | National Geographic\") All otters are expert hunters that eat fish, crustaceans, and other critters. They\\'re known for being [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") playful animals, engaging in activities like sliding into water on natural slides and playing with stones. [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") Otters live up to 16 years, and [*](https://www.nationalgeographic.com/animals/mammals/facts/otters-1 \"Otters, facts and information | National Geographic\") baby otters, called pups or kittens, stay with their mothers until they\\'re up to a year old.', role='assistant', tool_calls=[], function_call=None, provider_specific_fields=None))], usage=Usage(completion_tokens=424, prompt_tokens=13772, total_tokens=14196, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-flash', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Otters are carnivorous mammals belonging to the subfamily Lutrinae, part of the weasel family (Mustelidae), which also includes badgers, mink, and wolverines. There are 13 extant species of otters, all of which are semi-aquatic, living in both freshwater and marine environments. They are found on every continent except Australia and Antarctica.\\n\\nThese mammals are recognized by their long, slim bodies, short limbs, and powerful, webbed feet, which make them excellent swimmers. Most species also possess a long, muscular tail. Otters have incredibly dense, insulated fur, especially sea otters which have the thickest fur of any animal, helping them trap air for warmth and buoyancy in water as they lack a blubber layer. Their diet primarily consists of fish, but can also include frogs, birds, and shellfish. Otters are known for their playful behavior, engaging in activities like sliding and playing with stones. They typically live in dens called \"holts\" near water sources.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None, annotations=[{'type': 'url_citation', 'url_citation': {'start_index': 159, 'end_index': 278, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEikjifWl2Ho0XBpILK2GDuDb_yAvNWrhkvWaMWYUyDvmjDr-vgVI5dbqrVg7m4c1bLSB7UFDU-HwbhaKEz0Btj6xrm00GjjGDZuGro4FxG5v5xAzEFYTBAzvYBLXbX', 'title': 'wikipedia.org'}}, {'type': 'url_citation', 'url_citation': {'start_index': 280, 'end_index': 345, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQH3h53jqxHUFanpfBoCWCSFEatHhzUvZTR2jq6wxCsWxLa7DYQTY3FcNoJiHbyjDmfZE4zkpISg00GsT1Hgy5cwXP0SutjGcjnURlTdnR1gcGlI7KmbRyfP_arMsTmdWmQABglBAGRpZQHV-3WZjrd1UKjCg3h81XbQM43c', 'title': 'treehugger.com'}}, {'type': 'url_citation', 'url_citation': {'start_index': 348, 'end_index': 479, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEikjifWl2Ho0XBpILK2GDuDb_yAvNWrhkvWaMWYUyDvmjDr-vgVI5dbqrVg7m4c1bLSB7UFDU-HwbhaKEz0Btj6xrm00GjjGDZuGro4FxG5v5xAzEFYTBAzvYBLXbX', 'title': 'wikipedia.org'}}, {'type': 'url_citation', 'url_citation': {'start_index': 481, 'end_index': 528, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEikjifWl2Ho0XBpILK2GDuDb_yAvNWrhkvWaMWYUyDvmjDr-vgVI5dbqrVg7m4c1bLSB7UFDU-HwbhaKEz0Btj6xrm00GjjGDZuGro4FxG5v5xAzEFYTBAzvYBLXbX', 'title': 'wikipedia.org'}}, {'type': 'url_citation', 'url_citation': {'start_index': 530, 'end_index': 723, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEikjifWl2Ho0XBpILK2GDuDb_yAvNWrhkvWaMWYUyDvmjDr-vgVI5dbqrVg7m4c1bLSB7UFDU-HwbhaKEz0Btj6xrm00GjjGDZuGro4FxG5v5xAzEFYTBAzvYBLXbX', 'title': 'wikipedia.org'}}, {'type': 'url_citation', 'url_citation': {'start_index': 725, 'end_index': 812, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEikjifWl2Ho0XBpILK2GDuDb_yAvNWrhkvWaMWYUyDvmjDr-vgVI5dbqrVg7m4c1bLSB7UFDU-HwbhaKEz0Btj6xrm00GjjGDZuGro4FxG5v5xAzEFYTBAzvYBLXbX', 'title': 'wikipedia.org'}}, {'type': 'url_citation', 'url_citation': {'start_index': 814, 'end_index': 918, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEikjifWl2Ho0XBpILK2GDuDb_yAvNWrhkvWaMWYUyDvmjDr-vgVI5dbqrVg7m4c1bLSB7UFDU-HwbhaKEz0Btj6xrm00GjjGDZuGro4FxG5v5xAzEFYTBAzvYBLXbX', 'title': 'wikipedia.org'}}, {'type': 'url_citation', 'url_citation': {'start_index': 920, 'end_index': 981, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQEOcyghAA8GOTgGxMjelz2bAQn5RrgyF6WKnKhjZ2HByNFaOM-WtZN-XtMXWLRls6quJSFyIk-flfzmfNWZYvQ1YacXTlvLHRcQ5E3IwKDrfMkGGh8hVX-Uchje967i3azI3-38CojrrRMry7FNriPmBXwym1572RM=', 'title': 'crittercarewildlife.org'}}]))], usage=Usage(completion_tokens=305, prompt_tokens=12, total_tokens=317, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" ] }, "execution_count": null, @@ -2218,7 +2480,7 @@ } ], "source": [ - "r = list(c(smsg, stream=True, web_search_options={\"search_context_size\": \"low\"}))\n", + "r = list(c(smsg, ms[2], stream=True, web_search_options={\"search_context_size\": \"low\"}))\n", "cite_footnotes(r)\n", "stream_chunk_builder(r)" ] @@ -2279,7 +2541,7 @@ "outputs": [], "source": [ "#| export\n", - "_final_prompt = \"You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed.\"" + "_final_prompt = dict(role=\"user\", content=\"You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed.\")" ] }, { @@ -2322,7 +2584,7 @@ " cache_idxs = L(self.cache_idxs).filter().map(lambda o: o-1 if o>0 else o)\n", " else:\n", " cache_idxs = self.cache_idxs\n", - " if msg: self.hist = mk_msgs(self.hist+[msg], self.cache, cache_idxs, self.ttl)\n", + " if msg: self.hist = mk_msgs(self.hist+[msg], self.cache and 'claude' in self.model, cache_idxs, self.ttl)\n", " pf = [{\"role\":\"assistant\",\"content\":prefill}] if prefill else []\n", " return sp + self.hist + pf\n", "\n", @@ -2343,6 +2605,7 @@ " tools=self.tool_schemas, reasoning_effort = effort.get(think), tool_choice=tool_choice,\n", " # temperature is not supported when reasoning\n", " temperature=None if think else ifnone(temp,self.temp),\n", + " caching=self.cache and 'claude' not in self.model,\n", " **kwargs)\n", " if stream:\n", " if prefill: yield _mk_prefill(prefill)\n", @@ -2394,6 +2657,20 @@ " for r in self.h if hasattr(r, 'choices'))" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "2e9247ba", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "@patch\n", + "def print_hist(self:Chat):\n", + " \"Print each message on a different line\"\n", + " for r in self.hist: print(r, end='\\n\\n')" + ] + }, { "cell_type": "markdown", "id": "ce163563", @@ -2419,19 +2696,21 @@ { "data": { "text/markdown": [ - "Hey Rens! Nice to meet you. How can I help you today?\n", + "Hi Rens! It's nice to meet you.\n", + "\n", + "How can I help you today?\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=20, prompt_tokens=14, total_tokens=34, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=837, prompt_tokens=6, total_tokens=843, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=818, rejected_prediction_tokens=None, text_tokens=19, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=6, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Hey Rens! Nice to meet you. How can I help you today?', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=20, prompt_tokens=14, total_tokens=34, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"Hi Rens! It's nice to meet you.\\n\\nHow can I help you today?\", role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=837, prompt_tokens=6, total_tokens=843, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=818, rejected_prediction_tokens=None, text_tokens=19, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=6, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -2454,19 +2733,19 @@ { "data": { "text/markdown": [ - "Your name is Rens! You told me that when you introduced yourself.\n", + "Your name is Rens.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=19, prompt_tokens=42, total_tokens=61, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=234, prompt_tokens=30, total_tokens=264, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=229, rejected_prediction_tokens=None, text_tokens=5, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=30, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Your name is Rens! You told me that when you introduced yourself.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=19, prompt_tokens=42, total_tokens=61, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Your name is Rens.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=234, prompt_tokens=30, total_tokens=264, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=229, rejected_prediction_tokens=None, text_tokens=5, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=30, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -2498,9 +2777,9 @@ "data": { "text/plain": [ "[{'role': 'user', 'content': 'Hey my name is Rens'},\n", - " Message(content='Hey Rens! Nice to meet you. How can I help you today?', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}),\n", + " Message(content=\"Hi Rens! It's nice to meet you.\\n\\nHow can I help you today?\", role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None),\n", " {'role': 'user', 'content': 'Whats my name'},\n", - " Message(content='Your name is Rens! You told me that when you introduced yourself.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None})]" + " Message(content='Your name is Rens.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None)]" ] }, "execution_count": null, @@ -2512,819 +2791,61 @@ "chat.hist" ] }, - { - "cell_type": "markdown", - "id": "8f38015b", - "metadata": {}, - "source": [ - "You can also pass an old chat history into new Chat objects:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d9f575f3", - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "Your name is Rens! You've asked me a couple times now - is everything okay? 😊\n", - "\n", - "
\n", - "\n", - "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", - "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=26, prompt_tokens=70, total_tokens=96, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", - "\n", - "
" - ], - "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"Your name is Rens! You've asked me a couple times now - is everything okay? 😊\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=26, prompt_tokens=70, total_tokens=96, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "chat2 = Chat(model, hist=chat.hist)\n", - "chat2(\"What was my name again?\")" - ] - }, - { - "cell_type": "markdown", - "id": "36165660", - "metadata": {}, - "source": [ - "You can prefix an [OpenAI compatible model](https://docs.litellm.ai/docs/providers/openai_compatible) with 'openai/' and use an `api_base` and `api_key` argument to use models not registered with litellm.\n", - "\n", - "```python\n", - "import os, litellm\n", - "OPENROUTER_API_KEY = os.getenv(\"OPENROUTER_API_KEY\")\n", - "OPENROUTER_BASE_URL = \"https://openrouter.ai/api/v1\"\n", - "c = Chat(\"openai/gpt-oss-20b\", api_key=OPENROUTER_API_KEY, api_base=OPENROUTER_BASE_URL)\n", - "c(\"hi\")\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "26748132", - "metadata": {}, - "source": [ - "### Synthetic History Creation\n", - "\n", - "Lets build chat history step by step. That way we can tweak anything we need to during testing." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b8ef8d88", - "metadata": {}, - "outputs": [], - "source": [ - "pr = \"What is 5 + 7? Use the tool to calculate it.\"\n", - "c = Chat(model, tools=[simple_add])\n", - "res = c(pr)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b5ef0aeb", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "@patch\n", - "def print_hist(self:Chat):\n", - " \"Print each message on a different line\"\n", - " for r in self.hist: print(r, end='\\n\\n')" - ] - }, - { - "cell_type": "markdown", - "id": "bde51fc9", - "metadata": {}, - "source": [ - "Whereas normally without tools we would get one user input and one assistant response. Here we get two extra messages in between.\n", - "- An assistant message requesting the tools with arguments.\n", - "- A tool response with the result to the tool call." - ] - }, { "cell_type": "code", "execution_count": null, - "id": "49792a9c", + "id": "3d010ee1", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "{'role': 'user', 'content': 'What is 5 + 7? Use the tool to calculate it.'}\n", + "{'role': 'user', 'content': 'Hey my name is Rens'}\n", "\n", - "Message(content=None, role='assistant', tool_calls=[{'index': 0, 'function': {'arguments': '{\"a\": 5, \"b\": 7}', 'name': 'simple_add'}, 'id': 'toolu_01T7s3BS197qHzgBKAiNV1uQ', 'type': 'function'}], function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None})\n", + "Message(content=\"Hi Rens! It's nice to meet you.\\n\\nHow can I help you today?\", role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None)\n", "\n", - "{'tool_call_id': 'toolu_01T7s3BS197qHzgBKAiNV1uQ', 'role': 'tool', 'name': 'simple_add', 'content': '12'}\n", + "{'role': 'user', 'content': 'Whats my name'}\n", "\n", - "{'role': 'assistant', 'content': 'You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed.'}\n", - "\n", - "Message(content='\\n\\nThe result of 5 + 7 is **12**.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None})\n", + "Message(content='Your name is Rens.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None)\n", "\n" ] } ], "source": [ - "c.print_hist()" - ] - }, - { - "cell_type": "markdown", - "id": "ab2eb0a2", - "metadata": {}, - "source": [ - "Lets try to build this up manually so we have full control over the inputs." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a37a77b6", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "def random_tool_id():\n", - " \"Generate a random tool ID with 'toolu_' prefix\"\n", - " random_part = ''.join(random.choices(string.ascii_letters + string.digits, k=25))\n", - " return f'toolu_{random_part}'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f4a0bd16", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'toolu_0UAqFzWsDK4FrUMp48Y3tT3QD'" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "random_tool_id()" - ] - }, - { - "cell_type": "markdown", - "id": "d22e52b7", - "metadata": {}, - "source": [ - "A tool call request can contain one more or more tool calls. Lets make one." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e00e88b4", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "def mk_tc(func, args, tcid=None, idx=1):\n", - " if not tcid: tcid = random_tool_id()\n", - " return {'index': idx, 'function': {'arguments': args, 'name': func}, 'id': tcid, 'type': 'function'}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "324b9182", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'index': 1,\n", - " 'function': {'arguments': '{\"a\": 5, \"b\": 7}', 'name': 'simple_add'},\n", - " 'id': 'toolu_gAL47D1qXIaSyZPaE1pu1lJo7',\n", - " 'type': 'function'}" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tc = mk_tc(simple_add.__name__, json.dumps(dict(a=5, b=7)))\n", - "tc" - ] - }, - { - "cell_type": "markdown", - "id": "97da6222", - "metadata": {}, - "source": [ - "This can then be packged into the full Message object produced by the assitant." + "chat.print_hist()" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "436abceb", - "metadata": {}, - "outputs": [], - "source": [ - "def mk_tc_req(content, tcs): return Message(content=content, role='assistant', tool_calls=tcs, function_call=None)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "94c031e7", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Message(content=\"I'll use the simple_add tool to calculate 5 + 7 for you.\", role='assistant', tool_calls=[ChatCompletionMessageToolCall(index=1, function=Function(arguments='{\"a\": 5, \"b\": 7}', name='simple_add'), id='toolu_gAL47D1qXIaSyZPaE1pu1lJo7', type='function')], function_call=None, provider_specific_fields=None)" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tc_cts = \"I'll use the simple_add tool to calculate 5 + 7 for you.\"\n", - "tcq = mk_tc_req(tc_cts, [tc])\n", - "tcq" - ] - }, - { - "cell_type": "markdown", - "id": "0a1a0364", - "metadata": {}, - "source": [ - "Notice how Message instantiation creates a list of ChatCompletionMessageToolCalls by default. When the tools are executed this is converted back\n", - "to a dictionary, for consistency we want to keep these as dictionaries from the beginning." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "00cebbbb", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "def mk_tc_req(content, tcs):\n", - " msg = Message(content=content, role='assistant', tool_calls=tcs, function_call=None)\n", - " msg.tool_calls = [{**dict(tc), 'function': dict(tc['function'])} for tc in msg.tool_calls]\n", - " return msg" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a0d3468d", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Message(content=\"I'll use the simple_add tool to calculate 5 + 7 for you.\", role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 5, \"b\": 7}', 'name': 'simple_add'}, 'id': 'toolu_gAL47D1qXIaSyZPaE1pu1lJo7', 'type': 'function'}], function_call=None, provider_specific_fields=None)" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tcq = mk_tc_req(tc_cts, [tc])\n", - "tcq" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b75dc3e7", - "metadata": {}, - "outputs": [], - "source": [ - "c = Chat(model, tools=[simple_add], hist=[pr, tcq])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bd673382", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'role': 'user', 'content': 'What is 5 + 7? Use the tool to calculate it.'}\n", - "\n", - "Message(content=\"I'll use the simple_add tool to calculate 5 + 7 for you.\", role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 5, \"b\": 7}', 'name': 'simple_add'}, 'id': 'toolu_gAL47D1qXIaSyZPaE1pu1lJo7', 'type': 'function'}], function_call=None, provider_specific_fields=None)\n", - "\n" - ] - } - ], - "source": [ - "c.print_hist()" - ] - }, - { - "cell_type": "markdown", - "id": "c490dcfb", - "metadata": {}, - "source": [ - "Looks good so far! Now we will want to provide the actual result!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "59e69d43", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "def mk_tc_result(tc, result): return {'tool_call_id': tc['id'], 'role': 'tool', 'name': tc['function']['name'], 'content': result}" - ] - }, - { - "cell_type": "markdown", - "id": "94067b82", - "metadata": {}, - "source": [ - "Note we might have more than one tool call if more than one was passed in, here we just will make one result." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "175b9d78", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'index': 1,\n", - " 'function': {'arguments': '{\"a\": 5, \"b\": 7}', 'name': 'simple_add'},\n", - " 'id': 'toolu_gAL47D1qXIaSyZPaE1pu1lJo7',\n", - " 'type': 'function'}" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tcq.tool_calls[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6f969e27", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'tool_call_id': 'toolu_gAL47D1qXIaSyZPaE1pu1lJo7',\n", - " 'role': 'tool',\n", - " 'name': 'simple_add',\n", - " 'content': '12'}" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "mk_tc_result(tcq.tool_calls[0], '12')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e5d8e695", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "def mk_tc_results(tcq, results): return [mk_tc_result(a,b) for a,b in zip(tcq.tool_calls, results)]" - ] - }, - { - "cell_type": "markdown", - "id": "90d8c658", - "metadata": {}, - "source": [ - "Same for here tcq.tool_calls will match the number of results passed in the results list." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bd6e2307", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Message(content=\"I'll use the simple_add tool to calculate 5 + 7 for you.\", role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 5, \"b\": 7}', 'name': 'simple_add'}, 'id': 'toolu_gAL47D1qXIaSyZPaE1pu1lJo7', 'type': 'function'}], function_call=None, provider_specific_fields=None)" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tcq" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "59c2f72e", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'tool_call_id': 'toolu_gAL47D1qXIaSyZPaE1pu1lJo7',\n", - " 'role': 'tool',\n", - " 'name': 'simple_add',\n", - " 'content': '12'}]" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tcr = mk_tc_results(tcq, ['12'])\n", - "tcr" - ] - }, - { - "cell_type": "markdown", - "id": "608b90d2", - "metadata": {}, - "source": [ - "Now we can call it with this synthetic data to see what the response is!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "efed96b7", - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "The result of 5 + 7 is **12**.\n", - "\n", - "
\n", - "\n", - "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", - "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=17, prompt_tokens=720, total_tokens=737, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", - "\n", - "
" - ], - "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='The result of 5 + 7 is **12**.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=17, prompt_tokens=720, total_tokens=737, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "c(tcr[0])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "db8e06d6", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'role': 'user', 'content': 'What is 5 + 7? Use the tool to calculate it.'}\n", - "\n", - "Message(content=\"I'll use the simple_add tool to calculate 5 + 7 for you.\", role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 5, \"b\": 7}', 'name': 'simple_add'}, 'id': 'toolu_gAL47D1qXIaSyZPaE1pu1lJo7', 'type': 'function'}], function_call=None, provider_specific_fields=None)\n", - "\n", - "{'tool_call_id': 'toolu_gAL47D1qXIaSyZPaE1pu1lJo7', 'role': 'tool', 'name': 'simple_add', 'content': '12'}\n", - "\n", - "Message(content='The result of 5 + 7 is **12**.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None})\n", - "\n" - ] - } - ], - "source": [ - "c.print_hist()" - ] - }, - { - "cell_type": "markdown", - "id": "56b6af73", - "metadata": {}, - "source": [ - "Lets try this again, but lets give it something that is clearly wrong for fun." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "01a9049c", - "metadata": {}, - "outputs": [], - "source": [ - "c = Chat(model, tools=[simple_add], hist=[pr, tcq])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "59f546c0", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'tool_call_id': 'toolu_gAL47D1qXIaSyZPaE1pu1lJo7',\n", - " 'role': 'tool',\n", - " 'name': 'simple_add',\n", - " 'content': '13'}]" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tcr = mk_tc_results(tcq, ['13'])\n", - "tcr" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f7befdf1", - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "The result of 5 + 7 is **12**.\n", - "\n", - "
\n", - "\n", - "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", - "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=17, prompt_tokens=720, total_tokens=737, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", - "\n", - "
" - ], - "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='The result of 5 + 7 is **12**.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=17, prompt_tokens=720, total_tokens=737, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "c(tcr[0])" - ] - }, - { - "cell_type": "markdown", - "id": "84387429", - "metadata": {}, - "source": [ - "Lets make sure this works with multiple tool calls in the same assistant Message." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "027f9a15", - "metadata": {}, - "outputs": [], - "source": [ - "tcs = [\n", - " mk_tc(simple_add.__name__, json.dumps({\"a\": 5, \"b\": 7})), \n", - " mk_tc(simple_add.__name__, json.dumps({\"a\": 6, \"b\": 7})), \n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "44baa92b", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Message(content='I will calculate these for you!', role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 5, \"b\": 7}', 'name': 'simple_add'}, 'id': 'toolu_XBetF5gIRHYH7LKBKxJsllLOD', 'type': 'function'}, {'index': 1, 'function': {'arguments': '{\"a\": 6, \"b\": 7}', 'name': 'simple_add'}, 'id': 'toolu_fU25035HyRrY03K6JBO94XfLE', 'type': 'function'}], function_call=None, provider_specific_fields=None)" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tcq = mk_tc_req(\"I will calculate these for you!\", tcs)\n", - "tcq" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2abb6a8f", - "metadata": {}, - "outputs": [], - "source": [ - "tcr = mk_tc_results(tcq, ['12', '13'])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "866aa31d", - "metadata": {}, - "outputs": [], - "source": [ - "c = Chat(model, tools=[simple_add], hist=[pr, tcq, tcr[0]])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5a9d9ecd", - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "The answer is **12**. 5 + 7 = 12.\n", - "\n", - "
\n", - "\n", - "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", - "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=21, prompt_tokens=812, total_tokens=833, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", - "\n", - "
" - ], - "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='The answer is **12**. 5 + 7 = 12.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=21, prompt_tokens=812, total_tokens=833, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "c(tcr[1])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ee111193", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'role': 'user', 'content': 'What is 5 + 7? Use the tool to calculate it.'}\n", - "\n", - "Message(content='I will calculate these for you!', role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 5, \"b\": 7}', 'name': 'simple_add'}, 'id': 'toolu_XBetF5gIRHYH7LKBKxJsllLOD', 'type': 'function'}, {'index': 1, 'function': {'arguments': '{\"a\": 6, \"b\": 7}', 'name': 'simple_add'}, 'id': 'toolu_fU25035HyRrY03K6JBO94XfLE', 'type': 'function'}], function_call=None, provider_specific_fields=None)\n", - "\n", - "{'tool_call_id': 'toolu_XBetF5gIRHYH7LKBKxJsllLOD', 'role': 'tool', 'name': 'simple_add', 'content': '12'}\n", - "\n", - "{'tool_call_id': 'toolu_fU25035HyRrY03K6JBO94XfLE', 'role': 'tool', 'name': 'simple_add', 'content': '13'}\n", - "\n", - "Message(content='The answer is **12**. 5 + 7 = 12.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None})\n", - "\n" - ] - } - ], - "source": [ - "c.print_hist()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3e5b97b6", - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "\n", - "\n", - "The result of 5 + 3 is **8**.\n", - "\n", - "
\n", - "\n", - "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", - "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=18, prompt_tokens=742, total_tokens=760, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", - "\n", - "
" - ], - "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='\\n\\nThe result of 5 + 3 is **8**.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=18, prompt_tokens=742, total_tokens=760, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], + { + "cell_type": "markdown", + "id": "8f38015b", + "metadata": {}, "source": [ - "chat = Chat(ms[1], tools=[simple_add])\n", - "res = chat(\"What's 5 + 3? Use the `simple_add` tool.\")\n", - "res" + "You can also pass an old chat history into new Chat objects:" ] }, { "cell_type": "code", "execution_count": null, - "id": "6c84d6ef", + "id": "d9f575f3", "metadata": {}, "outputs": [ { "data": { "text/markdown": [ - "Here's a joke based on the number 8:\n", - "\n", - "Why was 6 afraid of 7?\n", - "\n", - "Because 7 8 (ate) 9!\n", - "\n", - "But since we got 8 as our answer, here's another one:\n", - "\n", - "What do you call an 8 that's been working out?\n", - "\n", - "An \"ate\" with great figure! 💪\n", - "\n", - "(Okay, that one was a bit of a stretch... much like the number 8 lying on its side to become the infinity symbol ∞!)\n", + "Your name is Rens.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=116, prompt_tokens=774, total_tokens=890, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=255, prompt_tokens=43, total_tokens=298, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=250, rejected_prediction_tokens=None, text_tokens=5, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=43, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Here\\'s a joke based on the number 8:\\n\\nWhy was 6 afraid of 7?\\n\\nBecause 7 8 (ate) 9!\\n\\nBut since we got 8 as our answer, here\\'s another one:\\n\\nWhat do you call an 8 that\\'s been working out?\\n\\nAn \"ate\" with great figure! 💪\\n\\n(Okay, that one was a bit of a stretch... much like the number 8 lying on its side to become the infinity symbol ∞!)', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=116, prompt_tokens=774, total_tokens=890, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Your name is Rens.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=255, prompt_tokens=43, total_tokens=298, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=250, rejected_prediction_tokens=None, text_tokens=5, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=43, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -3333,39 +2854,24 @@ } ], "source": [ - "res = chat(\"Now, tell me a joke based on that result.\")\n", - "res" + "chat2 = Chat(model, hist=chat.hist)\n", + "chat2(\"What was my name again?\")" ] }, { - "cell_type": "code", - "execution_count": null, - "id": "d6a8bec1", + "cell_type": "markdown", + "id": "36165660", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'role': 'user', 'content': \"What's 5 + 3? Use the `simple_add` tool.\"},\n", - " Message(content=None, role='assistant', tool_calls=[{'index': 0, 'function': {'arguments': '{\"a\": 5, \"b\": 3}', 'name': 'simple_add'}, 'id': 'toolu_01VCv3KiHsdFmQWipWta5iPb', 'type': 'function'}], function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}),\n", - " {'tool_call_id': 'toolu_01VCv3KiHsdFmQWipWta5iPb',\n", - " 'role': 'tool',\n", - " 'name': 'simple_add',\n", - " 'content': '8'},\n", - " {'role': 'assistant',\n", - " 'content': 'You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed.'},\n", - " Message(content='\\n\\nThe result of 5 + 3 is **8**.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}),\n", - " {'role': 'user', 'content': 'Now, tell me a joke based on that result.'},\n", - " Message(content='Here\\'s a joke based on the number 8:\\n\\nWhy was 6 afraid of 7?\\n\\nBecause 7 8 (ate) 9!\\n\\nBut since we got 8 as our answer, here\\'s another one:\\n\\nWhat do you call an 8 that\\'s been working out?\\n\\nAn \"ate\" with great figure! 💪\\n\\n(Okay, that one was a bit of a stretch... much like the number 8 lying on its side to become the infinity symbol ∞!)', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None})]" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], "source": [ - "chat.hist" + "You can prefix an [OpenAI compatible model](https://docs.litellm.ai/docs/providers/openai_compatible) with 'openai/' and use an `api_base` and `api_key` argument to use models not registered with litellm.\n", + "\n", + "```python\n", + "import os, litellm\n", + "OPENROUTER_API_KEY = os.getenv(\"OPENROUTER_API_KEY\")\n", + "OPENROUTER_BASE_URL = \"https://openrouter.ai/api/v1\"\n", + "c = Chat(\"openai/gpt-oss-20b\", api_key=OPENROUTER_API_KEY, api_base=OPENROUTER_BASE_URL)\n", + "c(\"hi\")\n", + "```" ] }, { @@ -3385,32 +2891,28 @@ { "data": { "text/markdown": [ - "# Image Description\n", + "This image features an adorable **puppy** peeking out from behind a bush of **purple flowers**.\n", "\n", - "This adorable image shows a **Cavalier King Charles Spaniel puppy** with the classic Blenheim coloring (chestnut brown and white markings). \n", + "Here's a more detailed breakdown:\n", "\n", - "**Key features visible:**\n", - "- Large, expressive dark eyes\n", - "- Soft, fluffy ears with reddish-brown fur\n", - "- White blaze down the center of the face\n", - "- White and brown coat\n", - "- The puppy is lying on green grass\n", - "- Purple/lavender flowers (possibly asters) are visible in the background\n", - "- The puppy has a sweet, gentle expression typical of the breed\n", + "* **Main Subject:** A small, fluffy puppy, likely a Cavalier King Charles Spaniel or similar breed, with white fur and large patches of reddish-brown/tan on its floppy ears and around its eyes. It's lying down on green grass, looking directly at the viewer with big, dark, expressive eyes.\n", + "* **Foreground/Left:** A lush green bush covered in numerous small, delicate light purple or lavender flowers. The puppy is positioned as if it's emerging or hiding behind this bush.\n", + "* **Ground:** The puppy is resting on vibrant green grass.\n", + "* **Background (Right):** The background is softly blurred, showing hints of darker, possibly wooden, structures or furniture, suggesting an outdoor garden or patio setting.\n", "\n", - "The photo appears to be a professional or high-quality portrait shot, capturing the puppy's endearing features and innocent charm. The natural outdoor setting with flowers adds to the overall sweet and peaceful mood of the image.\n", + "The overall impression is one of cuteness and natural beauty.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-flash`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=178, prompt_tokens=105, total_tokens=283, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=998, prompt_tokens=264, total_tokens=1262, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=797, rejected_prediction_tokens=None, text_tokens=201, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=6, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"# Image Description\\n\\nThis adorable image shows a **Cavalier King Charles Spaniel puppy** with the classic Blenheim coloring (chestnut brown and white markings). \\n\\n**Key features visible:**\\n- Large, expressive dark eyes\\n- Soft, fluffy ears with reddish-brown fur\\n- White blaze down the center of the face\\n- White and brown coat\\n- The puppy is lying on green grass\\n- Purple/lavender flowers (possibly asters) are visible in the background\\n- The puppy has a sweet, gentle expression typical of the breed\\n\\nThe photo appears to be a professional or high-quality portrait shot, capturing the puppy's endearing features and innocent charm. The natural outdoor setting with flowers adds to the overall sweet and peaceful mood of the image.\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=178, prompt_tokens=105, total_tokens=283, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-flash', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"This image features an adorable **puppy** peeking out from behind a bush of **purple flowers**.\\n\\nHere's a more detailed breakdown:\\n\\n* **Main Subject:** A small, fluffy puppy, likely a Cavalier King Charles Spaniel or similar breed, with white fur and large patches of reddish-brown/tan on its floppy ears and around its eyes. It's lying down on green grass, looking directly at the viewer with big, dark, expressive eyes.\\n* **Foreground/Left:** A lush green bush covered in numerous small, delicate light purple or lavender flowers. The puppy is positioned as if it's emerging or hiding behind this bush.\\n* **Ground:** The puppy is resting on vibrant green grass.\\n* **Background (Right):** The background is softly blurred, showing hints of darker, possibly wooden, structures or furniture, suggesting an outdoor garden or patio setting.\\n\\nThe overall impression is one of cuteness and natural beauty.\", role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=998, prompt_tokens=264, total_tokens=1262, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=797, rejected_prediction_tokens=None, text_tokens=201, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=6, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -3419,7 +2921,7 @@ } ], "source": [ - "chat = Chat(ms[1])\n", + "chat = Chat(ms[2])\n", "chat(['Whats in this img?',img_fn.read_bytes()])" ] }, @@ -3448,21 +2950,21 @@ { "data": { "text/markdown": [ - "Your name is R E D A C T E D\n", + "I can't spell your name because I don't know what it is!\n", "\n", - "I don't actually know your name - you haven't told me what it is yet! If you'd like me to spell your name, please let me know what it is first.\n", + "If you tell me your name, I'd be happy to spell it for you.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-flash`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=47, prompt_tokens=16, total_tokens=63, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=323, prompt_tokens=4, total_tokens=327, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=287, rejected_prediction_tokens=None, text_tokens=36, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=4, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"Your name is R E D A C T E D\\n\\nI don't actually know your name - you haven't told me what it is yet! If you'd like me to spell your name, please let me know what it is first.\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=47, prompt_tokens=16, total_tokens=63, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-flash', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"I can't spell your name because I don't know what it is!\\n\\nIf you tell me your name, I'd be happy to spell it for you.\", role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=323, prompt_tokens=4, total_tokens=327, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=287, rejected_prediction_tokens=None, text_tokens=36, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=4, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -3471,7 +2973,7 @@ } ], "source": [ - "chat = Chat(ms[1])\n", + "chat = Chat(ms[2])\n", "chat(\"Spell my name\",prefill=\"Your name is R E\")" ] }, @@ -3488,20 +2990,9 @@ "execution_count": null, "id": "dfbf54ca", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Message(content=\"Your name is R E D A C T E D\\n\\nI don't actually know your name - you haven't told me what it is yet! If you'd like me to spell your name, please let me know what it is first.\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None})" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "chat.hist[-1]" + "# chat.hist[-1]" ] }, { @@ -3543,14 +3034,14 @@ "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5`\n", + "- model: `gemini-2.5-flash`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=17, prompt_tokens=11, total_tokens=28, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", + "- usage: `Usage(completion_tokens=39, prompt_tokens=5, total_tokens=44, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='1, 2, 3, 4, 5', role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None))], usage=Usage(completion_tokens=17, prompt_tokens=11, total_tokens=28, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-flash', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='1, 2, 3, 4, 5', role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None))], usage=Usage(completion_tokens=39, prompt_tokens=5, total_tokens=44, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" ] }, "metadata": {}, @@ -3558,7 +3049,7 @@ } ], "source": [ - "chat = Chat(model)\n", + "chat = Chat(ms[2])\n", "stream_gen = chat(\"Count to 5\", stream=True)\n", "for chunk in stream_gen:\n", " if isinstance(chunk, ModelResponse): display(chunk)\n", @@ -3578,41 +3069,12 @@ "execution_count": null, "id": "834c058f", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Okay! 6, 7, 8, 9, 10" - ] - }, - { - "data": { - "text/markdown": [ - "Okay! 6, 7, 8, 9, 10\n", - "\n", - "
\n", - "\n", - "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5`\n", - "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=12, prompt_tokens=44, total_tokens=56, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", - "\n", - "
" - ], - "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Okay! 6, 7, 8, 9, 10', role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None))], usage=Usage(completion_tokens=12, prompt_tokens=44, total_tokens=56, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ - "stream_gen = chat(\"Continue counting to 10\",\"Okay! 6, 7\",stream=True)\n", - "for chunk in stream_gen:\n", - " if isinstance(chunk, ModelResponse): display(chunk)\n", - " else: print(delta_text(chunk) or '',end='')" + "# stream_gen = chat(\"Continue counting to 10\",\"Okay! 6, 7\",stream=True)\n", + "# for chunk in stream_gen:\n", + "# if isinstance(chunk, ModelResponse): display(chunk)\n", + "# else: print(delta_text(chunk) or '',end='')" ] }, { @@ -3640,7 +3102,11 @@ { "data": { "text/plain": [ - "['gemini/gemini-2.5-flash', 'claude-sonnet-4-5', 'openai/gpt-4.1']" + "['gemini/gemini-3-pro-preview',\n", + " 'gemini/gemini-2.5-pro',\n", + " 'gemini/gemini-2.5-flash',\n", + " 'claude-sonnet-4-5',\n", + " 'openai/gpt-4.1']" ] }, "execution_count": null, @@ -3661,7 +3127,7 @@ { "data": { "text/markdown": [ - "**gemini/gemini-2.5-flash:**" + "**gemini/gemini-2.5-pro:**" ], "text/plain": [ "" @@ -3673,24 +3139,59 @@ { "data": { "text/markdown": [ + "I have successfully completed the goal.\n", "\n", - "The `simple_add` tool was used to calculate 5 + 3.\n", + "To find the sum of 5 + 3, I used the `simple_add` tool. I provided the inputs `a=5` and `b=3`. The tool processed these numbers and returned the result, which is 8.\n", "\n", - "The result is 8.The `simple_add` tool was used to calculate 5 + 3.\n", + "Therefore, 5 + 3 = 8.\n", "\n", - "The result is 8.\n", + "
\n", + "\n", + "- id: `chatcmpl-xxx`\n", + "- model: `gemini-2.5-pro`\n", + "- finish_reason: `stop`\n", + "- usage: `Usage(completion_tokens=787, prompt_tokens=214, total_tokens=1001, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=716, rejected_prediction_tokens=None, text_tokens=71, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=214, image_tokens=None))`\n", + "\n", + "
" + ], + "text/plain": [ + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='I have successfully completed the goal.\\n\\nTo find the sum of 5 + 3, I used the `simple_add` tool. I provided the inputs `a=5` and `b=3`. The tool processed these numbers and returned the result, which is 8.\\n\\nTherefore, 5 + 3 = 8.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=787, prompt_tokens=214, total_tokens=1001, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=716, rejected_prediction_tokens=None, text_tokens=71, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=214, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "**gemini/gemini-2.5-flash:**" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "I used the `simple_add` tool to calculate 5 + 3.\n", + "The tool returned the result 8.\n", + "\n", + "Therefore, 5 + 3 = 8.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", "- model: `gemini-2.5-flash`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=48, prompt_tokens=160, total_tokens=208, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=160, image_tokens=None))`\n", + "- usage: `Usage(completion_tokens=133, prompt_tokens=160, total_tokens=293, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=95, rejected_prediction_tokens=None, text_tokens=38, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=160, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-flash', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='\\nThe `simple_add` tool was used to calculate 5 + 3.\\n\\nThe result is 8.The `simple_add` tool was used to calculate 5 + 3.\\n\\nThe result is 8.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=48, prompt_tokens=160, total_tokens=208, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=160, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-flash', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='I used the `simple_add` tool to calculate 5 + 3.\\nThe tool returned the result 8.\\n\\nTherefore, 5 + 3 = 8.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=133, prompt_tokens=160, total_tokens=293, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=95, rejected_prediction_tokens=None, text_tokens=38, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=160, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -3711,28 +3212,32 @@ { "data": { "text/markdown": [ + "## Summary\n", "\n", + "I successfully completed the calculation using the `simple_add` tool.\n", "\n", - "**Answer: 5 + 3 = 8**\n", + "**Result: 5 + 3 = 8**\n", "\n", "**Explanation:**\n", - "I used the `simple_add` function with two parameters:\n", + "The `simple_add` function took two parameters:\n", "- `a = 5` (the first operand)\n", "- `b = 3` (the second operand)\n", "\n", - "The function performed the addition operation and returned the result of **8**, which is the correct sum of 5 and 3.\n", + "The function performed the addition operation and returned **8** as the result.\n", + "\n", + "The goal has been fully completed - no further work is needed.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", "- model: `claude-sonnet-4-5-20250929`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=93, prompt_tokens=765, total_tokens=858, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=110, prompt_tokens=770, total_tokens=880, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='\\n\\n**Answer: 5 + 3 = 8**\\n\\n**Explanation:**\\nI used the `simple_add` function with two parameters:\\n- `a = 5` (the first operand)\\n- `b = 3` (the second operand)\\n\\nThe function performed the addition operation and returned the result of **8**, which is the correct sum of 5 and 3.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=93, prompt_tokens=765, total_tokens=858, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='## Summary\\n\\nI successfully completed the calculation using the `simple_add` tool.\\n\\n**Result: 5 + 3 = 8**\\n\\n**Explanation:**\\nThe `simple_add` function took two parameters:\\n- `a = 5` (the first operand)\\n- `b = 3` (the second operand)\\n\\nThe function performed the addition operation and returned **8** as the result.\\n\\nThe goal has been fully completed - no further work is needed.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=110, prompt_tokens=770, total_tokens=880, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" ] }, "metadata": {}, @@ -3753,21 +3258,25 @@ { "data": { "text/markdown": [ - "The result of 5 + 3 is 8.\n", + "I used the simple_add tool to calculate 5 + 3, and the result is 8.\n", "\n", - "Explanation: The simple_add tool takes two numbers (in this case, 5 and 3) and adds them together. The sum of 5 and 3 is 8.\n", + "Summary:\n", + "- I successfully completed the calculation using the tool.\n", + "- 5 + 3 = 8.\n", + "\n", + "No further work is needed for this task. If you have more calculations or questions, please let me know!\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", "- model: `gpt-4.1-2025-04-14`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=49, prompt_tokens=156, total_tokens=205, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None))`\n", + "- usage: `Usage(completion_tokens=65, prompt_tokens=156, total_tokens=221, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gpt-4.1-2025-04-14', object='chat.completion', system_fingerprint='fp_09249d7c7b', choices=[Choices(finish_reason='stop', index=0, message=Message(content='The result of 5 + 3 is 8.\\n\\nExplanation: The simple_add tool takes two numbers (in this case, 5 and 3) and adds them together. The sum of 5 and 3 is 8.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'refusal': None}, annotations=[]), provider_specific_fields={})], usage=Usage(completion_tokens=49, prompt_tokens=156, total_tokens=205, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None)), service_tier='default')" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gpt-4.1-2025-04-14', object='chat.completion', system_fingerprint='fp_09249d7c7b', choices=[Choices(finish_reason='stop', index=0, message=Message(content='I used the simple_add tool to calculate 5 + 3, and the result is 8.\\n\\nSummary:\\n- I successfully completed the calculation using the tool.\\n- 5 + 3 = 8.\\n\\nNo further work is needed for this task. If you have more calculations or questions, please let me know!', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'refusal': None}, annotations=[]), provider_specific_fields={})], usage=Usage(completion_tokens=65, prompt_tokens=156, total_tokens=221, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None)), service_tier='default')" ] }, "metadata": {}, @@ -3775,7 +3284,7 @@ } ], "source": [ - "for m in ms:\n", + "for m in ms[1:]:\n", " display(Markdown(f'**{m}:**'))\n", " chat = Chat(m, tools=[simple_add])\n", " res = chat(\"What's 5 + 3? Use the `simple_add` tool. Explain.\")\n", @@ -3801,20 +3310,20 @@ "text/markdown": [ "\n", "\n", - "🔧 simple_add({\"a\": 5, \"b\": 3})\n", + "🔧 simple_add({\"b\": 3, \"a\": 5})\n", "\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `tool_calls`\n", - "- usage: `Usage(completion_tokens=120, prompt_tokens=639, total_tokens=759, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=39, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=161, prompt_tokens=74, total_tokens=235, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=141, rejected_prediction_tokens=None, text_tokens=20, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=74, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 5, \"b\": 3}', 'name': 'simple_add'}, 'id': 'toolu_01U52aTJmhJ5ziQXjr9xwr5S', 'type': 'function'}], function_call=None, reasoning_content='The user is asking me to add 5 and 3. I have access to a simple_add function that can do this. Let me call it with a=5 and b=3.', thinking_blocks=[{'type': 'thinking', 'thinking': 'The user is asking me to add 5 and 3. I have access to a simple_add function that can do this. Let me call it with a=5 and b=3.', 'signature': 'EqgCCkYIChgCKkBb/qeb9ImWx6toh7lcTqzaOM9Iuoh9xlUozf6xH/aThr53E0jZhXFGGLda5tAjpPIzS1anKsbeeMI4M1Q4I+a1EgxleWqGZl0PdprTPwQaDB+3HcdK9yWXWoWxYyIwHegVAFYVJbXiID+ieHFViQf6Z+jKHMvfil5GQ1rcCre+P3pJ+fQudt58r1MJzYrVKo8BUIcj9IE8DoIy7WcOXU/ZrF12uXTn99vLLwRSej7QSekhXE4nZf55e5/kw41X/FXEkY2Hj20xCrSoBH8dDgJ800z6joF5GnZlyX+0+jEo0oHgzjvd1Za7Uu548/lQ2ODKVEIl4kn1NqYajUUJTzfGic4R3/uQbmR7nn0nVqmMLhufYa3xdYW/hj3Xcbl55oYYAQ=='}], provider_specific_fields={'citations': None, 'thinking_blocks': [{'type': 'thinking', 'thinking': 'The user is asking me to add 5 and 3. I have access to a simple_add function that can do this. Let me call it with a=5 and b=3.', 'signature': 'EqgCCkYIChgCKkBb/qeb9ImWx6toh7lcTqzaOM9Iuoh9xlUozf6xH/aThr53E0jZhXFGGLda5tAjpPIzS1anKsbeeMI4M1Q4I+a1EgxleWqGZl0PdprTPwQaDB+3HcdK9yWXWoWxYyIwHegVAFYVJbXiID+ieHFViQf6Z+jKHMvfil5GQ1rcCre+P3pJ+fQudt58r1MJzYrVKo8BUIcj9IE8DoIy7WcOXU/ZrF12uXTn99vLLwRSej7QSekhXE4nZf55e5/kw41X/FXEkY2Hj20xCrSoBH8dDgJ800z6joF5GnZlyX+0+jEo0oHgzjvd1Za7Uu548/lQ2ODKVEIl4kn1NqYajUUJTzfGic4R3/uQbmR7nn0nVqmMLhufYa3xdYW/hj3Xcbl55oYYAQ=='}]}))], usage=Usage(completion_tokens=120, prompt_tokens=639, total_tokens=759, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=39, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'index': 0, 'provider_specific_fields': {'thought_signature': 'CuADAXLI2nyWO0CQDly8P3vsYo3Yc3eyV4gDjYb9yvQq7t//BS2bq6LWPXkVwXQTIju4kxHkgd4HdjaBPmAY5qxePsaO+31uqUH2jPAQBjn2pQxigo91xFkIyHXWgquKKmLCWRmr3ozZ33OHRgc82C4/KxoIFqL2VQHh9k5Gz92yk4OvQbIj+BCw0F88lG/t/Azh1VmLBJfai1tgdSP2AeXOmxqvW7do7hwqXu/THDwajeHSk4t2pb2s92QVhumfcXTEuDvcjZh25BeTDVmtxRKSSGS4rFzxJIN5e+J1F9Gw28b658P7XXjU/JAOzv397F3vePR5bdLH0m0MEZZcZBog61E3P/HW2dUUnVbRemKfFvWxg4GIzXTmEZ2PlvIEhvJyApQwbED3BakRNOznb34qwBODtT3Sni642obxsLym3+SPyzwh8qapiZmjtxfIVZJ7Y6+TTQkw3c9lCjiksxqnmaA6XTtNpShsBx8qjGLHSduJkpzoeLHJWE9jZZNeKonvuH5m78EvCu8BeDi0z/LbaERbs/XCpvOfaqtOSfrvnrCzsF1ZnTgDDIv5V/+dbPxiQZaf8hqqjEVuhPlJ/jSL98qjDdBZzogunT2It7WuB+1wxVMESmE0OjZveJt/+eMD'}, 'function': {'arguments': '{\"b\": 3, \"a\": 5}', 'name': 'simple_add'}, 'id': 'call_471bb1c91f3c4eaba1d4a8428b28__thought__CuADAXLI2nyWO0CQDly8P3vsYo3Yc3eyV4gDjYb9yvQq7t//BS2bq6LWPXkVwXQTIju4kxHkgd4HdjaBPmAY5qxePsaO+31uqUH2jPAQBjn2pQxigo91xFkIyHXWgquKKmLCWRmr3ozZ33OHRgc82C4/KxoIFqL2VQHh9k5Gz92yk4OvQbIj+BCw0F88lG/t/Azh1VmLBJfai1tgdSP2AeXOmxqvW7do7hwqXu/THDwajeHSk4t2pb2s92QVhumfcXTEuDvcjZh25BeTDVmtxRKSSGS4rFzxJIN5e+J1F9Gw28b658P7XXjU/JAOzv397F3vePR5bdLH0m0MEZZcZBog61E3P/HW2dUUnVbRemKfFvWxg4GIzXTmEZ2PlvIEhvJyApQwbED3BakRNOznb34qwBODtT3Sni642obxsLym3+SPyzwh8qapiZmjtxfIVZJ7Y6+TTQkw3c9lCjiksxqnmaA6XTtNpShsBx8qjGLHSduJkpzoeLHJWE9jZZNeKonvuH5m78EvCu8BeDi0z/LbaERbs/XCpvOfaqtOSfrvnrCzsF1ZnTgDDIv5V/+dbPxiQZaf8hqqjEVuhPlJ/jSL98qjDdBZzogunT2It7WuB+1wxVMESmE0OjZveJt/+eMD', 'type': 'function'}], function_call=None, images=[], reasoning_content='**My Reasoning on this Simple Addition Problem**\\n\\nOkay, so I\\'m looking at this problem and it\\'s pretty straightforward. First, the user\\'s asking about addition, which immediately points me towards the `simple_add` tool. Seems like a good fit for this task.\\n\\nNext, I need to understand the inputs. The user gives me two numbers: 5 and 3. I quickly see that `simple_add` takes two parameters, conveniently named `a` and `b`. I can easily map the user\\'s \"5\" to the parameter `a` and the user\\'s \"3\" to the parameter `b`.\\n\\nFinally, to get the answer, I need to execute the addition. I know I can call the function with the assigned parameters. So I\\'ll just formulate the correct function call: `print(simple_add(a=5, b=3))` should do the trick. That\\'ll give me the sum. Easy peasy.\\n', thinking_blocks=[{'type': 'thinking', 'thinking': '{\"functionCall\": {\"name\": \"simple_add\", \"args\": {\"b\": 3, \"a\": 5}}}', 'signature': 'CuADAXLI2nyWO0CQDly8P3vsYo3Yc3eyV4gDjYb9yvQq7t//BS2bq6LWPXkVwXQTIju4kxHkgd4HdjaBPmAY5qxePsaO+31uqUH2jPAQBjn2pQxigo91xFkIyHXWgquKKmLCWRmr3ozZ33OHRgc82C4/KxoIFqL2VQHh9k5Gz92yk4OvQbIj+BCw0F88lG/t/Azh1VmLBJfai1tgdSP2AeXOmxqvW7do7hwqXu/THDwajeHSk4t2pb2s92QVhumfcXTEuDvcjZh25BeTDVmtxRKSSGS4rFzxJIN5e+J1F9Gw28b658P7XXjU/JAOzv397F3vePR5bdLH0m0MEZZcZBog61E3P/HW2dUUnVbRemKfFvWxg4GIzXTmEZ2PlvIEhvJyApQwbED3BakRNOznb34qwBODtT3Sni642obxsLym3+SPyzwh8qapiZmjtxfIVZJ7Y6+TTQkw3c9lCjiksxqnmaA6XTtNpShsBx8qjGLHSduJkpzoeLHJWE9jZZNeKonvuH5m78EvCu8BeDi0z/LbaERbs/XCpvOfaqtOSfrvnrCzsF1ZnTgDDIv5V/+dbPxiQZaf8hqqjEVuhPlJ/jSL98qjDdBZzogunT2It7WuB+1wxVMESmE0OjZveJt/+eMD'}], provider_specific_fields=None))], usage=Usage(completion_tokens=161, prompt_tokens=74, total_tokens=235, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=141, rejected_prediction_tokens=None, text_tokens=20, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=74, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -3823,7 +3332,7 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'toolu_01U52aTJmhJ5ziQXjr9xwr5S',\n", + "{'tool_call_id': 'call_471bb1c91f3c4eaba1d4a8428b28__thought__CuADAXLI2nyWO0CQDly8P3vsYo3Yc3eyV4gDjYb9yvQq7t//BS2bq6LWPXkVwXQTIju4kxHkgd4HdjaBPmAY5qxePsaO+31uqUH2jPAQBjn2pQxigo91xFkIyHXWgquKKmLCWRmr3ozZ33OHRgc82C4/KxoIFqL2VQHh9k5Gz92yk4OvQbIj+BCw0F88lG/t/Azh1VmLBJfai1tgdSP2AeXOmxqvW7do7hwqXu/THDwajeHSk4t2pb2s92QVhumfcXTEuDvcjZh25BeTDVmtxRKSSGS4rFzxJIN5e+J1F9Gw28b658P7XXjU/JAOzv397F3vePR5bdLH0m0MEZZcZBog61E3P/HW2dUUnVbRemKfFvWxg4GIzXTmEZ2PlvIEhvJyApQwbED3BakRNOznb34qwBODtT3Sni642obxsLym3+SPyzwh8qapiZmjtxfIVZJ7Y6+TTQkw3c9lCjiksxqnmaA6XTtNpShsBx8qjGLHSduJkpzoeLHJWE9jZZNeKonvuH5m78EvCu8BeDi0z/LbaERbs/XCpvOfaqtOSfrvnrCzsF1ZnTgDDIv5V/+dbPxiQZaf8hqqjEVuhPlJ/jSL98qjDdBZzogunT2It7WuB+1wxVMESmE0OjZveJt/+eMD',\n", " 'role': 'tool',\n", " 'name': 'simple_add',\n", " 'content': '8'}" @@ -3835,21 +3344,19 @@ { "data": { "text/markdown": [ - "\n", - "\n", - "5 + 3 = **8**\n", + "Based on my tool use, the sum of 5 and 3 is 8.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=14, prompt_tokens=812, total_tokens=826, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=476, prompt_tokens=361, total_tokens=837, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=458, rejected_prediction_tokens=None, text_tokens=18, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=361, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='\\n\\n5 + 3 = **8**', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=14, prompt_tokens=812, total_tokens=826, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Based on my tool use, the sum of 5 and 3 is 8.', role='assistant', tool_calls=None, function_call=None, images=[], reasoning_content='**Summarizing My Reasoning**\\n\\nOkay, so the user wants a summary of what just happened. Let\\'s see... first, I need to recognize the user\\'s implicit request for a recap. They want a concise explanation of how I arrived at the answer. Let\\'s dig in. I see the user asked \"What\\'s 5 + 3?\" My call to the `simple_add` tool was quite straightforward: `simple_add(a=5, b=3)`, and the tool spit back `8`.\\n\\nMy analysis is simple: the user presented an elementary math problem. I leveraged a dedicated tool, the `simple_add` function, to do the heavy lifting of the computation. That function then took the input `5` and `3`, and the output was `8`, which is the correct sum.\\n\\nNow to formulate the summary. Since this is an expert level interaction, I\\'m thinking the user wants a no-nonsense, technically correct summary. The core information to convey is the question, the tool used, and the result. However, the tool use isn\\'t strictly necessary here. Given the brevity of the process, a concise statement like \"The sum of 5 and 3 is 8\" is the ideal answer. This is direct, accurate, and completely fulfills the objective. Plus, the goal was achieved, so no further explanation is necessary.\\n', thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=476, prompt_tokens=361, total_tokens=837, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=458, rejected_prediction_tokens=None, text_tokens=18, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=361, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -3857,7 +3364,7 @@ } ], "source": [ - "chat = Chat(model, tools=[simple_add])\n", + "chat = Chat(ms[1], tools=[simple_add])\n", "res = chat(\"What's 5 + 3?\",think='l',return_all=True)\n", "display(*res)" ] @@ -3880,33 +3387,37 @@ "name": "stdout", "output_type": "stream", "text": [ - "Otters are carnivorous mammals in the subfamily Lutrinae, part of the weasel family. There are 14 extant otter species, all semiaquatic, found on every continent except Australia and Antarctica.\n", + "Otters are carnivorous mammals known for their playful behavior and adaptations to a semi-aquatic life. A member of the weasel family, there are 13 different species of otters found in various aquatic habitats around the world.\n", + "\n", + "Key characteristics of otters include their long, slender bodies, short legs with powerful webbed feet for swimming, and a strong tail that helps them move through the water. They are also distinguished by their very dense fur, which traps air to keep them warm and buoyant, as they lack a layer of blubber for insulation like other marine mammals. In fact, sea otters have the thickest fur of any animal.\n", "\n", - "They have long, slim bodies with powerful webbed feet used to swim, and seal-like abilities for holding breath underwater. Otters have the densest fur of any animal—as many as a million hairs per square inch in places.\n", + "Their diet consists mainly of fish, but can also include frogs, crayfish, and crabs. Otters are a keystone species, meaning they play a critical role in their ecosystem, such as controlling sea urchin populations which in turn protects kelp forests.\n", "\n", - "All otters are expert hunters that eat fish, crustaceans, and other critters. They're known for being playful animals, engaging in activities like sliding into water on natural slides. Otters live up to 16 years, and their young stay with their mothers for about a year." + "Otters can be found in a variety of environments, including freshwater rivers, lakes, and marshes, as well as coastal marine habitats. They build dens, known as holts, in riverbanks or under tree roots. While some otter populations have faced declines, reintroduction programs have been successful in some areas." ] }, { "data": { "text/markdown": [ - "Otters are [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") carnivorous mammals in the subfamily Lutrinae, part of the weasel family. [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") There are 14 extant otter species, all semiaquatic, found on every continent except Australia and Antarctica.\n", + "Otters are carnivorous mammals known for their playful behavior and adaptations to a semi-aquatic life. A member of the weasel family, there are 13 different species of otters found in various aquatic habitats around the world.\n", "\n", - "[*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") They have long, slim bodies with powerful webbed feet used to swim, and seal-like abilities for holding breath underwater. [*](https://www.nationalgeographic.com/animals/mammals/facts/otters-1 \"Otters, facts and information | National Geographic\") Otters have the densest fur of any animal—as many as a million hairs per square inch in places.\n", + "Key characteristics of otters include their long, slender bodies, short legs with powerful webbed feet for swimming, and a strong tail that helps them move through the water. They are also distinguished by their very dense fur, which traps air to keep them warm and buoyant, as they lack a layer of blubber for insulation like other marine mammals. In fact, sea otters have the thickest fur of any animal.\n", "\n", - "[*](https://www.nationalgeographic.com/animals/mammals/facts/otters-1 \"Otters, facts and information | National Geographic\") All otters are expert hunters that eat fish, crustaceans, and other critters. They're known for being [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") playful animals, engaging in activities like sliding into water on natural slides. [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") Otters live up to 16 years, and their young stay with their mothers for about a year.\n", + "Their diet consists mainly of fish, but can also include frogs, crayfish, and crabs. Otters are a keystone species, meaning they play a critical role in their ecosystem, such as controlling sea urchin populations which in turn protects kelp forests.\n", + "\n", + "Otters can be found in a variety of environments, including freshwater rivers, lakes, and marshes, as well as coastal marine habitats. They build dens, known as holts, in riverbanks or under tree roots. While some otter populations have faced declines, reintroduction programs have been successful in some areas.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=342, prompt_tokens=13761, total_tokens=14103, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", + "- usage: `Usage(completion_tokens=411, prompt_tokens=12, total_tokens=423, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None)`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Otters are [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") carnivorous mammals in the subfamily Lutrinae, part of the weasel family. [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") There are 14 extant otter species, all semiaquatic, found on every continent except Australia and Antarctica.\\n\\n[*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") They have long, slim bodies with powerful webbed feet used to swim, and seal-like abilities for holding breath underwater. [*](https://www.nationalgeographic.com/animals/mammals/facts/otters-1 \"Otters, facts and information | National Geographic\") Otters have the densest fur of any animal—as many as a million hairs per square inch in places.\\n\\n[*](https://www.nationalgeographic.com/animals/mammals/facts/otters-1 \"Otters, facts and information | National Geographic\") All otters are expert hunters that eat fish, crustaceans, and other critters. They\\'re known for being [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") playful animals, engaging in activities like sliding into water on natural slides. [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") Otters live up to 16 years, and their young stay with their mothers for about a year.', role='assistant', tool_calls=[], function_call=None, provider_specific_fields=None))], usage=Usage(completion_tokens=342, prompt_tokens=13761, total_tokens=14103, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Otters are carnivorous mammals known for their playful behavior and adaptations to a semi-aquatic life. A member of the weasel family, there are 13 different species of otters found in various aquatic habitats around the world.\\n\\nKey characteristics of otters include their long, slender bodies, short legs with powerful webbed feet for swimming, and a strong tail that helps them move through the water. They are also distinguished by their very dense fur, which traps air to keep them warm and buoyant, as they lack a layer of blubber for insulation like other marine mammals. In fact, sea otters have the thickest fur of any animal.\\n\\nTheir diet consists mainly of fish, but can also include frogs, crayfish, and crabs. Otters are a keystone species, meaning they play a critical role in their ecosystem, such as controlling sea urchin populations which in turn protects kelp forests.\\n\\nOtters can be found in a variety of environments, including freshwater rivers, lakes, and marshes, as well as coastal marine habitats. They build dens, known as holts, in riverbanks or under tree roots. While some otter populations have faced declines, reintroduction programs have been successful in some areas.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None, annotations=[{'type': 'url_citation', 'url_citation': {'start_index': 104, 'end_index': 227, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQH5_gxxY7d1_3GK3lPurE3-AI0487RmhHqt3hzDN1NnYk2b3DScF7_6Wi3_-O4c_HGoczctHH_VwM7_iyUjTRA7B3Dtslpi5Vz7aqB5S_M3sPStnh--hqj_dH9SL7Q8JWOgYr0MtHkSjYqxmu1NfN1tWB1PDK8vnS3doYEVah_PKao=', 'title': 'doi.gov'}}, {'type': 'url_citation', 'url_citation': {'start_index': 229, 'end_index': 403, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQGkX41Wyp8epNwOW65lhQ-7iOla1ORY5hBQSIxt0oY6IpBM9omX0XKJTLzkuEHJBvQBjlsXLISjX7m-dKXwpMC1VaYUmawxMP6fJ7RucjDEhRvxEzOvWkK9DIYMvws=', 'title': 'wikipedia.org'}}, {'type': 'url_citation', 'url_citation': {'start_index': 404, 'end_index': 577, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQGkX41Wyp8epNwOW65lhQ-7iOla1ORY5hBQSIxt0oY6IpBM9omX0XKJTLzkuEHJBvQBjlsXLISjX7m-dKXwpMC1VaYUmawxMP6fJ7RucjDEhRvxEzOvWkK9DIYMvws=', 'title': 'wikipedia.org'}}, {'type': 'url_citation', 'url_citation': {'start_index': 578, 'end_index': 634, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQH5_gxxY7d1_3GK3lPurE3-AI0487RmhHqt3hzDN1NnYk2b3DScF7_6Wi3_-O4c_HGoczctHH_VwM7_iyUjTRA7B3Dtslpi5Vz7aqB5S_M3sPStnh--hqj_dH9SL7Q8JWOgYr0MtHkSjYqxmu1NfN1tWB1PDK8vnS3doYEVah_PKao=', 'title': 'doi.gov'}}, {'type': 'url_citation', 'url_citation': {'start_index': 636, 'end_index': 720, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQGkX41Wyp8epNwOW65lhQ-7iOla1ORY5hBQSIxt0oY6IpBM9omX0XKJTLzkuEHJBvQBjlsXLISjX7m-dKXwpMC1VaYUmawxMP6fJ7RucjDEhRvxEzOvWkK9DIYMvws=', 'title': 'wikipedia.org'}}, {'type': 'url_citation', 'url_citation': {'start_index': 721, 'end_index': 885, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQH5_gxxY7d1_3GK3lPurE3-AI0487RmhHqt3hzDN1NnYk2b3DScF7_6Wi3_-O4c_HGoczctHH_VwM7_iyUjTRA7B3Dtslpi5Vz7aqB5S_M3sPStnh--hqj_dH9SL7Q8JWOgYr0MtHkSjYqxmu1NfN1tWB1PDK8vnS3doYEVah_PKao=', 'title': 'doi.gov'}}, {'type': 'url_citation', 'url_citation': {'start_index': 887, 'end_index': 1021, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQHqQTGheYeURsOfADjIFXqIczhpXYC1zvv5OnfQYNGtQlwXHxG1eHC3zdc5CybwdYz5lu9uJ-jbgbZ0kf1oxxSTqO2It1mHso5uAWtOwtv5wyvyXGtAj7bp2k8gfNQh99jypyPDAsqgdTTxErPVZ-9yZ_p3PTqY7Q==', 'title': 'crittercarewildlife.org'}}, {'type': 'url_citation', 'url_citation': {'start_index': 1022, 'end_index': 1089, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQHqQTGheYeURsOfADjIFXqIczhpXYC1zvv5OnfQYNGtQlwXHxG1eHC3zdc5CybwdYz5lu9uJ-jbgbZ0kf1oxxSTqO2It1mHso5uAWtOwtv5wyvyXGtAj7bp2k8gfNQh99jypyPDAsqgdTTxErPVZ-9yZ_p3PTqY7Q==', 'title': 'crittercarewildlife.org'}}, {'type': 'url_citation', 'url_citation': {'start_index': 1090, 'end_index': 1199, 'url': 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AUZIYQHqQTGheYeURsOfADjIFXqIczhpXYC1zvv5OnfQYNGtQlwXHxG1eHC3zdc5CybwdYz5lu9uJ-jbgbZ0kf1oxxSTqO2It1mHso5uAWtOwtv5wyvyXGtAj7bp2k8gfNQh99jypyPDAsqgdTTxErPVZ-9yZ_p3PTqY7Q==', 'title': 'crittercarewildlife.org'}}]))], usage=Usage(completion_tokens=411, prompt_tokens=12, total_tokens=423, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))" ] }, "metadata": {}, @@ -3914,7 +3425,7 @@ } ], "source": [ - "chat = Chat(model)\n", + "chat = Chat(ms[1])\n", "res = chat(\"Search the web and tell me very briefly about otters\", search='l', stream=True)\n", "for o in res:\n", " if isinstance(o, ModelResponse): sleep(0.01); display(o)\n", @@ -3946,24 +3457,22 @@ { "data": { "text/markdown": [ - "I'll solve this step by step using the addition function.\n", "\n", - "**Step 1:** First, let me calculate 5 + 3\n", "\n", - "🔧 simple_add({\"a\": 5, \"b\": 3})\n", + "🔧 simple_add({\"b\": 3, \"a\": 5})\n", "\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `tool_calls`\n", - "- usage: `Usage(completion_tokens=100, prompt_tokens=617, total_tokens=717, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=421, prompt_tokens=83, total_tokens=504, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=401, rejected_prediction_tokens=None, text_tokens=20, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=83, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=\"I'll solve this step by step using the addition function.\\n\\n**Step 1:** First, let me calculate 5 + 3\", role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 5, \"b\": 3}', 'name': 'simple_add'}, 'id': 'toolu_01W1CJsg3A4DqUrtHpS9mGqs', 'type': 'function'}], function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=100, prompt_tokens=617, total_tokens=717, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'index': 0, 'provider_specific_fields': {'thought_signature': 'Cq8JAXLI2nyI1OmWr9kdtN0PS13ZhCWAfCRxf4MjaoInzCR/bwzqgYAwIMWbSpvvcpsFOeCp0BVKgD612WkQS5UYEsLGebguKY7QxSm/sKX+hVaRfySu/INlef29rks6OJ6Jl7KyH4ux+v0h0RoRtv5mPARXfQArRpO4Z41Mp51KSqJY7f4HcYdfVt3ORp1IZPBFfZ5YTvtyT194V1FWhzRxc5fagOwt8LP22c+6ppmE8tNQMojPgX3KbaMIEisu5FRXd09A43JEUOz2hdLTijvbwvGtDtVLcujOSgJ88sIZr7q8cQBYNXV+jygwYH60XakR+zifT6vdzzYF0QQTXT4fCtriZdxRLh63alrvbVQHTTBHwhrQjz5pgMZk3IvgoFpsZbjhfBadJIUC0k5mvFCkQ/ehQdUgnNlzIy57+PchqAnpeoZ/L3ZpGdi3C3wwrcjwj5nxu9lktSWlv7A4XzC7SWMMQIfGVbMuCEOLcOaJlri2ZWqVBepG+eY5qYZ6wfiyx/XF4VyWPHhnmmqX3X97zBulVf7wRWG0yeiTHdPAbPH6s8H3qrbVdJvX1TEZdt5nMDgCxa7hXiMc9uLfE1UDMQS/h4I01Pe8gCn9feHZXizrk3GP2FMfkdO1v0vp3xjzDroGzbWe75ecqrH+dD1zpwC3dBL3kgi3e2wJsiKars+Vjnd1VRhMyylgh+q1bDo1q82NzkeXmrnRpjEE99IYxlAL8id89BVv2t2fMuBxu740uFjNW/pNlg47Yv78XM0oscqfgbj2jtm2ulBRqb7gKtoaDMeyhhzxms1medlJJ2TLSptzJuNEdrPpumKOlE0lDnYfU58QPp0fM7tGp/cApMC1nLkSsLKyra9Y76G4nTA8nTlYt4+C1a1i1rsZ3BLorfEhl0+jo/3yWxxlB2gisk5W5kwZSPZgkkFjYARadruhc8spE+VV8BOw7A548HDxvlm/kFe9AXfIx+sckdYu+r112QcJq77KL5fZAQlCGOT1h33jIW+Z0HmIlSMyIrn0eCQUwZH3s3e7unFIyKKI2DwF5huUAhFgWf8NdCqNxxuMs3wFPWw1oI6h8iMwbSUyejMc/V5gswDUU1I7Q+9LMRq5C2mrPjAQX8PdcngApiSzbODXmIPSKeXjctC0mlDZ5v622LRsgA++UDQ5L+MPpqJRzaVMXL0VsK8OQzpBpZ81dhV2QxdsHyd1vqjQ772sqF/AFmW0FBZyWooXEKMK063PBLBlTMTYT56b9C7FiA6JhqjgZkSiF+7UIuBDuAot7r/eayENYEBbI8IiDbqmvHBLqDAcnV9Wdw4iLlZzcqj/c/h4/R9NoSxOrAhaZuTwpGrZsbCaUCou7KTST4ASdvLeB1jk2h96dNLQw5ns1IHkmOVVf0kC0kVzcvIzKB3M7UpbOOCFDnsjODLNQqL3RLSwj/b+l03X6U55AViorZe9KipS59/X8YKMKQygPEY6a1d4y5EP0yLHJWPARA4Qc/iWYWA1gJAvprYchS1OoiF+2gPCFUmmTL30tbVJ1dsf7SGaGgGiPAFWvWP4/nRGG2Xu9XlwW1ZhSpU8XRMkKOVLRp30jsObCZXMkkc8a9U='}, 'function': {'arguments': '{\"b\": 3, \"a\": 5}', 'name': 'simple_add'}, 'id': 'call_cc7b6042491049d087b5b2c52b5b__thought__Cq8JAXLI2nyI1OmWr9kdtN0PS13ZhCWAfCRxf4MjaoInzCR/bwzqgYAwIMWbSpvvcpsFOeCp0BVKgD612WkQS5UYEsLGebguKY7QxSm/sKX+hVaRfySu/INlef29rks6OJ6Jl7KyH4ux+v0h0RoRtv5mPARXfQArRpO4Z41Mp51KSqJY7f4HcYdfVt3ORp1IZPBFfZ5YTvtyT194V1FWhzRxc5fagOwt8LP22c+6ppmE8tNQMojPgX3KbaMIEisu5FRXd09A43JEUOz2hdLTijvbwvGtDtVLcujOSgJ88sIZr7q8cQBYNXV+jygwYH60XakR+zifT6vdzzYF0QQTXT4fCtriZdxRLh63alrvbVQHTTBHwhrQjz5pgMZk3IvgoFpsZbjhfBadJIUC0k5mvFCkQ/ehQdUgnNlzIy57+PchqAnpeoZ/L3ZpGdi3C3wwrcjwj5nxu9lktSWlv7A4XzC7SWMMQIfGVbMuCEOLcOaJlri2ZWqVBepG+eY5qYZ6wfiyx/XF4VyWPHhnmmqX3X97zBulVf7wRWG0yeiTHdPAbPH6s8H3qrbVdJvX1TEZdt5nMDgCxa7hXiMc9uLfE1UDMQS/h4I01Pe8gCn9feHZXizrk3GP2FMfkdO1v0vp3xjzDroGzbWe75ecqrH+dD1zpwC3dBL3kgi3e2wJsiKars+Vjnd1VRhMyylgh+q1bDo1q82NzkeXmrnRpjEE99IYxlAL8id89BVv2t2fMuBxu740uFjNW/pNlg47Yv78XM0oscqfgbj2jtm2ulBRqb7gKtoaDMeyhhzxms1medlJJ2TLSptzJuNEdrPpumKOlE0lDnYfU58QPp0fM7tGp/cApMC1nLkSsLKyra9Y76G4nTA8nTlYt4+C1a1i1rsZ3BLorfEhl0+jo/3yWxxlB2gisk5W5kwZSPZgkkFjYARadruhc8spE+VV8BOw7A548HDxvlm/kFe9AXfIx+sckdYu+r112QcJq77KL5fZAQlCGOT1h33jIW+Z0HmIlSMyIrn0eCQUwZH3s3e7unFIyKKI2DwF5huUAhFgWf8NdCqNxxuMs3wFPWw1oI6h8iMwbSUyejMc/V5gswDUU1I7Q+9LMRq5C2mrPjAQX8PdcngApiSzbODXmIPSKeXjctC0mlDZ5v622LRsgA++UDQ5L+MPpqJRzaVMXL0VsK8OQzpBpZ81dhV2QxdsHyd1vqjQ772sqF/AFmW0FBZyWooXEKMK063PBLBlTMTYT56b9C7FiA6JhqjgZkSiF+7UIuBDuAot7r/eayENYEBbI8IiDbqmvHBLqDAcnV9Wdw4iLlZzcqj/c/h4/R9NoSxOrAhaZuTwpGrZsbCaUCou7KTST4ASdvLeB1jk2h96dNLQw5ns1IHkmOVVf0kC0kVzcvIzKB3M7UpbOOCFDnsjODLNQqL3RLSwj/b+l03X6U55AViorZe9KipS59/X8YKMKQygPEY6a1d4y5EP0yLHJWPARA4Qc/iWYWA1gJAvprYchS1OoiF+2gPCFUmmTL30tbVJ1dsf7SGaGgGiPAFWvWP4/nRGG2Xu9XlwW1ZhSpU8XRMkKOVLRp30jsObCZXMkkc8a9U=', 'type': 'function'}], function_call=None, images=[], thinking_blocks=[{'type': 'thinking', 'thinking': '{\"functionCall\": {\"name\": \"simple_add\", \"args\": {\"b\": 3, \"a\": 5}}}', 'signature': 'Cq8JAXLI2nyI1OmWr9kdtN0PS13ZhCWAfCRxf4MjaoInzCR/bwzqgYAwIMWbSpvvcpsFOeCp0BVKgD612WkQS5UYEsLGebguKY7QxSm/sKX+hVaRfySu/INlef29rks6OJ6Jl7KyH4ux+v0h0RoRtv5mPARXfQArRpO4Z41Mp51KSqJY7f4HcYdfVt3ORp1IZPBFfZ5YTvtyT194V1FWhzRxc5fagOwt8LP22c+6ppmE8tNQMojPgX3KbaMIEisu5FRXd09A43JEUOz2hdLTijvbwvGtDtVLcujOSgJ88sIZr7q8cQBYNXV+jygwYH60XakR+zifT6vdzzYF0QQTXT4fCtriZdxRLh63alrvbVQHTTBHwhrQjz5pgMZk3IvgoFpsZbjhfBadJIUC0k5mvFCkQ/ehQdUgnNlzIy57+PchqAnpeoZ/L3ZpGdi3C3wwrcjwj5nxu9lktSWlv7A4XzC7SWMMQIfGVbMuCEOLcOaJlri2ZWqVBepG+eY5qYZ6wfiyx/XF4VyWPHhnmmqX3X97zBulVf7wRWG0yeiTHdPAbPH6s8H3qrbVdJvX1TEZdt5nMDgCxa7hXiMc9uLfE1UDMQS/h4I01Pe8gCn9feHZXizrk3GP2FMfkdO1v0vp3xjzDroGzbWe75ecqrH+dD1zpwC3dBL3kgi3e2wJsiKars+Vjnd1VRhMyylgh+q1bDo1q82NzkeXmrnRpjEE99IYxlAL8id89BVv2t2fMuBxu740uFjNW/pNlg47Yv78XM0oscqfgbj2jtm2ulBRqb7gKtoaDMeyhhzxms1medlJJ2TLSptzJuNEdrPpumKOlE0lDnYfU58QPp0fM7tGp/cApMC1nLkSsLKyra9Y76G4nTA8nTlYt4+C1a1i1rsZ3BLorfEhl0+jo/3yWxxlB2gisk5W5kwZSPZgkkFjYARadruhc8spE+VV8BOw7A548HDxvlm/kFe9AXfIx+sckdYu+r112QcJq77KL5fZAQlCGOT1h33jIW+Z0HmIlSMyIrn0eCQUwZH3s3e7unFIyKKI2DwF5huUAhFgWf8NdCqNxxuMs3wFPWw1oI6h8iMwbSUyejMc/V5gswDUU1I7Q+9LMRq5C2mrPjAQX8PdcngApiSzbODXmIPSKeXjctC0mlDZ5v622LRsgA++UDQ5L+MPpqJRzaVMXL0VsK8OQzpBpZ81dhV2QxdsHyd1vqjQ772sqF/AFmW0FBZyWooXEKMK063PBLBlTMTYT56b9C7FiA6JhqjgZkSiF+7UIuBDuAot7r/eayENYEBbI8IiDbqmvHBLqDAcnV9Wdw4iLlZzcqj/c/h4/R9NoSxOrAhaZuTwpGrZsbCaUCou7KTST4ASdvLeB1jk2h96dNLQw5ns1IHkmOVVf0kC0kVzcvIzKB3M7UpbOOCFDnsjODLNQqL3RLSwj/b+l03X6U55AViorZe9KipS59/X8YKMKQygPEY6a1d4y5EP0yLHJWPARA4Qc/iWYWA1gJAvprYchS1OoiF+2gPCFUmmTL30tbVJ1dsf7SGaGgGiPAFWvWP4/nRGG2Xu9XlwW1ZhSpU8XRMkKOVLRp30jsObCZXMkkc8a9U='}], provider_specific_fields=None))], usage=Usage(completion_tokens=421, prompt_tokens=83, total_tokens=504, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=401, rejected_prediction_tokens=None, text_tokens=20, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=83, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -3972,7 +3481,7 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'toolu_01W1CJsg3A4DqUrtHpS9mGqs',\n", + "{'tool_call_id': 'call_cc7b6042491049d087b5b2c52b5b__thought__Cq8JAXLI2nyI1OmWr9kdtN0PS13ZhCWAfCRxf4MjaoInzCR/bwzqgYAwIMWbSpvvcpsFOeCp0BVKgD612WkQS5UYEsLGebguKY7QxSm/sKX+hVaRfySu/INlef29rks6OJ6Jl7KyH4ux+v0h0RoRtv5mPARXfQArRpO4Z41Mp51KSqJY7f4HcYdfVt3ORp1IZPBFfZ5YTvtyT194V1FWhzRxc5fagOwt8LP22c+6ppmE8tNQMojPgX3KbaMIEisu5FRXd09A43JEUOz2hdLTijvbwvGtDtVLcujOSgJ88sIZr7q8cQBYNXV+jygwYH60XakR+zifT6vdzzYF0QQTXT4fCtriZdxRLh63alrvbVQHTTBHwhrQjz5pgMZk3IvgoFpsZbjhfBadJIUC0k5mvFCkQ/ehQdUgnNlzIy57+PchqAnpeoZ/L3ZpGdi3C3wwrcjwj5nxu9lktSWlv7A4XzC7SWMMQIfGVbMuCEOLcOaJlri2ZWqVBepG+eY5qYZ6wfiyx/XF4VyWPHhnmmqX3X97zBulVf7wRWG0yeiTHdPAbPH6s8H3qrbVdJvX1TEZdt5nMDgCxa7hXiMc9uLfE1UDMQS/h4I01Pe8gCn9feHZXizrk3GP2FMfkdO1v0vp3xjzDroGzbWe75ecqrH+dD1zpwC3dBL3kgi3e2wJsiKars+Vjnd1VRhMyylgh+q1bDo1q82NzkeXmrnRpjEE99IYxlAL8id89BVv2t2fMuBxu740uFjNW/pNlg47Yv78XM0oscqfgbj2jtm2ulBRqb7gKtoaDMeyhhzxms1medlJJ2TLSptzJuNEdrPpumKOlE0lDnYfU58QPp0fM7tGp/cApMC1nLkSsLKyra9Y76G4nTA8nTlYt4+C1a1i1rsZ3BLorfEhl0+jo/3yWxxlB2gisk5W5kwZSPZgkkFjYARadruhc8spE+VV8BOw7A548HDxvlm/kFe9AXfIx+sckdYu+r112QcJq77KL5fZAQlCGOT1h33jIW+Z0HmIlSMyIrn0eCQUwZH3s3e7unFIyKKI2DwF5huUAhFgWf8NdCqNxxuMs3wFPWw1oI6h8iMwbSUyejMc/V5gswDUU1I7Q+9LMRq5C2mrPjAQX8PdcngApiSzbODXmIPSKeXjctC0mlDZ5v622LRsgA++UDQ5L+MPpqJRzaVMXL0VsK8OQzpBpZ81dhV2QxdsHyd1vqjQ772sqF/AFmW0FBZyWooXEKMK063PBLBlTMTYT56b9C7FiA6JhqjgZkSiF+7UIuBDuAot7r/eayENYEBbI8IiDbqmvHBLqDAcnV9Wdw4iLlZzcqj/c/h4/R9NoSxOrAhaZuTwpGrZsbCaUCou7KTST4ASdvLeB1jk2h96dNLQw5ns1IHkmOVVf0kC0kVzcvIzKB3M7UpbOOCFDnsjODLNQqL3RLSwj/b+l03X6U55AViorZe9KipS59/X8YKMKQygPEY6a1d4y5EP0yLHJWPARA4Qc/iWYWA1gJAvprYchS1OoiF+2gPCFUmmTL30tbVJ1dsf7SGaGgGiPAFWvWP4/nRGG2Xu9XlwW1ZhSpU8XRMkKOVLRp30jsObCZXMkkc8a9U=',\n", " 'role': 'tool',\n", " 'name': 'simple_add',\n", " 'content': '8'}" @@ -3984,22 +3493,158 @@ { "data": { "text/markdown": [ - "**Step 2:** Now I'll add 7 to that result (8 + 7)\n", "\n", - "🔧 simple_add({\"a\": 8, \"b\": 7})\n", + "\n", + "🔧 simple_add({\"b\": 7, \"a\": 8})\n", "\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", + "- finish_reason: `tool_calls`\n", + "- usage: `Usage(completion_tokens=20, prompt_tokens=117, total_tokens=137, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=117, image_tokens=None))`\n", + "\n", + "
" + ], + "text/plain": [ + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'index': 0, 'function': {'arguments': '{\"b\": 7, \"a\": 8}', 'name': 'simple_add'}, 'id': 'call_bdec035c5b3e451699ac8c627aa0', 'type': 'function'}], function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=20, prompt_tokens=117, total_tokens=137, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=117, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "{'tool_call_id': 'call_bdec035c5b3e451699ac8c627aa0',\n", + " 'role': 'tool',\n", + " 'name': 'simple_add',\n", + " 'content': '15'}" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "\n", + "\n", + "🔧 simple_add({\"b\": 11, \"a\": 15})\n", + "\n", + "\n", + "
\n", + "\n", + "- id: `chatcmpl-xxx`\n", + "- model: `gemini-2.5-pro`\n", + "- finish_reason: `tool_calls`\n", + "- usage: `Usage(completion_tokens=22, prompt_tokens=152, total_tokens=174, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=152, image_tokens=None))`\n", + "\n", + "
" + ], + "text/plain": [ + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'index': 0, 'function': {'arguments': '{\"b\": 11, \"a\": 15}', 'name': 'simple_add'}, 'id': 'call_09cf279e666c45b992795ceec2b0', 'type': 'function'}], function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=22, prompt_tokens=152, total_tokens=174, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=152, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "{'tool_call_id': 'call_09cf279e666c45b992795ceec2b0',\n", + " 'role': 'tool',\n", + " 'name': 'simple_add',\n", + " 'content': '26'}" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "OK, let's break it down.\n", + "\n", + "First, we add 5 and 3, which equals 8.\n", + "Then, we add 7 to that, which equals 15.\n", + "Finally, we add 11 to that, which equals 26.\n", + "\n", + "Therefore, ((5 + 3)+7)+11 = 26.\n", + "\n", + "
\n", + "\n", + "- id: `chatcmpl-xxx`\n", + "- model: `gemini-2.5-pro`\n", + "- finish_reason: `stop`\n", + "- usage: `Usage(completion_tokens=76, prompt_tokens=189, total_tokens=265, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=189, image_tokens=None))`\n", + "\n", + "
" + ], + "text/plain": [ + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"OK, let's break it down.\\n\\nFirst, we add 5 and 3, which equals 8.\\nThen, we add 7 to that, which equals 15.\\nFinally, we add 11 to that, which equals 26.\\n\\nTherefore, ((5 + 3)+7)+11 = 26.\", role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=76, prompt_tokens=189, total_tokens=265, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=189, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "chat = Chat(model, tools=[simple_add])\n", + "res = chat(\"What's ((5 + 3)+7)+11? Work step by step\", return_all=True, max_steps=5)\n", + "for r in res: display(r)" + ] + }, + { + "cell_type": "markdown", + "id": "a9f66101", + "metadata": {}, + "source": [ + "Some models support parallel tool calling. I.e. sending multiple tool call requests in one conversation step." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ec77539", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "\n", + "\n", + "🔧 simple_add({\"a\": 5, \"b\": 3})\n", + "\n", + "\n", + "\n", + "🔧 simple_add({\"a\": 7, \"b\": 2})\n", + "\n", + "\n", + "
\n", + "\n", + "- id: `chatcmpl-xxx`\n", + "- model: `gpt-4.1-2025-04-14`\n", "- finish_reason: `tool_calls`\n", - "- usage: `Usage(completion_tokens=93, prompt_tokens=730, total_tokens=823, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=52, prompt_tokens=110, total_tokens=162, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=\"**Step 2:** Now I'll add 7 to that result (8 + 7)\", role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 8, \"b\": 7}', 'name': 'simple_add'}, 'id': 'toolu_01DwP5Yx8wtAMvUo1SuNJSp4', 'type': 'function'}], function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=93, prompt_tokens=730, total_tokens=823, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gpt-4.1-2025-04-14', object='chat.completion', system_fingerprint='fp_09249d7c7b', choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'function': {'arguments': '{\"a\": 5, \"b\": 3}', 'name': 'simple_add'}, 'id': 'call_ZkjKhtlfgQlLS7sYaBogidfb', 'type': 'function'}, {'function': {'arguments': '{\"a\": 7, \"b\": 2}', 'name': 'simple_add'}, 'id': 'call_tno0OSF22ZQI3ShG1xFdGDxT', 'type': 'function'}], function_call=None, provider_specific_fields={'refusal': None}, annotations=[]), provider_specific_fields={})], usage=Usage(completion_tokens=52, prompt_tokens=110, total_tokens=162, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None)), service_tier='default')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "{'tool_call_id': 'call_ZkjKhtlfgQlLS7sYaBogidfb',\n", + " 'role': 'tool',\n", + " 'name': 'simple_add',\n", + " 'content': '8'}" ] }, "metadata": {}, @@ -4008,10 +3653,10 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'toolu_01DwP5Yx8wtAMvUo1SuNJSp4',\n", + "{'tool_call_id': 'call_tno0OSF22ZQI3ShG1xFdGDxT',\n", " 'role': 'tool',\n", " 'name': 'simple_add',\n", - " 'content': '15'}" + " 'content': '9'}" ] }, "metadata": {}, @@ -4020,22 +3665,22 @@ { "data": { "text/markdown": [ - "**Step 3:** Finally, I'll add 11 to that result (15 + 11)\n", "\n", - "🔧 simple_add({\"a\": 15, \"b\": 11})\n", + "\n", + "🔧 multiply({\"a\":8,\"b\":9})\n", "\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gpt-4.1-2025-04-14`\n", "- finish_reason: `tool_calls`\n", - "- usage: `Usage(completion_tokens=94, prompt_tokens=836, total_tokens=930, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=17, prompt_tokens=178, total_tokens=195, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=\"**Step 3:** Finally, I'll add 11 to that result (15 + 11)\", role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 15, \"b\": 11}', 'name': 'simple_add'}, 'id': 'toolu_012K1WV9pFBrjJHAGHxFwqwC', 'type': 'function'}], function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=94, prompt_tokens=836, total_tokens=930, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gpt-4.1-2025-04-14', object='chat.completion', system_fingerprint='fp_09249d7c7b', choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'function': {'arguments': '{\"a\":8,\"b\":9}', 'name': 'multiply'}, 'id': 'call_osF7ih5fUnuMrWC0PAae8ik4', 'type': 'function'}], function_call=None, provider_specific_fields={'refusal': None}, annotations=[]), provider_specific_fields={})], usage=Usage(completion_tokens=17, prompt_tokens=178, total_tokens=195, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None)), service_tier='default')" ] }, "metadata": {}, @@ -4044,10 +3689,10 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'toolu_012K1WV9pFBrjJHAGHxFwqwC',\n", + "{'tool_call_id': 'call_osF7ih5fUnuMrWC0PAae8ik4',\n", " 'role': 'tool',\n", - " 'name': 'simple_add',\n", - " 'content': '26'}" + " 'name': 'multiply',\n", + " 'content': '72'}" ] }, "metadata": {}, @@ -4056,24 +3701,21 @@ { "data": { "text/markdown": [ - "**Answer:** ((5 + 3) + 7) + 11 = **26**\n", + "(5 + 3) = 8 and (7 + 2) = 9. Multiplying them gives 8 × 9 = 72. \n", "\n", - "Here's the breakdown:\n", - "- 5 + 3 = 8\n", - "- 8 + 7 = 15\n", - "- 15 + 11 = 26\n", + "So, (5 + 3) * (7 + 2) = 72.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gpt-4.1-2025-04-14`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=67, prompt_tokens=943, total_tokens=1010, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=54, prompt_tokens=203, total_tokens=257, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"**Answer:** ((5 + 3) + 7) + 11 = **26**\\n\\nHere's the breakdown:\\n- 5 + 3 = 8\\n- 8 + 7 = 15\\n- 15 + 11 = 26\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=67, prompt_tokens=943, total_tokens=1010, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gpt-4.1-2025-04-14', object='chat.completion', system_fingerprint='fp_09249d7c7b', choices=[Choices(finish_reason='stop', index=0, message=Message(content='(5 + 3) = 8 and (7 + 2) = 9. Multiplying them gives 8 × 9 = 72. \\n\\nSo, (5 + 3) * (7 + 2) = 72.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'refusal': None}, annotations=[]), provider_specific_fields={})], usage=Usage(completion_tokens=54, prompt_tokens=203, total_tokens=257, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None)), service_tier='default')" ] }, "metadata": {}, @@ -4081,23 +3723,19 @@ } ], "source": [ - "chat = Chat(model, tools=[simple_add])\n", - "res = chat(\"What's ((5 + 3)+7)+11? Work step by step\", return_all=True, max_steps=5)\n", + "def multiply(a: int, b: int) -> int:\n", + " \"Multiply two numbers\"\n", + " return a * b\n", + "\n", + "chat = Chat('openai/gpt-4.1', tools=[simple_add, multiply])\n", + "res = chat(\"Calculate (5 + 3) * (7 + 2)\", max_steps=5, return_all=True)\n", "for r in res: display(r)" ] }, - { - "cell_type": "markdown", - "id": "a9f66101", - "metadata": {}, - "source": [ - "Some models support parallel tool calling. I.e. sending multiple tool call requests in one conversation step." - ] - }, { "cell_type": "code", "execution_count": null, - "id": "3ec77539", + "id": "6f3d9d99", "metadata": {}, "outputs": [ { @@ -4115,14 +3753,14 @@ "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `gpt-4.1-2025-04-14`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `tool_calls`\n", - "- usage: `Usage(completion_tokens=52, prompt_tokens=110, total_tokens=162, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None))`\n", + "- usage: `Usage(completion_tokens=437, prompt_tokens=133, total_tokens=570, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=397, rejected_prediction_tokens=None, text_tokens=40, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=133, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gpt-4.1-2025-04-14', object='chat.completion', system_fingerprint='fp_09249d7c7b', choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'function': {'arguments': '{\"a\": 5, \"b\": 3}', 'name': 'simple_add'}, 'id': 'call_ZkjKhtlfgQlLS7sYaBogidfb', 'type': 'function'}, {'function': {'arguments': '{\"a\": 7, \"b\": 2}', 'name': 'simple_add'}, 'id': 'call_tno0OSF22ZQI3ShG1xFdGDxT', 'type': 'function'}], function_call=None, provider_specific_fields={'refusal': None}, annotations=[]), provider_specific_fields={})], usage=Usage(completion_tokens=52, prompt_tokens=110, total_tokens=162, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None)), service_tier='default')" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'index': 0, 'provider_specific_fields': {'thought_signature': 'CsoJAXLI2nyi+SWD/EIPkeNQgWjtEr0fmsi/pcJlk7Oyg+2sPpofUjK097Mg5fGTgPmllZru8StllpOcJVLLPPvWzpKkkeaGVj4ZDnRFvSfJSlVo7GMyRTWimJNRASCEAWKTUCQ5A/vyGqhYDRQQKAEnBtdcqfG/+QvhHvUr77XMLAWJTR4kgaczZV1igOnGjrhddo5d5Ti/bl2CrwGIem5FQXR1R0SOjrtsXDDD3KJFX386MecRiCNHmmBLnTL8C3hJ24jds5IxqDvKvXcxYxdTFVdF5vwN2IslkxaBrrHToNTlCj508WC0a8mRNqtkWGV6GQAfnSubqhhCMp9Et7djmDQgfl+F1fw7YfIyaPDIJgVgatLZhOE4Xyceku0fGY3Aqxmj27i702bMaLgVvOkB4BTgsmtKMPeYXxPaOapAYdgAw9Y64y2sjd/3yBvt+rX0V/rCX5z4WISavzvu0Bv5Bi0XZ91HzeqhFdR7vj/X24HBZ7peszVXdx+9V0rscIFmJGWulSVFtL1LFz0T60ig44ms1yECZQCsbn1ChJ+Eev5H6XhCbnRBvqht920GGVSo/XwHEzCg59b1vhFi59Ndt5PSwVmxIWydxSsUYT6Tfv2cFX2HhA1tyPB2Fch/Fbb2vx3pFX/CfHAWmub/SBCxhgG6zZqJ4CiXDTbKtbf636fTO8OtnC+SAbVBKjaylzx/6iRtx0nnz4wdAI9j3qcX6duXQWhTtlghHU/xBzmT6HovR/O60Oc4v2tzPig/qtWIFjKdLLorB0F5ElQQNqtvpsctZJIEJX9e1+1SXH0LdBERe2DdpamnSxX1U9yKCcCcZfwQkjNpvKDVbwCZ6gsASe2LzKhoRKzaZ+hYw1loksQdFto1Y2QHVLB+cxhhAZ+ADnBL5cdufwmpVi2kFDzM/rcUxYXPKll0f85RxNNT3uXvtpfmQoiGLvq9rY7KKVf6N0/AS/6RvIj50PSXWoPax2DzRRFp7etN1ehTF041iXAxTNwZJD7uWXkZIQ0eeBWTm3IeWyw72jhDfnxmssWzKDRUUYIFaN9sONIJP8lRvUikVvsmmh9POZNuycugYMHKM1Z/jJgOuissZGY/wwthpjTdLs9aX9PSOY8ClfjOMwDq7TpbYVNZRpej0dD9r8BJl1TVn0jM+Qsz6x+tuLtDsJnIPlx8m2MKYx9qYrDY4AimTcJ1IIPGe7xAeC494r5LtTZ7GKdD2eS+JWsn7prstD3/hxrYt/Y1VDc3L6wW11XgSZugWCK21nW800AR19aIqVw9xqycZ29qzkXXVcAlKzyIz3xW6HeL1ZSm1VehnunPIbDdj9PYrYc+aWe4op2trDesPLwd9u7Wj3azkIgNLwjhiDbbYFe6iTziEgQW4C42aYLlPOlsDO0QIb3wBWSh2hWkb65JblhQwlpNEOhFSWb4oVYR4XHNXlgX98yg13mrUx9pvSV5V0K5W52qyOg1vv8HRatQ1pERP0oPJwao0Mu5GLQTEB3V+zCKVr/uUgE/zfwIwhYsarRwVCqX9wFy+y6QxgImjBgiPkfS1BrblO+tTR2p/XSDzr/5glVy6t8nIfgvu9bTf/mrvNzS/y4ZHXIj7xvbahWcNYCK0cbVX2Fnk+6tH0HeO88KrgEBcsjafFlbendVA5x2dMVfBPV89SuXazNAD1AnvHXSrSYEPbpQMfXVqZj8es5lsjrecWk3hfUOOJCPYyCehpC6oI+l1xXC/XU/b5e53Avdtqg4+NvwIJa1FZkf2wm9jAFamvQZsOchRSAHHF/KwM4AOw+RCBwCOODRm+XESbhvA5m6p7mwclV/ell/gdl98cGaapFFUGue2mHMezPIUe2Gf7i068X1bQjm2COqmRoKXwFyyNp8Fc2fQ6ide/xeMpRt1JChE7n15+5qkf/OxwiBcmsrCELf1vqqy0ZAXTphc5al6hKSN1gz0Bc6fEvwn2BxYxEgXVjvFEP35GCKwcnef65ak7NqbfhuKdQVzb6o'}, 'function': {'arguments': '{\"a\": 5, \"b\": 3}', 'name': 'simple_add'}, 'id': 'call_d1dd2a0b14c542cf9b199aa37b7f__thought__CsoJAXLI2nyi+SWD/EIPkeNQgWjtEr0fmsi/pcJlk7Oyg+2sPpofUjK097Mg5fGTgPmllZru8StllpOcJVLLPPvWzpKkkeaGVj4ZDnRFvSfJSlVo7GMyRTWimJNRASCEAWKTUCQ5A/vyGqhYDRQQKAEnBtdcqfG/+QvhHvUr77XMLAWJTR4kgaczZV1igOnGjrhddo5d5Ti/bl2CrwGIem5FQXR1R0SOjrtsXDDD3KJFX386MecRiCNHmmBLnTL8C3hJ24jds5IxqDvKvXcxYxdTFVdF5vwN2IslkxaBrrHToNTlCj508WC0a8mRNqtkWGV6GQAfnSubqhhCMp9Et7djmDQgfl+F1fw7YfIyaPDIJgVgatLZhOE4Xyceku0fGY3Aqxmj27i702bMaLgVvOkB4BTgsmtKMPeYXxPaOapAYdgAw9Y64y2sjd/3yBvt+rX0V/rCX5z4WISavzvu0Bv5Bi0XZ91HzeqhFdR7vj/X24HBZ7peszVXdx+9V0rscIFmJGWulSVFtL1LFz0T60ig44ms1yECZQCsbn1ChJ+Eev5H6XhCbnRBvqht920GGVSo/XwHEzCg59b1vhFi59Ndt5PSwVmxIWydxSsUYT6Tfv2cFX2HhA1tyPB2Fch/Fbb2vx3pFX/CfHAWmub/SBCxhgG6zZqJ4CiXDTbKtbf636fTO8OtnC+SAbVBKjaylzx/6iRtx0nnz4wdAI9j3qcX6duXQWhTtlghHU/xBzmT6HovR/O60Oc4v2tzPig/qtWIFjKdLLorB0F5ElQQNqtvpsctZJIEJX9e1+1SXH0LdBERe2DdpamnSxX1U9yKCcCcZfwQkjNpvKDVbwCZ6gsASe2LzKhoRKzaZ+hYw1loksQdFto1Y2QHVLB+cxhhAZ+ADnBL5cdufwmpVi2kFDzM/rcUxYXPKll0f85RxNNT3uXvtpfmQoiGLvq9rY7KKVf6N0/AS/6RvIj50PSXWoPax2DzRRFp7etN1ehTF041iXAxTNwZJD7uWXkZIQ0eeBWTm3IeWyw72jhDfnxmssWzKDRUUYIFaN9sONIJP8lRvUikVvsmmh9POZNuycugYMHKM1Z/jJgOuissZGY/wwthpjTdLs9aX9PSOY8ClfjOMwDq7TpbYVNZRpej0dD9r8BJl1TVn0jM+Qsz6x+tuLtDsJnIPlx8m2MKYx9qYrDY4AimTcJ1IIPGe7xAeC494r5LtTZ7GKdD2eS+JWsn7prstD3/hxrYt/Y1VDc3L6wW11XgSZugWCK21nW800AR19aIqVw9xqycZ29qzkXXVcAlKzyIz3xW6HeL1ZSm1VehnunPIbDdj9PYrYc+aWe4op2trDesPLwd9u7Wj3azkIgNLwjhiDbbYFe6iTziEgQW4C42aYLlPOlsDO0QIb3wBWSh2hWkb65JblhQwlpNEOhFSWb4oVYR4XHNXlgX98yg13mrUx9pvSV5V0K5W52qyOg1vv8HRatQ1pERP0oPJwao0Mu5GLQTEB3V+zCKVr/uUgE/zfwIwhYsarRwVCqX9wFy+y6QxgImjBgiPkfS1BrblO+tTR2p/XSDzr/5glVy6t8nIfgvu9bTf/mrvNzS/y4ZHXIj7xvbahWcNYCK0cbVX2Fnk+6tH0HeO88KrgEBcsjafFlbendVA5x2dMVfBPV89SuXazNAD1AnvHXSrSYEPbpQMfXVqZj8es5lsjrecWk3hfUOOJCPYyCehpC6oI+l1xXC/XU/b5e53Avdtqg4+NvwIJa1FZkf2wm9jAFamvQZsOchRSAHHF/KwM4AOw+RCBwCOODRm+XESbhvA5m6p7mwclV/ell/gdl98cGaapFFUGue2mHMezPIUe2Gf7i068X1bQjm2COqmRoKXwFyyNp8Fc2fQ6ide/xeMpRt1JChE7n15+5qkf/OxwiBcmsrCELf1vqqy0ZAXTphc5al6hKSN1gz0Bc6fEvwn2BxYxEgXVjvFEP35GCKwcnef65ak7NqbfhuKdQVzb6o', 'type': 'function'}, {'index': 1, 'function': {'arguments': '{\"a\": 7, \"b\": 2}', 'name': 'simple_add'}, 'id': 'call_cb54d00213cd4633af9c69bdb5b0', 'type': 'function'}], function_call=None, images=[], thinking_blocks=[{'type': 'thinking', 'thinking': '{\"functionCall\": {\"name\": \"simple_add\", \"args\": {\"a\": 5, \"b\": 3}}}', 'signature': 'CsoJAXLI2nyi+SWD/EIPkeNQgWjtEr0fmsi/pcJlk7Oyg+2sPpofUjK097Mg5fGTgPmllZru8StllpOcJVLLPPvWzpKkkeaGVj4ZDnRFvSfJSlVo7GMyRTWimJNRASCEAWKTUCQ5A/vyGqhYDRQQKAEnBtdcqfG/+QvhHvUr77XMLAWJTR4kgaczZV1igOnGjrhddo5d5Ti/bl2CrwGIem5FQXR1R0SOjrtsXDDD3KJFX386MecRiCNHmmBLnTL8C3hJ24jds5IxqDvKvXcxYxdTFVdF5vwN2IslkxaBrrHToNTlCj508WC0a8mRNqtkWGV6GQAfnSubqhhCMp9Et7djmDQgfl+F1fw7YfIyaPDIJgVgatLZhOE4Xyceku0fGY3Aqxmj27i702bMaLgVvOkB4BTgsmtKMPeYXxPaOapAYdgAw9Y64y2sjd/3yBvt+rX0V/rCX5z4WISavzvu0Bv5Bi0XZ91HzeqhFdR7vj/X24HBZ7peszVXdx+9V0rscIFmJGWulSVFtL1LFz0T60ig44ms1yECZQCsbn1ChJ+Eev5H6XhCbnRBvqht920GGVSo/XwHEzCg59b1vhFi59Ndt5PSwVmxIWydxSsUYT6Tfv2cFX2HhA1tyPB2Fch/Fbb2vx3pFX/CfHAWmub/SBCxhgG6zZqJ4CiXDTbKtbf636fTO8OtnC+SAbVBKjaylzx/6iRtx0nnz4wdAI9j3qcX6duXQWhTtlghHU/xBzmT6HovR/O60Oc4v2tzPig/qtWIFjKdLLorB0F5ElQQNqtvpsctZJIEJX9e1+1SXH0LdBERe2DdpamnSxX1U9yKCcCcZfwQkjNpvKDVbwCZ6gsASe2LzKhoRKzaZ+hYw1loksQdFto1Y2QHVLB+cxhhAZ+ADnBL5cdufwmpVi2kFDzM/rcUxYXPKll0f85RxNNT3uXvtpfmQoiGLvq9rY7KKVf6N0/AS/6RvIj50PSXWoPax2DzRRFp7etN1ehTF041iXAxTNwZJD7uWXkZIQ0eeBWTm3IeWyw72jhDfnxmssWzKDRUUYIFaN9sONIJP8lRvUikVvsmmh9POZNuycugYMHKM1Z/jJgOuissZGY/wwthpjTdLs9aX9PSOY8ClfjOMwDq7TpbYVNZRpej0dD9r8BJl1TVn0jM+Qsz6x+tuLtDsJnIPlx8m2MKYx9qYrDY4AimTcJ1IIPGe7xAeC494r5LtTZ7GKdD2eS+JWsn7prstD3/hxrYt/Y1VDc3L6wW11XgSZugWCK21nW800AR19aIqVw9xqycZ29qzkXXVcAlKzyIz3xW6HeL1ZSm1VehnunPIbDdj9PYrYc+aWe4op2trDesPLwd9u7Wj3azkIgNLwjhiDbbYFe6iTziEgQW4C42aYLlPOlsDO0QIb3wBWSh2hWkb65JblhQwlpNEOhFSWb4oVYR4XHNXlgX98yg13mrUx9pvSV5V0K5W52qyOg1vv8HRatQ1pERP0oPJwao0Mu5GLQTEB3V+zCKVr/uUgE/zfwIwhYsarRwVCqX9wFy+y6QxgImjBgiPkfS1BrblO+tTR2p/XSDzr/5glVy6t8nIfgvu9bTf/mrvNzS/y4ZHXIj7xvbahWcNYCK0cbVX2Fnk+6tH0HeO88KrgEBcsjafFlbendVA5x2dMVfBPV89SuXazNAD1AnvHXSrSYEPbpQMfXVqZj8es5lsjrecWk3hfUOOJCPYyCehpC6oI+l1xXC/XU/b5e53Avdtqg4+NvwIJa1FZkf2wm9jAFamvQZsOchRSAHHF/KwM4AOw+RCBwCOODRm+XESbhvA5m6p7mwclV/ell/gdl98cGaapFFUGue2mHMezPIUe2Gf7i068X1bQjm2COqmRoKXwFyyNp8Fc2fQ6ide/xeMpRt1JChE7n15+5qkf/OxwiBcmsrCELf1vqqy0ZAXTphc5al6hKSN1gz0Bc6fEvwn2BxYxEgXVjvFEP35GCKwcnef65ak7NqbfhuKdQVzb6o'}], provider_specific_fields=None))], usage=Usage(completion_tokens=437, prompt_tokens=133, total_tokens=570, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=397, rejected_prediction_tokens=None, text_tokens=40, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=133, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -4131,7 +3769,7 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'call_ZkjKhtlfgQlLS7sYaBogidfb',\n", + "{'tool_call_id': 'call_d1dd2a0b14c542cf9b199aa37b7f__thought__CsoJAXLI2nyi+SWD/EIPkeNQgWjtEr0fmsi/pcJlk7Oyg+2sPpofUjK097Mg5fGTgPmllZru8StllpOcJVLLPPvWzpKkkeaGVj4ZDnRFvSfJSlVo7GMyRTWimJNRASCEAWKTUCQ5A/vyGqhYDRQQKAEnBtdcqfG/+QvhHvUr77XMLAWJTR4kgaczZV1igOnGjrhddo5d5Ti/bl2CrwGIem5FQXR1R0SOjrtsXDDD3KJFX386MecRiCNHmmBLnTL8C3hJ24jds5IxqDvKvXcxYxdTFVdF5vwN2IslkxaBrrHToNTlCj508WC0a8mRNqtkWGV6GQAfnSubqhhCMp9Et7djmDQgfl+F1fw7YfIyaPDIJgVgatLZhOE4Xyceku0fGY3Aqxmj27i702bMaLgVvOkB4BTgsmtKMPeYXxPaOapAYdgAw9Y64y2sjd/3yBvt+rX0V/rCX5z4WISavzvu0Bv5Bi0XZ91HzeqhFdR7vj/X24HBZ7peszVXdx+9V0rscIFmJGWulSVFtL1LFz0T60ig44ms1yECZQCsbn1ChJ+Eev5H6XhCbnRBvqht920GGVSo/XwHEzCg59b1vhFi59Ndt5PSwVmxIWydxSsUYT6Tfv2cFX2HhA1tyPB2Fch/Fbb2vx3pFX/CfHAWmub/SBCxhgG6zZqJ4CiXDTbKtbf636fTO8OtnC+SAbVBKjaylzx/6iRtx0nnz4wdAI9j3qcX6duXQWhTtlghHU/xBzmT6HovR/O60Oc4v2tzPig/qtWIFjKdLLorB0F5ElQQNqtvpsctZJIEJX9e1+1SXH0LdBERe2DdpamnSxX1U9yKCcCcZfwQkjNpvKDVbwCZ6gsASe2LzKhoRKzaZ+hYw1loksQdFto1Y2QHVLB+cxhhAZ+ADnBL5cdufwmpVi2kFDzM/rcUxYXPKll0f85RxNNT3uXvtpfmQoiGLvq9rY7KKVf6N0/AS/6RvIj50PSXWoPax2DzRRFp7etN1ehTF041iXAxTNwZJD7uWXkZIQ0eeBWTm3IeWyw72jhDfnxmssWzKDRUUYIFaN9sONIJP8lRvUikVvsmmh9POZNuycugYMHKM1Z/jJgOuissZGY/wwthpjTdLs9aX9PSOY8ClfjOMwDq7TpbYVNZRpej0dD9r8BJl1TVn0jM+Qsz6x+tuLtDsJnIPlx8m2MKYx9qYrDY4AimTcJ1IIPGe7xAeC494r5LtTZ7GKdD2eS+JWsn7prstD3/hxrYt/Y1VDc3L6wW11XgSZugWCK21nW800AR19aIqVw9xqycZ29qzkXXVcAlKzyIz3xW6HeL1ZSm1VehnunPIbDdj9PYrYc+aWe4op2trDesPLwd9u7Wj3azkIgNLwjhiDbbYFe6iTziEgQW4C42aYLlPOlsDO0QIb3wBWSh2hWkb65JblhQwlpNEOhFSWb4oVYR4XHNXlgX98yg13mrUx9pvSV5V0K5W52qyOg1vv8HRatQ1pERP0oPJwao0Mu5GLQTEB3V+zCKVr/uUgE/zfwIwhYsarRwVCqX9wFy+y6QxgImjBgiPkfS1BrblO+tTR2p/XSDzr/5glVy6t8nIfgvu9bTf/mrvNzS/y4ZHXIj7xvbahWcNYCK0cbVX2Fnk+6tH0HeO88KrgEBcsjafFlbendVA5x2dMVfBPV89SuXazNAD1AnvHXSrSYEPbpQMfXVqZj8es5lsjrecWk3hfUOOJCPYyCehpC6oI+l1xXC/XU/b5e53Avdtqg4+NvwIJa1FZkf2wm9jAFamvQZsOchRSAHHF/KwM4AOw+RCBwCOODRm+XESbhvA5m6p7mwclV/ell/gdl98cGaapFFUGue2mHMezPIUe2Gf7i068X1bQjm2COqmRoKXwFyyNp8Fc2fQ6ide/xeMpRt1JChE7n15+5qkf/OxwiBcmsrCELf1vqqy0ZAXTphc5al6hKSN1gz0Bc6fEvwn2BxYxEgXVjvFEP35GCKwcnef65ak7NqbfhuKdQVzb6o',\n", " 'role': 'tool',\n", " 'name': 'simple_add',\n", " 'content': '8'}" @@ -4143,7 +3781,7 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'call_tno0OSF22ZQI3ShG1xFdGDxT',\n", + "{'tool_call_id': 'call_cb54d00213cd4633af9c69bdb5b0',\n", " 'role': 'tool',\n", " 'name': 'simple_add',\n", " 'content': '9'}" @@ -4157,20 +3795,20 @@ "text/markdown": [ "\n", "\n", - "🔧 multiply({\"a\":8,\"b\":9})\n", + "🔧 multiply({\"b\": 9, \"a\": 8})\n", "\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `gpt-4.1-2025-04-14`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `tool_calls`\n", - "- usage: `Usage(completion_tokens=17, prompt_tokens=178, total_tokens=195, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None))`\n", + "- usage: `Usage(completion_tokens=18, prompt_tokens=200, total_tokens=218, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=200, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gpt-4.1-2025-04-14', object='chat.completion', system_fingerprint='fp_09249d7c7b', choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'function': {'arguments': '{\"a\":8,\"b\":9}', 'name': 'multiply'}, 'id': 'call_osF7ih5fUnuMrWC0PAae8ik4', 'type': 'function'}], function_call=None, provider_specific_fields={'refusal': None}, annotations=[]), provider_specific_fields={})], usage=Usage(completion_tokens=17, prompt_tokens=178, total_tokens=195, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None)), service_tier='default')" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'index': 0, 'function': {'arguments': '{\"b\": 9, \"a\": 8}', 'name': 'multiply'}, 'id': 'call_2d4ae946753b4fd092a79e486d51', 'type': 'function'}], function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=18, prompt_tokens=200, total_tokens=218, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=200, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -4179,7 +3817,7 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'call_osF7ih5fUnuMrWC0PAae8ik4',\n", + "{'tool_call_id': 'call_2d4ae946753b4fd092a79e486d51',\n", " 'role': 'tool',\n", " 'name': 'multiply',\n", " 'content': '72'}" @@ -4191,21 +3829,20 @@ { "data": { "text/markdown": [ - "(5 + 3) = 8 and (7 + 2) = 9. Multiplying them gives 8 × 9 = 72. \n", + "(5 + 3) * (7 + 2) = 72\n", "\n", - "So, (5 + 3) * (7 + 2) = 72.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `gpt-4.1-2025-04-14`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=54, prompt_tokens=203, total_tokens=257, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None))`\n", + "- usage: `Usage(completion_tokens=17, prompt_tokens=231, total_tokens=248, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=231, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gpt-4.1-2025-04-14', object='chat.completion', system_fingerprint='fp_09249d7c7b', choices=[Choices(finish_reason='stop', index=0, message=Message(content='(5 + 3) = 8 and (7 + 2) = 9. Multiplying them gives 8 × 9 = 72. \\n\\nSo, (5 + 3) * (7 + 2) = 72.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'refusal': None}, annotations=[]), provider_specific_fields={})], usage=Usage(completion_tokens=54, prompt_tokens=203, total_tokens=257, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None)), service_tier='default')" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='(5 + 3) * (7 + 2) = 72\\n', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=17, prompt_tokens=231, total_tokens=248, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=231, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -4213,11 +3850,7 @@ } ], "source": [ - "def multiply(a: int, b: int) -> int:\n", - " \"Multiply two numbers\"\n", - " return a * b\n", - "\n", - "chat = Chat('openai/gpt-4.1', tools=[simple_add, multiply])\n", + "chat = Chat(model, tools=[simple_add, multiply])\n", "res = chat(\"Calculate (5 + 3) * (7 + 2)\", max_steps=5, return_all=True)\n", "for r in res: display(r)" ] @@ -4227,7 +3860,7 @@ "id": "dba4958f", "metadata": {}, "source": [ - "See it did the additions in one go!" + "See it did the additions in one go!l" ] }, { @@ -4235,7 +3868,7 @@ "id": "33b17e71", "metadata": {}, "source": [ - "We don't want the model to keep running tools indefinitely. Lets showcase how we can force thee model to stop after our specified number of toolcall rounds:" + "We don't want the model to keep running tools indefinitely. Lets showcase how we can force the model to stop after our specified number of toolcall rounds:" ] }, { @@ -4247,30 +3880,22 @@ { "data": { "text/markdown": [ - "I'll calculate this step by step, following the order of operations.\n", - "\n", - "**Step 1:** Calculate the inner parentheses first\n", - "- (10 + 5) = ?\n", - "- (2 + 1) = ?\n", - "\n", - "🔧 simple_add({\"a\": 10, \"b\": 5})\n", "\n", "\n", - "\n", - "🔧 simple_add({\"a\": 2, \"b\": 1})\n", + "🔧 simple_add({\"b\": 5, \"a\": 10})\n", "\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `tool_calls`\n", - "- usage: `Usage(completion_tokens=173, prompt_tokens=792, total_tokens=965, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=282, prompt_tokens=196, total_tokens=478, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=261, rejected_prediction_tokens=None, text_tokens=21, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=196, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=\"I'll calculate this step by step, following the order of operations.\\n\\n**Step 1:** Calculate the inner parentheses first\\n- (10 + 5) = ?\\n- (2 + 1) = ?\", role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 10, \"b\": 5}', 'name': 'simple_add'}, 'id': 'toolu_01E79vEXyzAZRNKZV8Npihpk', 'type': 'function'}, {'index': 2, 'function': {'arguments': '{\"a\": 2, \"b\": 1}', 'name': 'simple_add'}, 'id': 'toolu_01SdiNubsdUZWUFho98S1EFN', 'type': 'function'}], function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=173, prompt_tokens=792, total_tokens=965, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'index': 0, 'provider_specific_fields': {'thought_signature': 'CvsFAXLI2ny8YLvBawTd8D7gBw9VmF8EuI2U6Cjg78oPBdDYqQ6SJ9VeYHkwte8J9GJUOSj0XxmWCDSnIUCE4k1ywaBXjKvCBEULv91iDTIlWtLzfAxqoVce6byGsTL6JRCyeLWtLn8pjbvQ2dCfMEc+JbwZqmjgkA5kkRVXbMSS+clU8gJ4eZzDEM8ybvBIbbK0SpHlcKqwnpADIGlRiAH/3DXaZlEnIqSlaVZKFtQIbx4K0t3R1+BuOt2nIuss1i0Vwxa68ywc+UEOfl8eBmVcozr2VDTDb6ce5cW13yQ6gpwf3VDUMb5C7R5qFRgtUijGac4QncRTnQMreakSctMA/wdL6DxhDs82I67+9EP0edctTO3DCv/VjwIAejTUa67rH9O6tmteIcNC0Re2ylsDXPH0rZYHH+FQcNIYGBJQkXL3oDXe/AcT4YWGjCMQ1TNNPiOgyYiLwejgzCi5/mgddl75RNKf+j8m4tx9yJSi97ATkj4h6NBR5XjiMDXxZwwVCpKF89csyiP6zSqddweWcIStJ+VLZEy4PEgGjKeC+YGEqMQmr2GCYAZtsaRsixvDpZnvqy4vBbWz7h31DyTZIs7KhQPBdp1B2EklwCoUhPTId2nD3xfTTC+BEsCWylp7tpvvzDL31A5D3mxUMnxnNYhA3RW9W8Vru1LICx8ChdHFkjhkZHO3PFcAShL4SiGGMbltDUKqVVAks1m+sFheO1kX5yc5GLtA39CSel6ZWsgyIwDecufseaCRXDkW5KnoakQmA+UMHos8yNi8tsIFITWEerc5oOWnmpcZxuL2z5YnJ+ntOnFREvY6z77lzELTjkOFPmGOnqBoECiFatj2ZC1o0B8tnWtCIkXMYKDYYnK2PlhCRfwlTa7lub9qtVo+4SblW+lcyTPifPy7w7uhT2Wkv1sFwykB9LUfnpcqExS6AO8lkng3JOjBGa5w2l2uoBa+232aI0ukPI6XdyStCffI4FRp1QoDVzLj0EZtvJc+iPSeHgAeJYMsCQ=='}, 'function': {'arguments': '{\"b\": 5, \"a\": 10}', 'name': 'simple_add'}, 'id': 'call_e5966420acdb4a2397f3c6082298__thought__CvsFAXLI2ny8YLvBawTd8D7gBw9VmF8EuI2U6Cjg78oPBdDYqQ6SJ9VeYHkwte8J9GJUOSj0XxmWCDSnIUCE4k1ywaBXjKvCBEULv91iDTIlWtLzfAxqoVce6byGsTL6JRCyeLWtLn8pjbvQ2dCfMEc+JbwZqmjgkA5kkRVXbMSS+clU8gJ4eZzDEM8ybvBIbbK0SpHlcKqwnpADIGlRiAH/3DXaZlEnIqSlaVZKFtQIbx4K0t3R1+BuOt2nIuss1i0Vwxa68ywc+UEOfl8eBmVcozr2VDTDb6ce5cW13yQ6gpwf3VDUMb5C7R5qFRgtUijGac4QncRTnQMreakSctMA/wdL6DxhDs82I67+9EP0edctTO3DCv/VjwIAejTUa67rH9O6tmteIcNC0Re2ylsDXPH0rZYHH+FQcNIYGBJQkXL3oDXe/AcT4YWGjCMQ1TNNPiOgyYiLwejgzCi5/mgddl75RNKf+j8m4tx9yJSi97ATkj4h6NBR5XjiMDXxZwwVCpKF89csyiP6zSqddweWcIStJ+VLZEy4PEgGjKeC+YGEqMQmr2GCYAZtsaRsixvDpZnvqy4vBbWz7h31DyTZIs7KhQPBdp1B2EklwCoUhPTId2nD3xfTTC+BEsCWylp7tpvvzDL31A5D3mxUMnxnNYhA3RW9W8Vru1LICx8ChdHFkjhkZHO3PFcAShL4SiGGMbltDUKqVVAks1m+sFheO1kX5yc5GLtA39CSel6ZWsgyIwDecufseaCRXDkW5KnoakQmA+UMHos8yNi8tsIFITWEerc5oOWnmpcZxuL2z5YnJ+ntOnFREvY6z77lzELTjkOFPmGOnqBoECiFatj2ZC1o0B8tnWtCIkXMYKDYYnK2PlhCRfwlTa7lub9qtVo+4SblW+lcyTPifPy7w7uhT2Wkv1sFwykB9LUfnpcqExS6AO8lkng3JOjBGa5w2l2uoBa+232aI0ukPI6XdyStCffI4FRp1QoDVzLj0EZtvJc+iPSeHgAeJYMsCQ==', 'type': 'function'}], function_call=None, images=[], thinking_blocks=[{'type': 'thinking', 'thinking': '{\"functionCall\": {\"name\": \"simple_add\", \"args\": {\"b\": 5, \"a\": 10}}}', 'signature': 'CvsFAXLI2ny8YLvBawTd8D7gBw9VmF8EuI2U6Cjg78oPBdDYqQ6SJ9VeYHkwte8J9GJUOSj0XxmWCDSnIUCE4k1ywaBXjKvCBEULv91iDTIlWtLzfAxqoVce6byGsTL6JRCyeLWtLn8pjbvQ2dCfMEc+JbwZqmjgkA5kkRVXbMSS+clU8gJ4eZzDEM8ybvBIbbK0SpHlcKqwnpADIGlRiAH/3DXaZlEnIqSlaVZKFtQIbx4K0t3R1+BuOt2nIuss1i0Vwxa68ywc+UEOfl8eBmVcozr2VDTDb6ce5cW13yQ6gpwf3VDUMb5C7R5qFRgtUijGac4QncRTnQMreakSctMA/wdL6DxhDs82I67+9EP0edctTO3DCv/VjwIAejTUa67rH9O6tmteIcNC0Re2ylsDXPH0rZYHH+FQcNIYGBJQkXL3oDXe/AcT4YWGjCMQ1TNNPiOgyYiLwejgzCi5/mgddl75RNKf+j8m4tx9yJSi97ATkj4h6NBR5XjiMDXxZwwVCpKF89csyiP6zSqddweWcIStJ+VLZEy4PEgGjKeC+YGEqMQmr2GCYAZtsaRsixvDpZnvqy4vBbWz7h31DyTZIs7KhQPBdp1B2EklwCoUhPTId2nD3xfTTC+BEsCWylp7tpvvzDL31A5D3mxUMnxnNYhA3RW9W8Vru1LICx8ChdHFkjhkZHO3PFcAShL4SiGGMbltDUKqVVAks1m+sFheO1kX5yc5GLtA39CSel6ZWsgyIwDecufseaCRXDkW5KnoakQmA+UMHos8yNi8tsIFITWEerc5oOWnmpcZxuL2z5YnJ+ntOnFREvY6z77lzELTjkOFPmGOnqBoECiFatj2ZC1o0B8tnWtCIkXMYKDYYnK2PlhCRfwlTa7lub9qtVo+4SblW+lcyTPifPy7w7uhT2Wkv1sFwykB9LUfnpcqExS6AO8lkng3JOjBGa5w2l2uoBa+232aI0ukPI6XdyStCffI4FRp1QoDVzLj0EZtvJc+iPSeHgAeJYMsCQ=='}], provider_specific_fields=None))], usage=Usage(completion_tokens=282, prompt_tokens=196, total_tokens=478, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=261, rejected_prediction_tokens=None, text_tokens=21, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=196, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -4279,7 +3904,7 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'toolu_01E79vEXyzAZRNKZV8Npihpk',\n", + "{'tool_call_id': 'call_e5966420acdb4a2397f3c6082298__thought__CvsFAXLI2ny8YLvBawTd8D7gBw9VmF8EuI2U6Cjg78oPBdDYqQ6SJ9VeYHkwte8J9GJUOSj0XxmWCDSnIUCE4k1ywaBXjKvCBEULv91iDTIlWtLzfAxqoVce6byGsTL6JRCyeLWtLn8pjbvQ2dCfMEc+JbwZqmjgkA5kkRVXbMSS+clU8gJ4eZzDEM8ybvBIbbK0SpHlcKqwnpADIGlRiAH/3DXaZlEnIqSlaVZKFtQIbx4K0t3R1+BuOt2nIuss1i0Vwxa68ywc+UEOfl8eBmVcozr2VDTDb6ce5cW13yQ6gpwf3VDUMb5C7R5qFRgtUijGac4QncRTnQMreakSctMA/wdL6DxhDs82I67+9EP0edctTO3DCv/VjwIAejTUa67rH9O6tmteIcNC0Re2ylsDXPH0rZYHH+FQcNIYGBJQkXL3oDXe/AcT4YWGjCMQ1TNNPiOgyYiLwejgzCi5/mgddl75RNKf+j8m4tx9yJSi97ATkj4h6NBR5XjiMDXxZwwVCpKF89csyiP6zSqddweWcIStJ+VLZEy4PEgGjKeC+YGEqMQmr2GCYAZtsaRsixvDpZnvqy4vBbWz7h31DyTZIs7KhQPBdp1B2EklwCoUhPTId2nD3xfTTC+BEsCWylp7tpvvzDL31A5D3mxUMnxnNYhA3RW9W8Vru1LICx8ChdHFkjhkZHO3PFcAShL4SiGGMbltDUKqVVAks1m+sFheO1kX5yc5GLtA39CSel6ZWsgyIwDecufseaCRXDkW5KnoakQmA+UMHos8yNi8tsIFITWEerc5oOWnmpcZxuL2z5YnJ+ntOnFREvY6z77lzELTjkOFPmGOnqBoECiFatj2ZC1o0B8tnWtCIkXMYKDYYnK2PlhCRfwlTa7lub9qtVo+4SblW+lcyTPifPy7w7uhT2Wkv1sFwykB9LUfnpcqExS6AO8lkng3JOjBGa5w2l2uoBa+232aI0ukPI6XdyStCffI4FRp1QoDVzLj0EZtvJc+iPSeHgAeJYMsCQ==',\n", " 'role': 'tool',\n", " 'name': 'simple_add',\n", " 'content': '15'}" @@ -4288,37 +3913,25 @@ "metadata": {}, "output_type": "display_data" }, - { - "data": { - "text/plain": [ - "{'tool_call_id': 'toolu_01SdiNubsdUZWUFho98S1EFN',\n", - " 'role': 'tool',\n", - " 'name': 'simple_add',\n", - " 'content': '3'}" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, { "data": { "text/markdown": [ - "**Step 2:** Now multiply 15 * 3\n", "\n", - "🔧 multiply({\"a\": 15, \"b\": 3})\n", + "\n", + "🔧 simple_add({\"b\": 1, \"a\": 2})\n", "\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `tool_calls`\n", - "- usage: `Usage(completion_tokens=82, prompt_tokens=1030, total_tokens=1112, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=98, prompt_tokens=232, total_tokens=330, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=78, rejected_prediction_tokens=None, text_tokens=20, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=232, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content='**Step 2:** Now multiply 15 * 3', role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 15, \"b\": 3}', 'name': 'multiply'}, 'id': 'toolu_01SmNbhdqa12MDdjC1XqAepY', 'type': 'function'}], function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=82, prompt_tokens=1030, total_tokens=1112, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[{'index': 0, 'provider_specific_fields': {'thought_signature': 'CoQCAXLI2nyE+/Pk8/Z5JYl9tai+rE0NrCA+TEcxB3NU/pknbVuQttrpWaN+hjPEAbEHo1+q4wCLeIkIto5Al8x2UCrssbg0b2b03Wh0MOpIM2Y3Xf13adEbXQN8FvL5ORxt5oxWrnGq19kzf9Ln9/SRQK1JsfK4TB61yp+XI8wIWHIPpNCAhGf/+XYfxL7FmAsMMtt5F7FQAkXUgzLZwhPGeXZ1yMQCyAU1RfHvc9lBswHldffGPvdL/C2juLH0IZy85aBSyjXJFkawjWZ50qUnQP2HiYedyKXJoz1irGU45jUM5QphiLSxXW/1Jan/s+HLsuYuGhy4Wa/BwUhepxjTyu0oby4='}, 'function': {'arguments': '{\"b\": 1, \"a\": 2}', 'name': 'simple_add'}, 'id': 'call_dc56f25b34a64356a08b9404a1aa__thought__CoQCAXLI2nyE+/Pk8/Z5JYl9tai+rE0NrCA+TEcxB3NU/pknbVuQttrpWaN+hjPEAbEHo1+q4wCLeIkIto5Al8x2UCrssbg0b2b03Wh0MOpIM2Y3Xf13adEbXQN8FvL5ORxt5oxWrnGq19kzf9Ln9/SRQK1JsfK4TB61yp+XI8wIWHIPpNCAhGf/+XYfxL7FmAsMMtt5F7FQAkXUgzLZwhPGeXZ1yMQCyAU1RfHvc9lBswHldffGPvdL/C2juLH0IZy85aBSyjXJFkawjWZ50qUnQP2HiYedyKXJoz1irGU45jUM5QphiLSxXW/1Jan/s+HLsuYuGhy4Wa/BwUhepxjTyu0oby4=', 'type': 'function'}], function_call=None, images=[], thinking_blocks=[{'type': 'thinking', 'thinking': '{\"functionCall\": {\"name\": \"simple_add\", \"args\": {\"b\": 1, \"a\": 2}}}', 'signature': 'CoQCAXLI2nyE+/Pk8/Z5JYl9tai+rE0NrCA+TEcxB3NU/pknbVuQttrpWaN+hjPEAbEHo1+q4wCLeIkIto5Al8x2UCrssbg0b2b03Wh0MOpIM2Y3Xf13adEbXQN8FvL5ORxt5oxWrnGq19kzf9Ln9/SRQK1JsfK4TB61yp+XI8wIWHIPpNCAhGf/+XYfxL7FmAsMMtt5F7FQAkXUgzLZwhPGeXZ1yMQCyAU1RfHvc9lBswHldffGPvdL/C2juLH0IZy85aBSyjXJFkawjWZ50qUnQP2HiYedyKXJoz1irGU45jUM5QphiLSxXW/1Jan/s+HLsuYuGhy4Wa/BwUhepxjTyu0oby4='}], provider_specific_fields=None))], usage=Usage(completion_tokens=98, prompt_tokens=232, total_tokens=330, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=78, rejected_prediction_tokens=None, text_tokens=20, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=232, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -4327,10 +3940,10 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'toolu_01SmNbhdqa12MDdjC1XqAepY',\n", + "{'tool_call_id': 'call_dc56f25b34a64356a08b9404a1aa__thought__CoQCAXLI2nyE+/Pk8/Z5JYl9tai+rE0NrCA+TEcxB3NU/pknbVuQttrpWaN+hjPEAbEHo1+q4wCLeIkIto5Al8x2UCrssbg0b2b03Wh0MOpIM2Y3Xf13adEbXQN8FvL5ORxt5oxWrnGq19kzf9Ln9/SRQK1JsfK4TB61yp+XI8wIWHIPpNCAhGf/+XYfxL7FmAsMMtt5F7FQAkXUgzLZwhPGeXZ1yMQCyAU1RfHvc9lBswHldffGPvdL/C2juLH0IZy85aBSyjXJFkawjWZ50qUnQP2HiYedyKXJoz1irGU45jUM5QphiLSxXW/1Jan/s+HLsuYuGhy4Wa/BwUhepxjTyu0oby4=',\n", " 'role': 'tool',\n", - " 'name': 'multiply',\n", - " 'content': '45'}" + " 'name': 'simple_add',\n", + " 'content': '3'}" ] }, "metadata": {}, @@ -4340,20 +3953,35 @@ "data": { "text/markdown": [ "\n", + "Of course, let's calculate the final answer step by step.\n", + "\n", + "**Step 1: Solve the first part in parentheses.**\n", + "* 10 + 5 = 15\n", "\n", - "**Step 3:** Finally, divide 45 / 3\n", + "**Step 2: Solve the second part in parentheses.**\n", + "* 2 + 1 = 3\n", + "\n", + "**Step 3: Perform the multiplication.**\n", + "* Now we take the result from Step 1 and multiply it by 3:\n", + "* 15 * 3 = 45\n", + "\n", + "**Step 4: Perform the division.**\n", + "* Finally, we take the result from Step 3 and divide it by the result from Step 2:\n", + "* 45 / 3 = 15\n", + "\n", + "**Final Answer:** The result of the calculation is **15**.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=24, prompt_tokens=1139, total_tokens=1163, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=303, prompt_tokens=280, total_tokens=583, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=139, rejected_prediction_tokens=None, text_tokens=164, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=280, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='\\n\\n**Step 3:** Finally, divide 45 / 3', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=24, prompt_tokens=1139, total_tokens=1163, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"\\nOf course, let's calculate the final answer step by step.\\n\\n**Step 1: Solve the first part in parentheses.**\\n* 10 + 5 = 15\\n\\n**Step 2: Solve the second part in parentheses.**\\n* 2 + 1 = 3\\n\\n**Step 3: Perform the multiplication.**\\n* Now we take the result from Step 1 and multiply it by 3:\\n* 15 * 3 = 45\\n\\n**Step 4: Perform the division.**\\n* Finally, we take the result from Step 3 and divide it by the result from Step 2:\\n* 45 / 3 = 15\\n\\n**Final Answer:** The result of the calculation is **15**.\", role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=303, prompt_tokens=280, total_tokens=583, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=139, rejected_prediction_tokens=None, text_tokens=164, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=280, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -4398,7 +4026,7 @@ "metadata": {}, "outputs": [], "source": [ - "pr = \"What is 1+2, and then the result of adding +2, and then +3 to it? Use tools to calculate!\"\n", + "pr = \"What is 1+2, and then the result of adding +2, and then +3 to it? Use tools to make the calculations!\"\n", "c = Chat(model, tools=[simple_add])" ] }, @@ -4411,21 +4039,27 @@ { "data": { "text/markdown": [ + "Based on my initial calculation, I found that:\n", "\n", + "* 1 + 2 = 3\n", "\n", - "Let me continue with the next calculation. Now I'll add 2 to the result (3+2):\n", + "However, I did not complete the full goal you requested. I was unable to perform the subsequent additions.\n", + "\n", + "To complete the problem, the following steps still need to be done:\n", + "1. Take the result of 3 and add 2 to it.\n", + "2. Take that new result and add 3 to it.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=33, prompt_tokens=777, total_tokens=810, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=659, prompt_tokens=169, total_tokens=828, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=570, rejected_prediction_tokens=None, text_tokens=89, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=169, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"\\n\\nLet me continue with the next calculation. Now I'll add 2 to the result (3+2):\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=33, prompt_tokens=777, total_tokens=810, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Based on my initial calculation, I found that:\\n\\n* 1 + 2 = 3\\n\\nHowever, I did not complete the full goal you requested. I was unable to perform the subsequent additions.\\n\\nTo complete the problem, the following steps still need to be done:\\n1. Take the result of 3 and add 2 to it.\\n2. Take that new result and add 3 to it.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=659, prompt_tokens=169, total_tokens=828, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=570, rejected_prediction_tokens=None, text_tokens=89, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=169, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -4445,7 +4079,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert c.hist[-2]['content'] == _final_prompt" + "assert c.hist[-2] == _final_prompt" ] }, { @@ -4526,6 +4160,7 @@ " tools=self.tool_schemas, reasoning_effort=effort.get(think), tool_choice=tool_choice,\n", " # temperature is not supported when reasoning\n", " temperature=None if think else ifnone(temp,self.temp), \n", + " caching=self.cache and 'claude' not in self.model,\n", " **kwargs)\n", " if stream:\n", " if prefill: yield _mk_prefill(prefill)\n", @@ -4593,19 +4228,19 @@ { "data": { "text/markdown": [ - "2+2 = 4\n", + "2 + 2 = 4\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=11, prompt_tokens=14, total_tokens=25, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=217, prompt_tokens=8, total_tokens=225, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=210, rejected_prediction_tokens=None, text_tokens=7, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=8, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='2+2 = 4', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=11, prompt_tokens=14, total_tokens=25, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='2 + 2 = 4', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=217, prompt_tokens=8, total_tokens=225, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=210, rejected_prediction_tokens=None, text_tokens=7, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=8, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -4650,20 +4285,20 @@ "text/markdown": [ "\n", "\n", - "🔧 async_add({\"a\": 5, \"b\": 7})\n", + "🔧 async_add({\"b\": 7, \"a\": 5})\n", "\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `tool_calls`\n", - "- usage: `Usage(completion_tokens=70, prompt_tokens=607, total_tokens=677, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=236, prompt_tokens=73, total_tokens=309, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=216, rejected_prediction_tokens=None, text_tokens=20, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=73, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(index=0, function=Function(arguments='{\"a\": 5, \"b\": 7}', name='async_add'), id='toolu_012aT8B41h8fFsVGp9yFncEy', type='function')], function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=70, prompt_tokens=607, total_tokens=677, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(index=0, provider_specific_fields={'thought_signature': 'CsEGAXLI2nziimeXQJFA6Ji4jJ4nO1RDNAAKHUFLT55IihZXAWXS9ZGJT/nD3XibbDf0+MDbIhpJ+RZOLRZSvHPcdWNEscg8mINDnZu2jaWAbd3MLM63mQTLwaKIGQ0mM/JzqNwCqhPFtdldHEFz+zYXM23dr/9Epon0d3/fbFtA3cErty8xC/Y9j7sNf4ATlRYn6rm1Geb6sZajuSjkqwAsguOwdDm0r1jwHprKK1ucco1tte6w1OyB0nYZPm24847ob7haFd5hc9MTvonmBMzrywALc1h2xrOEedOO4wsd/DkeYWWkOuKBxDBwvUMbBCumZMTcA7VzxG2o2rbmprntAMPB4wO1+oWFvvQ4HehLJ03Vv19M8FOUhc2GR/ZuIvYgx4Oz+DzwXkqLyHFZkn9WwX8ZMqTGkEUTaPoyUWS2g+qCx8kRSF5NQ2zcUV/qvC45jaf34+R3WLEgk8GXdOAGsYpkjUijiio5l1PVzqsCmk8cakl4qrKHFUHSIX6LnicSxduU7K8Iqmdlpo0sqVSuPoLgh2I414nLcxGqznni5bm1sMun+UVxx4F9PVJOMq3xmD6TbGMK5Kxco9MYnkO9rSx6bipyjNElc+6h1YIahP9qumV9mbmVZdPVA5gjRbCyRycuhRPC5pqIgy07MfHXMyljz16PCzonWtYg7qeRFBxdgx/+rH3rjJZ+O/n/afwlWpKhQ78I0ozH8x/TpsTxM8qhTgOFa0KHrj3HLE9o67Ru/NMhgKH39Rz/lAculny8QsNT0NijLF1pLyiHvm9hN5Qz505sUSPnpJAoUG40YqGB0z0u0UlXoKv+VhgGWEAZmnmL4+i5b0dGEJjHpNDAFK0/SPSc4B+Q4P6Dad0fFneUJQsR8VvumUxQJR8ioRyIENa+qPQuLLCoA/uN8wanuicCJv2iNeLwvS1Y62rQNPvFokQBL2yiZMWmJFzvLk83AYTZS7t6Gi+r6qeR//rTC1fvqIdrX3nVLwxGRiFEvVMuvKh03qXOXJJ6mOd6AzVU1m0F3yV5QSEZjnT2K7vseS9Ci8b+U0QPxuYOKfWg1R5NYj0uGjHF6eqoeKHwiNiWBJTJzMbtBRhK/0l5LlzXYgA='}, function=Function(arguments='{\"b\": 7, \"a\": 5}', name='async_add'), id='call_8df6ca172d63473f943f6ef05516__thought__CsEGAXLI2nziimeXQJFA6Ji4jJ4nO1RDNAAKHUFLT55IihZXAWXS9ZGJT/nD3XibbDf0+MDbIhpJ+RZOLRZSvHPcdWNEscg8mINDnZu2jaWAbd3MLM63mQTLwaKIGQ0mM/JzqNwCqhPFtdldHEFz+zYXM23dr/9Epon0d3/fbFtA3cErty8xC/Y9j7sNf4ATlRYn6rm1Geb6sZajuSjkqwAsguOwdDm0r1jwHprKK1ucco1tte6w1OyB0nYZPm24847ob7haFd5hc9MTvonmBMzrywALc1h2xrOEedOO4wsd/DkeYWWkOuKBxDBwvUMbBCumZMTcA7VzxG2o2rbmprntAMPB4wO1+oWFvvQ4HehLJ03Vv19M8FOUhc2GR/ZuIvYgx4Oz+DzwXkqLyHFZkn9WwX8ZMqTGkEUTaPoyUWS2g+qCx8kRSF5NQ2zcUV/qvC45jaf34+R3WLEgk8GXdOAGsYpkjUijiio5l1PVzqsCmk8cakl4qrKHFUHSIX6LnicSxduU7K8Iqmdlpo0sqVSuPoLgh2I414nLcxGqznni5bm1sMun+UVxx4F9PVJOMq3xmD6TbGMK5Kxco9MYnkO9rSx6bipyjNElc+6h1YIahP9qumV9mbmVZdPVA5gjRbCyRycuhRPC5pqIgy07MfHXMyljz16PCzonWtYg7qeRFBxdgx/+rH3rjJZ+O/n/afwlWpKhQ78I0ozH8x/TpsTxM8qhTgOFa0KHrj3HLE9o67Ru/NMhgKH39Rz/lAculny8QsNT0NijLF1pLyiHvm9hN5Qz505sUSPnpJAoUG40YqGB0z0u0UlXoKv+VhgGWEAZmnmL4+i5b0dGEJjHpNDAFK0/SPSc4B+Q4P6Dad0fFneUJQsR8VvumUxQJR8ioRyIENa+qPQuLLCoA/uN8wanuicCJv2iNeLwvS1Y62rQNPvFokQBL2yiZMWmJFzvLk83AYTZS7t6Gi+r6qeR//rTC1fvqIdrX3nVLwxGRiFEvVMuvKh03qXOXJJ6mOd6AzVU1m0F3yV5QSEZjnT2K7vseS9Ci8b+U0QPxuYOKfWg1R5NYj0uGjHF6eqoeKHwiNiWBJTJzMbtBRhK/0l5LlzXYgA=', type='function')], function_call=None, images=[], thinking_blocks=[{'type': 'thinking', 'thinking': '{\"functionCall\": {\"name\": \"async_add\", \"args\": {\"b\": 7, \"a\": 5}}}', 'signature': 'CsEGAXLI2nziimeXQJFA6Ji4jJ4nO1RDNAAKHUFLT55IihZXAWXS9ZGJT/nD3XibbDf0+MDbIhpJ+RZOLRZSvHPcdWNEscg8mINDnZu2jaWAbd3MLM63mQTLwaKIGQ0mM/JzqNwCqhPFtdldHEFz+zYXM23dr/9Epon0d3/fbFtA3cErty8xC/Y9j7sNf4ATlRYn6rm1Geb6sZajuSjkqwAsguOwdDm0r1jwHprKK1ucco1tte6w1OyB0nYZPm24847ob7haFd5hc9MTvonmBMzrywALc1h2xrOEedOO4wsd/DkeYWWkOuKBxDBwvUMbBCumZMTcA7VzxG2o2rbmprntAMPB4wO1+oWFvvQ4HehLJ03Vv19M8FOUhc2GR/ZuIvYgx4Oz+DzwXkqLyHFZkn9WwX8ZMqTGkEUTaPoyUWS2g+qCx8kRSF5NQ2zcUV/qvC45jaf34+R3WLEgk8GXdOAGsYpkjUijiio5l1PVzqsCmk8cakl4qrKHFUHSIX6LnicSxduU7K8Iqmdlpo0sqVSuPoLgh2I414nLcxGqznni5bm1sMun+UVxx4F9PVJOMq3xmD6TbGMK5Kxco9MYnkO9rSx6bipyjNElc+6h1YIahP9qumV9mbmVZdPVA5gjRbCyRycuhRPC5pqIgy07MfHXMyljz16PCzonWtYg7qeRFBxdgx/+rH3rjJZ+O/n/afwlWpKhQ78I0ozH8x/TpsTxM8qhTgOFa0KHrj3HLE9o67Ru/NMhgKH39Rz/lAculny8QsNT0NijLF1pLyiHvm9hN5Qz505sUSPnpJAoUG40YqGB0z0u0UlXoKv+VhgGWEAZmnmL4+i5b0dGEJjHpNDAFK0/SPSc4B+Q4P6Dad0fFneUJQsR8VvumUxQJR8ioRyIENa+qPQuLLCoA/uN8wanuicCJv2iNeLwvS1Y62rQNPvFokQBL2yiZMWmJFzvLk83AYTZS7t6Gi+r6qeR//rTC1fvqIdrX3nVLwxGRiFEvVMuvKh03qXOXJJ6mOd6AzVU1m0F3yV5QSEZjnT2K7vseS9Ci8b+U0QPxuYOKfWg1R5NYj0uGjHF6eqoeKHwiNiWBJTJzMbtBRhK/0l5LlzXYgA='}], provider_specific_fields=None))], usage=Usage(completion_tokens=236, prompt_tokens=73, total_tokens=309, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=216, rejected_prediction_tokens=None, text_tokens=20, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=73, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -4672,7 +4307,7 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'toolu_012aT8B41h8fFsVGp9yFncEy',\n", + "{'tool_call_id': 'call_8df6ca172d63473f943f6ef05516__thought__CsEGAXLI2nziimeXQJFA6Ji4jJ4nO1RDNAAKHUFLT55IihZXAWXS9ZGJT/nD3XibbDf0+MDbIhpJ+RZOLRZSvHPcdWNEscg8mINDnZu2jaWAbd3MLM63mQTLwaKIGQ0mM/JzqNwCqhPFtdldHEFz+zYXM23dr/9Epon0d3/fbFtA3cErty8xC/Y9j7sNf4ATlRYn6rm1Geb6sZajuSjkqwAsguOwdDm0r1jwHprKK1ucco1tte6w1OyB0nYZPm24847ob7haFd5hc9MTvonmBMzrywALc1h2xrOEedOO4wsd/DkeYWWkOuKBxDBwvUMbBCumZMTcA7VzxG2o2rbmprntAMPB4wO1+oWFvvQ4HehLJ03Vv19M8FOUhc2GR/ZuIvYgx4Oz+DzwXkqLyHFZkn9WwX8ZMqTGkEUTaPoyUWS2g+qCx8kRSF5NQ2zcUV/qvC45jaf34+R3WLEgk8GXdOAGsYpkjUijiio5l1PVzqsCmk8cakl4qrKHFUHSIX6LnicSxduU7K8Iqmdlpo0sqVSuPoLgh2I414nLcxGqznni5bm1sMun+UVxx4F9PVJOMq3xmD6TbGMK5Kxco9MYnkO9rSx6bipyjNElc+6h1YIahP9qumV9mbmVZdPVA5gjRbCyRycuhRPC5pqIgy07MfHXMyljz16PCzonWtYg7qeRFBxdgx/+rH3rjJZ+O/n/afwlWpKhQ78I0ozH8x/TpsTxM8qhTgOFa0KHrj3HLE9o67Ru/NMhgKH39Rz/lAculny8QsNT0NijLF1pLyiHvm9hN5Qz505sUSPnpJAoUG40YqGB0z0u0UlXoKv+VhgGWEAZmnmL4+i5b0dGEJjHpNDAFK0/SPSc4B+Q4P6Dad0fFneUJQsR8VvumUxQJR8ioRyIENa+qPQuLLCoA/uN8wanuicCJv2iNeLwvS1Y62rQNPvFokQBL2yiZMWmJFzvLk83AYTZS7t6Gi+r6qeR//rTC1fvqIdrX3nVLwxGRiFEvVMuvKh03qXOXJJ6mOd6AzVU1m0F3yV5QSEZjnT2K7vseS9Ci8b+U0QPxuYOKfWg1R5NYj0uGjHF6eqoeKHwiNiWBJTJzMbtBRhK/0l5LlzXYgA=',\n", " 'role': 'tool',\n", " 'name': 'async_add',\n", " 'content': '12'}" @@ -4684,21 +4319,23 @@ { "data": { "text/markdown": [ + "Based on the tool usage, I was able to complete the requested calculation.\n", "\n", + "**Finding:** The sum of 5 and 7 is 12.\n", "\n", - "The result of 5 + 7 is **12**.\n", + "The goal was successfully completed, and no further work is needed.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=18, prompt_tokens=731, total_tokens=749, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=624, prompt_tokens=148, total_tokens=772, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=578, rejected_prediction_tokens=None, text_tokens=46, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=148, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='\\n\\nThe result of 5 + 7 is **12**.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=18, prompt_tokens=731, total_tokens=749, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Based on the tool usage, I was able to complete the requested calculation.\\n\\n**Finding:** The sum of 5 and 7 is 12.\\n\\nThe goal was successfully completed, and no further work is needed.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=624, prompt_tokens=148, total_tokens=772, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=578, rejected_prediction_tokens=None, text_tokens=46, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=148, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "metadata": {}, @@ -4721,7 +4358,7 @@ "data": { "text/plain": [ "[{'role': 'user', 'content': 'What is 2+2?'},\n", - " Message(content='2+2 = 4', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None})]" + " Message(content='2 + 2 = 4', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None)]" ] }, "execution_count": null, @@ -4761,10 +4398,15 @@ "text": [ "\n", "🔧 async_add\n", - "{'tool_call_id': 'toolu_01HNTgufGPMSa2UmbMYBA6RQ', 'role': 'tool', 'name': 'async_add', 'content': '12'}\n", + "{'tool_call_id': 'call_9baa19fcb6c840c38433a09781ec__thought__CiIBcsjafHuwU4cK6Kpus2x9D3E4lG1q4SHrHScUPituJtHYClsBcsjafCAY92mvr+WKuite3oiaiuarKxrYI03XN5hhTV8QXGhOS5MH/nvmUfDScFnQBs2ckrofs8I+7s9HhcMMdctW4itYOTunnVDqzKdhTtkfQPcyWs0TuXgaCowBAXLI2nzyTD5Pj7Z6rZ+CFlKdgzEb4SK5ERJhIV1sh09swtVg71QOPl4GzyHNZiQeWHEawqHUgNQwijzLD6qli+I5H/lpq4vEF1DPQKhUlInKPI11YDCiNkTmTDfE2ShrlORJ94YSegbYD1aCscWugkgof3ac2CbATn06x4Fw80QLSzlxV8/+26b4C4kKhgEBcsjafLrN16P5zwelYxLqLIb+/FCcK8cFbvGNzeNSgo4wSBRmMGAR6B1Jy77dzpazmebiOiE6iOwZNQBi3tm5QIfSY7kBHY9b0ofh56GvRT+FM/FHP6k4dy+ZSM26Urc43MhsX2U8mk+owqirwIq34qVssvjd7xy3VD71vYsiXd4OKSlHQgp8AXLI2nwAdib+UJhos55OM9qf29ALav9EsDuRD9vif9+BnxgBqvCz/mIdNlpRHqAEFTBcdWkBlhseou2ToQXHReQPgJg977puIj5h5khgaiBuRfwxgMhmI5KT8Dc20KdcdsHLZ1D+fdrPyG0L2ZwQz635MX/EXe5fAvqOtgp5AXLI2nz5jn+zGrAwt7POCRxIzyapHU7HUSi7esMjbl9tf1KZwJqOLiDSEn1d/1TmFkfkXN7sQU9c3qP31CXaNQ/GPUv5mgBQZhIYUfTKO8lICMwOAoOMfHZ9a8TPtlsTMRz+dco1ZUg1AIDg5DHyPF3w4UzsK/SWtQqJAQFyyNp8l1jexvzmwnzASjp7S2QTFuWf/KEaVpqevoy4vR8wn9hVrB5YtuydA/kKEq7mdS+Icc8Yr5ADr2d+W6IV6ZFj+TS4GiGUAfrD2SZYGXp858qElYbCCE27Ixcxzlnot3UaWKNu0Bnfpi5LFChmcHkrrrEsXyrO2rckYr6YSva1ZqOqDaTlCosBAXLI2nw26y1xMFVZguIZfNAyJROO5xY7QSA0tp0DThmbgjb+D2r9mPTdHC8/1z+bje8qt4EiL0EMfF1UL6R0GoOndCoD3ovtLVAcIPJN/ZsaDfMUzp06ivdvAg34bnCBgKqEpK3pmxStU+JereMlYbJvydsWOTRpXTRF2J3LRh/T7cCIUafAJVU0rgqWAQFyyNp87zPqN7CCA5TF4CJJt97AM2pzgmKC5d3l1fTu2eHFIheLTc4IjoiuQy4yzerfrrhUJn2c9vZsX5PtdxY1bmj6Yv3c8e17MciPBs+c/Z6yjwUYdqkgMpR3RK4aqirE5GNW6D7rHKuXeBo6tAnVS8sPE0rrJ2DSlA2jHoDvxqqdn56oRYmaqPZLwQc2RTp/KsvQagpFAXLI2nynO6b2A2yWL7jxVhvDjcc/EdfciKpd2mNi+ur13yqGUeMjpIej6ah05MWCBPMdSbVrxatl83/Mz8V7GZzxr+vb', 'role': 'tool', 'name': 'async_add', 'content': '12'}\n", + "Based on the previous turn, I was asked to calculate 5 + 7.\n", + "\n", + "**Findings:**\n", + "I used the `add` tool with the inputs `a=5` and `b=7`. The tool returned the result **12**.\n", "\n", + "My finding is that 5 + 7 = 12.\n", "\n", - "The result of 5 + 7 is **12**." + "The goal was successfully completed, and no further work is needed." ] } ], @@ -4906,20 +4548,18 @@ "#| export\n", "class AsyncStreamFormatter:\n", " def __init__(self, include_usage=False, mx=2000):\n", - " self.outp,self.tcs,self.include_usage,self.think,self.mx = '',{},include_usage,False,mx\n", + " self.outp,self.tcs,self.include_usage,self.mx = '',{},include_usage,mx\n", " \n", " def format_item(self, o):\n", " \"Format a single item from the response stream.\"\n", " res = ''\n", " if isinstance(o, ModelResponseStream):\n", " d = o.choices[0].delta\n", - " if nested_idx(d, 'reasoning_content'): \n", - " self.think = True\n", - " res += '🧠'\n", - " elif self.think:\n", - " self.think = False\n", - " res += '\\n\\n'\n", - " if c:=d.content: res+=c\n", + " if nested_idx(d, 'reasoning_content') and d['reasoning_content']!='{\"text\": \"\"}':\n", + " res+= '🧠' if not self.outp or self.outp[-1]=='🧠' else '\\n\\n🧠' # gemini can interleave reasoning\n", + " elif self.outp and self.outp[-1] == '🧠': res+= '\\n\\n'\n", + " if c:=d.content: # gemini has text content in last reasoning chunk\n", + " res+=f\"\\n\\n{c}\" if res and res[-1] == '🧠' else c\n", " elif isinstance(o, ModelResponse):\n", " if self.include_usage: res += f\"\\nUsage: {o.usage}\"\n", " if c:=getattr(contents(o),'tool_calls',None):\n", @@ -5098,18 +4738,20 @@ { "data": { "text/markdown": [ + "🧠\n", + "\n", "\n", "\n", "
\n", "\n", "```json\n", "{\n", - " \"id\": \"toolu_01HNTgufGPMSa2UmbMYBA6RQ\",\n", + " \"id\": \"call_62bb9e04270944bdb0cd4a9c82a9__thought__CiIBcsjafHuwU4cK6Kpus2x9D3E4lG1q4SHrHScUPituJtHYClsBcsjafCAY92mvr+WKuite3oiaiuarKxrYI03XN5hhTV8QXGhOS5MH/nvmUfDScFnQBs2ckrofs8I+7s9HhcMMdctW4itYOTunnVDqzKdhTtkfQPcyWs0TuXgaCowBAXLI2nzyTD5Pj7Z6rZ+CFlKdgzEb4SK5ERJhIV1sh09swtVg71QOPl4GzyHNZiQeWHEawqHUgNQwijzLD6qli+I5H/lpq4vEF1DPQKhUlInKPI11YDCiNkTmTDfE2ShrlORJ94YSegbYD1aCscWugkgof3ac2CbATn06x4Fw80QLSzlxV8/+26b4C4kKhgEBcsjafLrN16P5zwelYxLqLIb+/FCcK8cFbvGNzeNSgo4wSBRmMGAR6B1Jy77dzpazmebiOiE6iOwZNQBi3tm5QIfSY7kBHY9b0ofh56GvRT+FM/FHP6k4dy+ZSM26Urc43MhsX2U8mk+owqirwIq34qVssvjd7xy3VD71vYsiXd4OKSlHQgp8AXLI2nwAdib+UJhos55OM9qf29ALav9EsDuRD9vif9+BnxgBqvCz/mIdNlpRHqAEFTBcdWkBlhseou2ToQXHReQPgJg977puIj5h5khgaiBuRfwxgMhmI5KT8Dc20KdcdsHLZ1D+fdrPyG0L2ZwQz635MX/EXe5fAvqOtgp5AXLI2nz5jn+zGrAwt7POCRxIzyapHU7HUSi7esMjbl9tf1KZwJqOLiDSEn1d/1TmFkfkXN7sQU9c3qP31CXaNQ/GPUv5mgBQZhIYUfTKO8lICMwOAoOMfHZ9a8TPtlsTMRz+dco1ZUg1AIDg5DHyPF3w4UzsK/SWtQqJAQFyyNp8l1jexvzmwnzASjp7S2QTFuWf/KEaVpqevoy4vR8wn9hVrB5YtuydA/kKEq7mdS+Icc8Yr5ADr2d+W6IV6ZFj+TS4GiGUAfrD2SZYGXp858qElYbCCE27Ixcxzlnot3UaWKNu0Bnfpi5LFChmcHkrrrEsXyrO2rckYr6YSva1ZqOqDaTlCosBAXLI2nw26y1xMFVZguIZfNAyJROO5xY7QSA0tp0DThmbgjb+D2r9mPTdHC8/1z+bje8qt4EiL0EMfF1UL6R0GoOndCoD3ovtLVAcIPJN/ZsaDfMUzp06ivdvAg34bnCBgKqEpK3pmxStU+JereMlYbJvydsWOTRpXTRF2J3LRh/T7cCIUafAJVU0rgqWAQFyyNp87zPqN7CCA5TF4CJJt97AM2pzgmKC5d3l1fTu2eHFIheLTc4IjoiuQy4yzerfrrhUJn2c9vZsX5PtdxY1bmj6Yv3c8e17MciPBs+c/Z6yjwUYdqkgMpR3RK4aqirE5GNW6D7rHKuXeBo6tAnVS8sPE0rrJ2DSlA2jHoDvxqqdn56oRYmaqPZLwQc2RTp/KsvQagpFAXLI2nynO6b2A2yWL7jxVhvDjcc/EdfciKpd2mNi+ur13yqGUeMjpIej6ah05MWCBPMdSbVrxatl83/Mz8V7GZzxr+vb\",\n", " \"call\": {\n", " \"function\": \"async_add\",\n", " \"arguments\": {\n", - " \"a\": \"5\",\n", - " \"b\": \"7\"\n", + " \"b\": \"7\",\n", + " \"a\": \"5\"\n", " }\n", " },\n", " \"result\": \"12\"\n", @@ -5118,9 +4760,14 @@ "\n", "
\n", "\n", + "Based on the previous turn, I was asked to calculate 5 + 7.\n", + "\n", + "**Findings:**\n", + "I used the `add` tool with the inputs `a=5` and `b=7`. The tool returned the result **12**.\n", "\n", + "My finding is that 5 + 7 = 12.\n", "\n", - "The result of 5 + 7 is **12**." + "The goal was successfully completed, and no further work is needed." ], "text/plain": [ "" @@ -5153,21 +4800,16 @@ { "data": { "text/markdown": [ - "🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠🧠\n", + "🧠🧠🧠🧠\n", "\n", - "# Most Efficient: Use Built-in Sort\n", + "For a list of 1000 random integers, the most efficient way is to use the **built-in sorting function** provided by your programming language.\n", "\n", - "For 1000 random integers, **use your language's built-in sort function** (e.g., `sort()` in Python, `Arrays.sort()` in Java, `std::sort()` in C++).\n", + "* **Python:** `my_list.sort()` or `sorted(my_list)`\n", + "* **Java:** `Arrays.sort()`\n", + "* **JavaScript:** `my_array.sort((a, b) => a - b)`\n", + "* **C++:** `std::sort()`\n", "\n", - "**Why:**\n", - "- Highly optimized implementations (Timsort, Introsort)\n", - "- O(n log n) average time complexity\n", - "- Handles random data efficiently\n", - "- No need to reinvent the wheel\n", - "\n", - "**If implementing from scratch:** Quicksort is excellent for random data, averaging O(n log n).\n", - "\n", - "**Note:** For only 1000 elements, even simpler algorithms like merge sort or heap sort would work fine—the difference is negligible at this scale." + "These functions are highly optimized, often using a hybrid algorithm like **Introsort** (a mix of Quicksort, Heapsort, and Insertion Sort). For a small size like 1000, they are practically unbeatable in terms of both implementation speed and performance." ], "text/plain": [ "" @@ -5179,8 +4821,7 @@ ], "source": [ "chat = AsyncChat(model)\n", - "res = await chat(\"Briefly, what's the most efficient way to sort a list of 1000 random integers?\",\n", - " think='l',stream=True)\n", + "res = await chat(\"Briefly, what's the most efficient way to sort a list of 1000 random integers?\", think='l',stream=True)\n", "_ = await adisplay_stream(res)" ] }, @@ -5201,18 +4842,15 @@ { "data": { "text/markdown": [ - "I'll calculate ((10 + 5) * 3) / (2 + 1) step by step, using parallel calls where possible.\n", + "🧠\n", "\n", - "**Batch 1: Calculate the independent additions**\n", - "Let me start by calculating the two addition operations that don't depend on each other:\n", - "- 10 + 5 (for the numerator)\n", - "- 2 + 1 (for the denominator)\n", + "Of course, let's break this down. First, we'll evaluate the expressions in the parentheses, `(10 + 5)` and `(2 + 1)`, in parallel.\n", "\n", "
\n", "\n", "```json\n", "{\n", - " \"id\": \"toolu_01C7ovV4CVVSPKkhoJaQyhZn\",\n", + " \"id\": \"call_003892d8c1fb4d03a39ea8536866\",\n", " \"call\": {\n", " \"function\": \"simple_add\",\n", " \"arguments\": {\n", @@ -5232,7 +4870,7 @@ "\n", "```json\n", "{\n", - " \"id\": \"toolu_017RPubP59iK8LWrCF6XWgwE\",\n", + " \"id\": \"call_b623a221f90e4c54946b846d3772\",\n", " \"call\": {\n", " \"function\": \"simple_add\",\n", " \"arguments\": {\n", @@ -5246,24 +4884,18 @@ "\n", "
\n", "\n", - "**After Batch 1:** We have:\n", - "- 10 + 5 = 15\n", - "- 2 + 1 = 3\n", - "- So our expression is now: (15 * 3) / 3\n", - "\n", - "**Batch 2: Calculate the multiplication**\n", - "Now I need to multiply 15 * 3 before I can do the final division:\n", + "Now that we have the results for the expressions in the parentheses, we can proceed. We have simplified the expression to `15 * 3 / 3`. Next, we will perform the multiplication and division in order from left to right. So, we'll first multiply 15 by 3.\n", "\n", "
\n", "\n", "```json\n", "{\n", - " \"id\": \"toolu_0193kK1ryf5LsDJhGXqn9kpy\",\n", + " \"id\": \"call_808ff344c9e74712bc8e60e444b5\",\n", " \"call\": {\n", " \"function\": \"multiply\",\n", " \"arguments\": {\n", - " \"a\": \"15\",\n", - " \"b\": \"3\"\n", + " \"b\": \"3\",\n", + " \"a\": \"15\"\n", " }\n", " },\n", " \"result\": \"45\"\n", @@ -5273,12 +4905,28 @@ "
\n", "\n", "\n", + "We have now calculated `15 * 3` and have the result `45`. The expression is now `45 / 3`. The final step is to perform the division.\n", + "\n", + "
\n", + "\n", + "```json\n", + "{\n", + " \"id\": \"call_70b5bfad7caf487389f1ccef338f\",\n", + " \"call\": {\n", + " \"function\": \"divide\",\n", + " \"arguments\": {\n", + " \"b\": \"3\",\n", + " \"a\": \"45\"\n", + " }\n", + " },\n", + " \"result\": \"15.0\"\n", + "}\n", + "```\n", + "\n", + "
\n", "\n", - "**After Batch 2:** We have:\n", - "- 15 * 3 = 45\n", - "- So our expression is now: 45 / 3\n", "\n", - "**Batch 3: Calculate the final division**" + "We have now completed the final step of the calculation. The expression `45 / 3` evaluates to `15.0`. Therefore, the final answer is 15.0.\n" ], "text/plain": [ "" @@ -5306,7 +4954,7 @@ { "data": { "text/plain": [ - "Message(content=\"I'll calculate ((10 + 5) * 3) / (2 + 1) step by step, using parallel calls where possible.\\n\\n**Batch 1: Calculate the independent additions**\\nLet me start by calculating the two addition operations that don't depend on each other:\\n- 10 + 5 (for the numerator)\\n- 2 + 1 (for the denominator)\", role='assistant', tool_calls=[{'function': {'arguments': '{\"a\": 10, \"b\": 5}', 'name': 'simple_add'}, 'id': 'toolu_01C7ovV4CVVSPKkhoJaQyhZn', 'type': 'function'}, {'function': {'arguments': '{\"a\": 2, \"b\": 1}', 'name': 'simple_add'}, 'id': 'toolu_017RPubP59iK8LWrCF6XWgwE', 'type': 'function'}], function_call=None, provider_specific_fields=None)" + "Message(content=\"Of course, let's break this down. First, we'll evaluate the expressions in the parentheses, `(10 + 5)` and `(2 + 1)`, in parallel.\", role='assistant', tool_calls=[{'function': {'arguments': '{\"a\": 10, \"b\": 5}', 'name': 'simple_add'}, 'id': 'call_003892d8c1fb4d03a39ea8536866', 'type': 'function'}, {'function': {'arguments': '{\"a\": 2, \"b\": 1}', 'name': 'simple_add'}, 'id': 'call_b623a221f90e4c54946b846d3772', 'type': 'function'}], function_call=None, provider_specific_fields=None, reasoning_content='{\"text\": \"Of course, let\\'s break this down. First, we\\'ll evaluate the expressions in the parentheses, `(10\"}')" ] }, "execution_count": null, @@ -5327,7 +4975,7 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'toolu_01C7ovV4CVVSPKkhoJaQyhZn',\n", + "{'tool_call_id': 'call_003892d8c1fb4d03a39ea8536866',\n", " 'role': 'tool',\n", " 'name': 'simple_add',\n", " 'content': '15'}" @@ -5351,7 +4999,7 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'toolu_017RPubP59iK8LWrCF6XWgwE',\n", + "{'tool_call_id': 'call_b623a221f90e4c54946b846d3772',\n", " 'role': 'tool',\n", " 'name': 'simple_add',\n", " 'content': '3'}" @@ -5375,7 +5023,7 @@ { "data": { "text/plain": [ - "Message(content='**After Batch 1:** We have:\\n- 10 + 5 = 15\\n- 2 + 1 = 3\\n- So our expression is now: (15 * 3) / 3\\n\\n**Batch 2: Calculate the multiplication**\\nNow I need to multiply 15 * 3 before I can do the final division:', role='assistant', tool_calls=[{'function': {'arguments': '{\"a\": 15, \"b\": 3}', 'name': 'multiply'}, 'id': 'toolu_0193kK1ryf5LsDJhGXqn9kpy', 'type': 'function'}], function_call=None, provider_specific_fields=None)" + "Message(content=\"Now that we have the results for the expressions in the parentheses, we can proceed. We have simplified the expression to `15 * 3 / 3`. Next, we will perform the multiplication and division in order from left to right. So, we'll first multiply 15 by 3.\", role='assistant', tool_calls=[{'function': {'arguments': '{\"b\": 3, \"a\": 15}', 'name': 'multiply'}, 'id': 'call_808ff344c9e74712bc8e60e444b5', 'type': 'function'}], function_call=None, provider_specific_fields=None)" ] }, "execution_count": null, @@ -5396,7 +5044,7 @@ { "data": { "text/plain": [ - "{'tool_call_id': 'toolu_0193kK1ryf5LsDJhGXqn9kpy',\n", + "{'tool_call_id': 'call_808ff344c9e74712bc8e60e444b5',\n", " 'role': 'tool',\n", " 'name': 'multiply',\n", " 'content': '45'}" @@ -5428,34 +5076,33 @@ { "data": { "text/markdown": [ - "We just calculated the mathematical expression **((10 + 5) * 3) / (2 + 1)** step by step!\n", + "We just solved the mathematical expression `(10 + 5) * (2 + 1) / 3` step-by-step using the available tools.\n", "\n", - "Here's what happened:\n", + "Here's a breakdown of the process:\n", "\n", - "1. **First**, I calculated the two additions in parentheses:\n", - " - 10 + 5 = 15\n", - " - 2 + 1 = 3\n", + "1. **Parentheses First:** We first calculated the expressions inside the parentheses:\n", + " * `10 + 5` was solved using `simple_add(a=10, b=5)`, which gave us `15`.\n", + " * `2 + 1` was solved using `simple_add(a=2, b=1)`, which gave us `3`.\n", "\n", - "2. **Then**, I multiplied the first result by 3:\n", - " - 15 * 3 = 45\n", + "2. **Multiplication:** Next, we multiplied the results from the first step:\n", + " * `15 * 3` was solved using `multiply(a=15, b=3)`, resulting in `45`.\n", "\n", - "3. **Finally**, I was about to divide 45 by 3 to get the final answer (which would be 15), but you asked me this question before I completed that last step!\n", + "3. **Division:** Finally, we performed the division:\n", + " * `45 / 3` was solved using `divide(a=45, b=3)`, which gave us the final answer.\n", "\n", - "So we were working through a math problem using the available calculator functions (add, multiply, divide) to evaluate the expression step by step, following the proper order of operations (parentheses first, then multiplication/division from left to right).\n", - "\n", - "Would you like me to finish the calculation and divide 45 by 3 to get the final answer?\n", + "The final result of the entire calculation was **15.0**.\n", "\n", "
\n", "\n", "- id: `chatcmpl-xxx`\n", - "- model: `claude-sonnet-4-5-20250929`\n", + "- model: `gemini-2.5-pro`\n", "- finish_reason: `stop`\n", - "- usage: `Usage(completion_tokens=228, prompt_tokens=1262, total_tokens=1490, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n", + "- usage: `Usage(completion_tokens=712, prompt_tokens=501, total_tokens=1213, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=469, rejected_prediction_tokens=None, text_tokens=243, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=501, image_tokens=None))`\n", "\n", "
" ], "text/plain": [ - "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"We just calculated the mathematical expression **((10 + 5) * 3) / (2 + 1)** step by step!\\n\\nHere's what happened:\\n\\n1. **First**, I calculated the two additions in parentheses:\\n - 10 + 5 = 15\\n - 2 + 1 = 3\\n\\n2. **Then**, I multiplied the first result by 3:\\n - 15 * 3 = 45\\n\\n3. **Finally**, I was about to divide 45 by 3 to get the final answer (which would be 15), but you asked me this question before I completed that last step!\\n\\nSo we were working through a math problem using the available calculator functions (add, multiply, divide) to evaluate the expression step by step, following the proper order of operations (parentheses first, then multiplication/division from left to right).\\n\\nWould you like me to finish the calculation and divide 45 by 3 to get the final answer?\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=228, prompt_tokens=1262, total_tokens=1490, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))" + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-pro', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"We just solved the mathematical expression `(10 + 5) * (2 + 1) / 3` step-by-step using the available tools.\\n\\nHere's a breakdown of the process:\\n\\n1. **Parentheses First:** We first calculated the expressions inside the parentheses:\\n * `10 + 5` was solved using `simple_add(a=10, b=5)`, which gave us `15`.\\n * `2 + 1` was solved using `simple_add(a=2, b=1)`, which gave us `3`.\\n\\n2. **Multiplication:** Next, we multiplied the results from the first step:\\n * `15 * 3` was solved using `multiply(a=15, b=3)`, resulting in `45`.\\n\\n3. **Division:** Finally, we performed the division:\\n * `45 / 3` was solved using `divide(a=45, b=3)`, which gave us the final answer.\\n\\nThe final result of the entire calculation was **15.0**.\", role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[{'type': 'thinking', 'thinking': '{\"text\": \"We just solved the mathematical expression `(10 + 5) * (2 + 1) / 3` step-by-step using the available tools.\\\\n\\\\nHere\\'s a breakdown of the process:\\\\n\\\\n1. **Parentheses First:** We first calculated the expressions inside the parentheses:\\\\n * `10 + 5` was solved using `simple_add(a=10, b=5)`, which gave us `15`.\\\\n * `2 + 1` was solved using `simple_add(a=2, b=1)`, which gave us `3`.\\\\n\\\\n2. **Multiplication:** Next, we multiplied the results from the first step:\\\\n * `15 * 3` was solved using `multiply(a=15, b=3)`, resulting in `45`.\\\\n\\\\n3. **Division:** Finally, we performed the division:\\\\n * `45 / 3` was solved using `divide(a=45, b=3)`, which gave us the final answer.\\\\n\\\\nThe final result of the entire calculation was **15.0**.\"}', 'signature': 'CpMNAXLI2nxhyWYOomgLZEeg1IWdj12zg1vWH+yImYTOlAsRp2iloi8QTebI/hLUiy1lHtY9EmXpHfoWmF+M7iezHjr5t2BWCFzNQrOdLOnOQ4A0hPMdqzMNmWwa7P96Hu4HFN3XwiFUXvU8zqFwMKyshb0uPMO4kqAaEOoG5oeGxA2sHdPx+1z2YQD1NT/vhi7di//Z5rsZRBm97M2z+tSn4LfU0b2vEiv/holF6TdhZe/WmUHppS+oHolG9tAbP80EXXE1eWcc2KbaorV2Lqmzp5HiB349jzRqoiOt1lhVjSfjCL3QlXK/j1cD/jUyMSwx4L8jadW5vm0FIRDhd4K7Xa2M1xI403xjVfJGmz/oUuiraKzXDI+/plL7I+kpO3lt0BGL9hpIXt2O/nzX7BvGzY5gm1B4P1AUDVjAJgtKiWnvjkLQTozPDvBa5yyk5BQ9yN1P/5Ud94n3VKdiUaNm4OO2ZHp1ud9tsE5HNiUb2UQDgdz0L09/Ap8zuLAtRx15TON/bvRrz0T1HB4X98mE7GeYm+eTOaujzNYUizZpAEX8cQ/vZyzFWTi8A/2FCAfDAKAGXYVfFaRTvrncUZo6A574RKYijDrlj//mEr0phKDPR6qTr3PBJigU7iS4dSLZnbSTgmdedLNqzsNOx4U81TXZCA6ZuDbLsqk58qGJKpJzrkpR1Dymrx8brsV/n96SxpOPvYPFi1wvZo3c+bbYJ+/i49XWs/ZWZI8btCYL1faK/nFlJDuTee2avx2qFjpf4uU/R4EQJpOwnqtholvxfGMH3X/30QWwhN7DXEtZE0S+UJhZ7HlKTFO/0B9gA6AiqwOM9KpBFzZvMDXvoZJaLTi8+TjT6rQ2LnhNbQemHMPYlElQX/tSyeNexFmMlMQ47qMOQF3sbvBs1EUX6qhELyZTs5OMrRaznvEp/ycx/FS36m22Z6PTyixuqUf3MRFOqAWKsktUYupHxqDDZt1HV9JkrVLt1C6YuGanCvBoJB/k8R9RC7tFruIagnId2tEERFBFihVIRnpiVCwoY8v9ObsTp32QQZQKZVgMaWMOkMvCVP5rz2zQegBKYjZ8gamNNuVXPlsqgdqGceROnyBZlU9GEAijFZiejwdg2BdsM/RLnWIVzNGMTfaGlt2Hn5uIkryV5OB++PaTiJZoCDxORzzSK5kw/VRw6aWEC0H6u6GqJ8bzF0jIdF6cAPn2g3K1ofIepNvEStD/N3DoByPwouNG/fjwatUxwgQJCrmCKL0mUn5ir9tnyF+aWrgMAVP5gILBVQwWokKZACwFMMz3Nd8r76DsUGi+s08bHVw58oe9qZ5TaMb3J0OHYh2+1TByVkV3fdkbWcNEkCJr5z9HD5cCkgDUHTFzvVDHs1MCGZGZvMYkxOo31L0OmLDGIiXw9SCVrI0z2Dusdx1BZyydoN5ivOvKNHNoK5cR2KxZxFhB6oph9XhVirn815OajMk1eoyQHVHCyU/ROPEYqrxJf0JsshihPp6Z095o3u+IyY+6kpYBBiX2GnCp//i4EEgjvzwHOMwsd4Q5u95TBMqu6p+qJZqNxCNMm/Mra11zb+ovw9W2Oyext+8v0Y69vQInKxMwH19yDzasagdN9dfs6GPbsDvzwiIsTGoDf5FnWWNYkqoXs/y4jJwvJqLIcyHpI5nyav0FqbJm7JHX7RRQ8TKC5ekD8x6WJdfogCtHnzLpKeBGDAdxlYpYhkLp0ngVY2/oWOQbbb3VxckfwAE87wc82hlsqHANQ4HEOpj6kFuHt+FMAqlwM5Fx/boeaQ8hU8042bQPPrV/GvmhBDb5tMzeWpT5G4T/GdUvJPxvZl9JnxoK/kDiHKWiYZOJAEXl5aAR7Zk99cd8FslTvmJajRaVR8Ae7kTWH7zIlc0AAKqx+vYh+dhNg/WfoBoSbMyzsnuzK6r8u5MIjS5VDJGBosqpuu7e0Rjn+zUXf/o1F35hqgLlJx1HuIG0gR/xUozksx5vkhN61sdohHUbrtOzx0o8g7WHe2IQtDBtPxOBkpk5Ww0NIGth2MVamFh5s8M0tkNiLhFknEd/E0GdhcQ4QJTg2Vr1PcnxD3v52YcPzLlLcHcklddRR3jGNeQkekhPeKLM9MLeWimSgHhBRzzet1sa3rVo+iQuxyeU7QGvCdv/IEaP2aXsoAVFlmG4A5oAjAad3A92HNopWLmODW9+pXNDtIexO9fqCd44FCAv6iCSu9tM2NzCbyx/7pyEsp15i55S'}], provider_specific_fields=None))], usage=Usage(completion_tokens=712, prompt_tokens=501, total_tokens=1213, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=469, rejected_prediction_tokens=None, text_tokens=243, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=501, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" ] }, "execution_count": null, @@ -5485,11 +5132,15 @@ { "data": { "text/markdown": [ - "Otters are [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") carnivorous mammals in the subfamily Lutrinae, part of the weasel family. [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") There are 14 extant otter species, all semiaquatic, found on every continent except Australia and Antarctica.\n", + "### Weather in New York City: Chilly with a Chance of Snow Showers\n", + "\n", + "**New York, NY** - This morning in New York City is currently cloudy with a temperature of 38°F (3°C), though it feels more like 33°F (1°C). There is a very low chance of snow. The humidity is currently at 54%.\n", + "\n", + "Today's forecast shows a high of around 41°F (5°C) and a low of 21°F (-6°C). Skies will be mostly cloudy, gradually becoming sunny.\n", "\n", - "[*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") They have long, slim bodies with powerful webbed feet used to swim, and seal-like abilities for holding breath underwater. [*](https://www.nationalgeographic.com/animals/mammals/facts/otters-1 \"Otters, facts and information | National Geographic\") Otters have the densest fur of any animal—as many as a million hairs per square inch in places.\n", + "Looking ahead, Friday is expected to bring snow showers with temperatures ranging from 30°F to 31°F (-1°C). The weekend forecast shows partly sunny skies with a high of 41°F (5°C) on Saturday and 40°F (4°C) on Sunday.\n", "\n", - "[*](https://www.nationalgeographic.com/animals/mammals/facts/otters-1 \"Otters, facts and information | National Geographic\") All otters are expert hunters that eat fish, crustaceans, and other critters. They're known for being [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") playful animals, engaging in activities like sliding into water on natural slides. [*](https://en.wikipedia.org/wiki/Otter \"Otter - Wikipedia\") Otters live up to 16 years, and their young stay with their mothers for about a year." + "The extended forecast indicates a mix of sun and clouds with a potential for light rain early next week and a chance of a rain and snow mix by next Thursday. A heavy snowstorm is possible next Sunday." ], "text/plain": [ "" @@ -5501,7 +5152,7 @@ ], "source": [ "chat_stream_tools = AsyncChat(model, search='l')\n", - "res = await chat_stream_tools(\"Search the web and tell me very briefly about otters\", stream=True)\n", + "res = await chat_stream_tools(\"Search the weather in NYC\", stream=True)\n", "_=await adisplay_stream(res)" ] }, @@ -5513,114 +5164,285 @@ "### Caching" ] }, + { + "cell_type": "markdown", + "id": "2e071882", + "metadata": {}, + "source": [ + "#### Anthropic\n", + "\n", + "We use explicit caching via cache control checkpoints. Anthropic requires exact match with cached tokens and even a small change results in cache invalidation." + ] + }, { "cell_type": "code", "execution_count": null, - "id": "811571e1", + "id": "64ed32f8", "metadata": {}, "outputs": [], "source": [ - "a,b = random.randint(0,100), random.randint(0,100)\n", - "hist = [[f\"What is {a}+{b}?\\n\" * 200], f\"It's {a+b}\", ['hi'], \"Hello\"]" + "disable_cachy()" ] }, { "cell_type": "code", "execution_count": null, - "id": "38bc2dbb", + "id": "32340dd3", "metadata": {}, "outputs": [], "source": [ - "chat = AsyncChat(model, cache=True, hist=hist)\n", - "rs = await chat('hi again', stream=True, stream_options={\"include_usage\": True})" + "a,b = random.randint(0,100), random.randint(0,100)\n", + "hist = [[f\"What is {a}+{b}?\\n\" * 250], f\"It's {a+b}\", ['hi'], \"Hello\"]\n", + "msgs = mk_msgs(hist)" + ] + }, + { + "cell_type": "markdown", + "id": "cd0c6f1c", + "metadata": {}, + "source": [ + "In this first api call we will see cache creation until the last user msg:" ] }, { "cell_type": "code", "execution_count": null, - "id": "91f9390d", + "id": "d6160c8f", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Usage(completion_tokens=13, prompt_tokens=1626, total_tokens=1639, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=1623), cache_creation_input_tokens=1623, cache_read_input_tokens=0)\n" + "Usage(completion_tokens=10, prompt_tokens=2516, total_tokens=2526, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=2516, image_tokens=None))\n" ] } ], "source": [ + "sleep(5)\n", + "chat = AsyncChat(ms[2], cache=True, hist=hist)\n", + "rs = await chat('hi again', stream=True, stream_options={\"include_usage\": True})\n", "async for o in rs: \n", " if isinstance(o, ModelResponse): print(o.usage)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "9e0b68f6", + "metadata": {}, + "outputs": [], + "source": [ + "# flaky test\n", + "# cache_read_toks = o.usage.cache_creation_input_tokens\n", + "# test_eq(cache_read_toks > 1000, True)\n", + "# test_eq(o.usage.cache_read_input_tokens, 0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e4a63ef", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Usage(completion_tokens=38, prompt_tokens=2526, total_tokens=2564, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=31, rejected_prediction_tokens=None, text_tokens=7, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=2526, image_tokens=None))\n" + ] + } + ], + "source": [ + "hist.extend([['hi again'], 'how may i help you?'])\n", + "chat = AsyncChat(ms[2], cache=True, hist=hist)\n", + "rs = await chat('bye!', stream=True, stream_options={\"include_usage\": True})\n", + "async for o in rs:\n", + " if isinstance(o, ModelResponse): print(o.usage)" + ] + }, { "cell_type": "markdown", - "id": "a3fac1d3", + "id": "528cf7b4", "metadata": {}, "source": [ - "In this first api call we will see cache creation until the last user msg:" + "The subsequent call should re-use the existing cache:" ] }, { "cell_type": "code", "execution_count": null, - "id": "f372cd99", + "id": "fa997bd5", "metadata": {}, "outputs": [], "source": [ - "cache_read_toks = o.usage.cache_creation_input_tokens\n", - "test_eq(cache_read_toks > 1000, True)\n", - "test_eq(o.usage.cache_read_input_tokens, 0)" + "# flaky test\n", + "# test_eq(o.usage.cache_read_input_tokens, cache_read_toks)" + ] + }, + { + "cell_type": "markdown", + "id": "118670c9", + "metadata": {}, + "source": [ + "#### Gemini\n", + "\n", + "Gemini implicit caching supports partial token matches. The usage metadata only shows cache hits with the `cached_tokens` field. So, to view them we need to run completions at least twice." + ] + }, + { + "cell_type": "markdown", + "id": "4cfdad46", + "metadata": {}, + "source": [ + "Testing with `gemini-2.5-flash` until `gemini-3-pro-preview` is more reliable" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e6215649", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Usage(completion_tokens=800, prompt_tokens=2526, total_tokens=3326, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=788, rejected_prediction_tokens=None, text_tokens=12, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=2526, image_tokens=None))\n" + ] + } + ], + "source": [ + "chat = AsyncChat(ms[1], cache=True, hist=hist)\n", + "rs = await chat('hi again', stream=True, stream_options={\"include_usage\": True})\n", + "async for o in rs: \n", + " if isinstance(o, ModelResponse): print(o.usage)" + ] + }, + { + "cell_type": "markdown", + "id": "6cd85cb9", + "metadata": {}, + "source": [ + "Running the same completion again:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "caa84445", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Usage(completion_tokens=846, prompt_tokens=2526, total_tokens=3372, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=834, rejected_prediction_tokens=None, text_tokens=12, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=2526, image_tokens=None))\n" + ] + } + ], + "source": [ + "sleep(5) # it takes a while for cached tokens to be avail.\n", + "chat = AsyncChat(ms[1], cache=True, hist=hist)\n", + "rs = await chat('hi again', stream=True, stream_options={\"include_usage\": True})\n", + "async for o in rs: \n", + " if isinstance(o, ModelResponse): print(o.usage)" ] }, { "cell_type": "code", "execution_count": null, - "id": "fd079aa0", + "id": "cbff9056", "metadata": {}, "outputs": [], "source": [ - "hist.extend([['hi again'], 'how may i help you?'])\n", - "chat = AsyncChat(model, cache=True, hist=hist)\n", - "rs = await chat('bye!', stream=True, stream_options={\"include_usage\": True})" + "# flaky test\n", + "# test_eq(o.usage.prompt_tokens_details.cached_tokens > 1800, True)" ] }, { "cell_type": "code", "execution_count": null, - "id": "9a940a1d", + "id": "e78a1d56", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Usage(completion_tokens=17, prompt_tokens=1640, total_tokens=1657, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=1623, text_tokens=None, image_tokens=None, cache_creation_tokens=14), cache_creation_input_tokens=14, cache_read_input_tokens=1623)\n" + "Usage(completion_tokens=453, prompt_tokens=2536, total_tokens=2989, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=440, rejected_prediction_tokens=None, text_tokens=13, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=2536, image_tokens=None))\n" ] } ], "source": [ + "hist.extend([['hi again'], 'how may i help you?'])\n", + "chat = AsyncChat(ms[1], cache=True, hist=hist)\n", + "rs = await chat('bye!', stream=True, stream_options={\"include_usage\": True})\n", "async for o in rs:\n", " if isinstance(o, ModelResponse): print(o.usage)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "024f5e1f", + "metadata": {}, + "outputs": [], + "source": [ + "# flaky test\n", + "# test_eq(o.usage.prompt_tokens_details.cached_tokens > 2000, True)" + ] + }, { "cell_type": "markdown", - "id": "8c29a5ac", + "id": "0806d459", "metadata": {}, "source": [ - "The subsequent call should re-use the existing cache:" + "Let's modify the cached content and see that partial matching works:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7ea4beac", + "metadata": {}, + "outputs": [], + "source": [ + "c = hist[0][0]\n", + "hist[0][0] = c[:int(len(c)*0.75)] + \" Some extra text\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b88eb842", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Usage(completion_tokens=444, prompt_tokens=1921, total_tokens=2365, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=438, rejected_prediction_tokens=None, text_tokens=6, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=1921, image_tokens=None))\n" + ] + } + ], + "source": [ + "hist.extend([['hi again'], 'how may i help you?'])\n", + "chat = AsyncChat(ms[1], cache=True, hist=hist)\n", + "rs = await chat('bye!', stream=True, stream_options={\"include_usage\": True})\n", + "async for o in rs:\n", + " if isinstance(o, ModelResponse): print(o.usage)" ] }, { "cell_type": "code", "execution_count": null, - "id": "d6ae13c5", + "id": "06f82575", "metadata": {}, "outputs": [], "source": [ - "test_eq(o.usage.cache_read_input_tokens, cache_read_toks)" + "# flaky test\n", + "# test_eq(o.usage.prompt_tokens_details.cached_tokens > 900, True)" ] }, { @@ -5651,13 +5473,7 @@ "source": [] } ], - "metadata": { - "kernelspec": { - "display_name": "python3", - "language": "python", - "name": "python3" - } - }, + "metadata": {}, "nbformat": 4, "nbformat_minor": 5 } diff --git a/nbs/01_usage.ipynb b/nbs/01_usage.ipynb index 2e7366f..13f9482 100644 --- a/nbs/01_usage.ipynb +++ b/nbs/01_usage.ipynb @@ -44,7 +44,7 @@ "source": [ "import litellm, importlib, httpx\n", "from lisette.core import Chat, AsyncChat, patch_litellm\n", - "from cachy import enable_cachy\n", + "from cachy import enable_cachy, disable_cachy\n", "from fastcore.test import *" ] }, @@ -72,11 +72,32 @@ "execution_count": null, "id": "c9acabfc", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "importlib.reload(litellm); # to re-run the notebook without kernel restart" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "f85329c0", + "metadata": {}, + "outputs": [], + "source": [ + "# litellm._turn_on_debug()" + ] + }, { "cell_type": "code", "execution_count": null, @@ -124,9 +145,17 @@ " def _log_usage(self, response_obj, response_cost, start_time, end_time):\n", " usage = response_obj.usage\n", " ptd = usage.prompt_tokens_details\n", - " self.usage.insert(Usage(timestamp=time.time(), model=response_obj.model, user_id=self.user_id_fn(), prompt_tokens=usage.prompt_tokens, completion_tokens=usage.completion_tokens,\n", - " total_tokens=usage.total_tokens, cached_tokens=ptd.cached_tokens if ptd else 0, cache_creation_tokens=usage.cache_creation_input_tokens, \n", - " cache_read_tokens=usage.cache_read_input_tokens, web_search_requests=nested_idx(usage, 'server_tool_use', 'web_search_requests'), response_cost=response_cost))\n", + " self.usage.insert(Usage(timestamp=time.time(), \n", + " model=response_obj.model, \n", + " user_id=self.user_id_fn(), \n", + " prompt_tokens=usage.prompt_tokens, \n", + " completion_tokens=usage.completion_tokens,\n", + " total_tokens=usage.total_tokens, \n", + " cached_tokens=ptd.cached_tokens if ptd else 0, # used by gemini (read tokens)\n", + " cache_creation_tokens=nested_idx(usage, 'cache_creation_input_tokens'),\n", + " cache_read_tokens=nested_idx(usage, 'cache_read_input_tokens'), # used by anthropic \n", + " web_search_requests=nested_idx(usage, 'server_tool_use', 'web_search_requests'),\n", + " response_cost=response_cost))\n", " \n", " def user_id_fn(self): raise NotImplementedError('Please implement `LisetteUsageLogger.user_id_fn` before initializing, e.g using fastcore.patch.')" ] @@ -218,75 +247,9 @@ "execution_count": null, "id": "35cc0ba6", "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "```python\n", - "{ 'cache_creation_input_token_cost': 3.75e-06,\n", - " 'cache_creation_input_token_cost_above_200k_tokens': 7.5e-06,\n", - " 'cache_read_input_token_cost': 3e-07,\n", - " 'cache_read_input_token_cost_above_200k_tokens': 6e-07,\n", - " 'input_cost_per_token': 3e-06,\n", - " 'input_cost_per_token_above_200k_tokens': 6e-06,\n", - " 'litellm_provider': 'anthropic',\n", - " 'max_input_tokens': 200000,\n", - " 'max_output_tokens': 64000,\n", - " 'max_tokens': 64000,\n", - " 'mode': 'chat',\n", - " 'output_cost_per_token': 1.5e-05,\n", - " 'output_cost_per_token_above_200k_tokens': 2.25e-05,\n", - " 'search_context_cost_per_query': { 'search_context_size_high': 0.01,\n", - " 'search_context_size_low': 0.01,\n", - " 'search_context_size_medium': 0.01},\n", - " 'supports_assistant_prefill': True,\n", - " 'supports_computer_use': True,\n", - " 'supports_function_calling': True,\n", - " 'supports_pdf_input': True,\n", - " 'supports_prompt_caching': True,\n", - " 'supports_reasoning': True,\n", - " 'supports_response_schema': True,\n", - " 'supports_tool_choice': True,\n", - " 'supports_vision': True,\n", - " 'tool_use_system_prompt_tokens': 346}\n", - "```" - ], - "text/plain": [ - "{'cache_creation_input_token_cost': 3.75e-06,\n", - " 'cache_read_input_token_cost': 3e-07,\n", - " 'input_cost_per_token': 3e-06,\n", - " 'input_cost_per_token_above_200k_tokens': 6e-06,\n", - " 'output_cost_per_token_above_200k_tokens': 2.25e-05,\n", - " 'cache_creation_input_token_cost_above_200k_tokens': 7.5e-06,\n", - " 'cache_read_input_token_cost_above_200k_tokens': 6e-07,\n", - " 'litellm_provider': 'anthropic',\n", - " 'max_input_tokens': 200000,\n", - " 'max_output_tokens': 64000,\n", - " 'max_tokens': 64000,\n", - " 'mode': 'chat',\n", - " 'output_cost_per_token': 1.5e-05,\n", - " 'search_context_cost_per_query': {'search_context_size_high': 0.01,\n", - " 'search_context_size_low': 0.01,\n", - " 'search_context_size_medium': 0.01},\n", - " 'supports_assistant_prefill': True,\n", - " 'supports_computer_use': True,\n", - " 'supports_function_calling': True,\n", - " 'supports_pdf_input': True,\n", - " 'supports_prompt_caching': True,\n", - " 'supports_reasoning': True,\n", - " 'supports_response_schema': True,\n", - " 'supports_tool_choice': True,\n", - " 'supports_vision': True,\n", - " 'tool_use_system_prompt_tokens': 346}" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "model_pricing['claude-sonnet-4-5']" + "# model_pricing['claude-sonnet-4-5']" ] }, { @@ -294,93 +257,9 @@ "execution_count": null, "id": "19ff68bd", "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "```python\n", - "{ 'cache_creation_input_token_cost_above_200k_tokens': 2.5e-07,\n", - " 'cache_read_input_token_cost': 2e-07,\n", - " 'cache_read_input_token_cost_above_200k_tokens': 4e-07,\n", - " 'input_cost_per_token': 2e-06,\n", - " 'input_cost_per_token_above_200k_tokens': 4e-06,\n", - " 'input_cost_per_token_batches': 1e-06,\n", - " 'litellm_provider': 'vertex_ai-language-models',\n", - " 'max_audio_length_hours': 8.4,\n", - " 'max_audio_per_prompt': 1,\n", - " 'max_images_per_prompt': 3000,\n", - " 'max_input_tokens': 1048576,\n", - " 'max_output_tokens': 65535,\n", - " 'max_pdf_size_mb': 30,\n", - " 'max_tokens': 65535,\n", - " 'max_video_length': 1,\n", - " 'max_videos_per_prompt': 10,\n", - " 'mode': 'chat',\n", - " 'output_cost_per_token': 1.2e-05,\n", - " 'output_cost_per_token_above_200k_tokens': 1.8e-05,\n", - " 'output_cost_per_token_batches': 6e-06,\n", - " 'source': 'https://cloud.google.com/vertex-ai/generative-ai/pricing',\n", - " 'supported_endpoints': ['/v1/chat/completions', '/v1/completions', '/v1/batch'],\n", - " 'supported_modalities': ['text', 'image', 'audio', 'video'],\n", - " 'supported_output_modalities': ['text'],\n", - " 'supports_audio_input': True,\n", - " 'supports_function_calling': True,\n", - " 'supports_pdf_input': True,\n", - " 'supports_prompt_caching': True,\n", - " 'supports_reasoning': True,\n", - " 'supports_response_schema': True,\n", - " 'supports_system_messages': True,\n", - " 'supports_tool_choice': True,\n", - " 'supports_video_input': True,\n", - " 'supports_vision': True,\n", - " 'supports_web_search': True}\n", - "```" - ], - "text/plain": [ - "{'cache_read_input_token_cost': 2e-07,\n", - " 'cache_read_input_token_cost_above_200k_tokens': 4e-07,\n", - " 'cache_creation_input_token_cost_above_200k_tokens': 2.5e-07,\n", - " 'input_cost_per_token': 2e-06,\n", - " 'input_cost_per_token_above_200k_tokens': 4e-06,\n", - " 'input_cost_per_token_batches': 1e-06,\n", - " 'litellm_provider': 'vertex_ai-language-models',\n", - " 'max_audio_length_hours': 8.4,\n", - " 'max_audio_per_prompt': 1,\n", - " 'max_images_per_prompt': 3000,\n", - " 'max_input_tokens': 1048576,\n", - " 'max_output_tokens': 65535,\n", - " 'max_pdf_size_mb': 30,\n", - " 'max_tokens': 65535,\n", - " 'max_video_length': 1,\n", - " 'max_videos_per_prompt': 10,\n", - " 'mode': 'chat',\n", - " 'output_cost_per_token': 1.2e-05,\n", - " 'output_cost_per_token_above_200k_tokens': 1.8e-05,\n", - " 'output_cost_per_token_batches': 6e-06,\n", - " 'source': 'https://cloud.google.com/vertex-ai/generative-ai/pricing',\n", - " 'supported_endpoints': (#3) ['/v1/chat/completions','/v1/completions','/v1/batch'],\n", - " 'supported_modalities': (#4) ['text','image','audio','video'],\n", - " 'supported_output_modalities': (#1) ['text'],\n", - " 'supports_audio_input': True,\n", - " 'supports_function_calling': True,\n", - " 'supports_pdf_input': True,\n", - " 'supports_prompt_caching': True,\n", - " 'supports_reasoning': True,\n", - " 'supports_response_schema': True,\n", - " 'supports_system_messages': True,\n", - " 'supports_tool_choice': True,\n", - " 'supports_video_input': True,\n", - " 'supports_vision': True,\n", - " 'supports_web_search': True}" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "model_pricing['gemini-3-pro-preview']" + "# model_pricing['gemini-3-pro-preview']" ] }, { @@ -394,12 +273,24 @@ { "cell_type": "code", "execution_count": null, - "id": "47cf6ad5", + "id": "a045f396", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "dbfp = Path('.lisette/litellm-usage.db')\n", - "dbfp.parent.mkdir(exist_ok=True)" + "from tempfile import NamedTemporaryFile\n", + "tf =NamedTemporaryFile(suffix='.db')\n", + "tf" ] }, { @@ -411,7 +302,8 @@ "source": [ "@patch\n", "def user_id_fn(self:LisetteUsageLogger): return 'user-123'\n", - "logger = LisetteUsageLogger(dbfp)\n", + "tf=NamedTemporaryFile(suffix='.db')\n", + "logger = LisetteUsageLogger(tf.name)\n", "litellm.callbacks = [logger]" ] }, @@ -474,7 +366,7 @@ ], "source": [ "chat = Chat('claude-sonnet-4-5-20250929')\n", - "chat(\"What is 2+2?\")" + "r = chat(\"What is 2+2?\")" ] }, { @@ -499,6 +391,27 @@ "u = logger.usage(select=slc)[-1]; u" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "d88fe4d6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Usage(id=1, timestamp=1764862377.089592, model='claude-sonnet-4-5-20250929', user_id='user-123', prompt_tokens=14, completion_tokens=11, total_tokens=25, cached_tokens=0, cache_creation_tokens=0, cache_read_tokens=0, web_search_requests=None, response_cost=0.000207)]" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "logger.usage()" + ] + }, { "cell_type": "markdown", "id": "39087125", @@ -864,7 +777,7 @@ "id": "83ce4d9b", "metadata": {}, "source": [ - "Once this [PR](https://github.com/BerriAI/litellm/pull/16826) is merged `web_search_requests` will be included with `stream=True`, and the following test should pass:" + "Once we upgrade to `litellm>1.80.5` `web_search_requests` will be included with `stream=True`, and the following test should pass:" ] }, { @@ -927,6 +840,83 @@ "# test_close((L(logger.usage()).map(lambda o:o.total_cost(sc=0.01)).sum()), 0.086, 1e-3)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "5a00a720", + "metadata": {}, + "outputs": [], + "source": [ + "disable_cachy()" + ] + }, + { + "cell_type": "markdown", + "id": "b1e57bed", + "metadata": {}, + "source": [ + "A simple Gemini example (requires min tokens and running twice to see `cached_tokens`):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4ad22437", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "2 + 2 = 4\n", + "\n", + "
\n", + "\n", + "- id: `chatcmpl-xxx`\n", + "- model: `gemini-2.5-flash`\n", + "- finish_reason: `stop`\n", + "- usage: `Usage(completion_tokens=41, prompt_tokens=7010, total_tokens=7051, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=34, rejected_prediction_tokens=None, text_tokens=7, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=3058, text_tokens=3952, image_tokens=None))`\n", + "\n", + "
" + ], + "text/plain": [ + "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-flash', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='2 + 2 = 4', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=41, prompt_tokens=7010, total_tokens=7051, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=34, rejected_prediction_tokens=None, text_tokens=7, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=3058, text_tokens=3952, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "chat = Chat('gemini/gemini-2.5-flash')\n", + "chat(\"What is 2+2?\"* 500)\n", + "time.sleep(5)\n", + "chat(\"What is 2+2?\"* 500)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aa0621b4", + "metadata": {}, + "outputs": [], + "source": [ + "time.sleep(0.3) # wait for callback db write\n", + "u = logger.usage(select=slc)[-1]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6133e694", + "metadata": {}, + "outputs": [], + "source": [ + "# flaky test\n", + "# test_eq(len(logger.usage()), 8)\n", + "# test_eq(logger.usage()[-1].cached_tokens > 3000, True)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -934,7 +924,7 @@ "metadata": {}, "outputs": [], "source": [ - "dbfp.parent.delete()" + "tf.close()" ] }, {