|
| 1 | +import os |
| 2 | + |
| 3 | +import cohere |
| 4 | +from dotenv import load_dotenv |
| 5 | + |
| 6 | +from parea import Parea |
| 7 | +from parea.utils.universal_encoder import json_dumps |
| 8 | + |
# Load environment variables (PAREA_API_KEY, COHERE_API_KEY) from a local .env file.
load_dotenv()

# Parea client used for tracing/observability of the Cohere calls below.
p = Parea(api_key=os.getenv("PAREA_API_KEY"))
co = cohere.Client(api_key=os.getenv("COHERE_API_KEY"))
# Instrument the Cohere client so every co.chat(...) call is logged to Parea.
p.wrap_cohere_client(co)
| 15 | + |
def web_search(query: str) -> list[dict]:
    """Stub search tool: return a canned result list for *query*.

    Replace the body with a real web-search integration. Each result dict
    is expected to carry "url" and "text" keys, which is the shape the
    chat loop below forwards back to the model as tool output.
    """
    # your code for performing a web search goes here
    canned_result = {
        "url": "https://en.wikipedia.org/wiki/Ontario",
        "text": "The capital of Ontario is Toronto, ...",
    }
    return [canned_result]
| 19 | + |
| 20 | + |
# Schema advertising the `web_search` function so the model can request it.
web_search_tool = {
    "name": "web_search",
    "description": "performs a web search with the specified query",
    "parameter_definitions": {
        "query": {
            "description": "the query to look up",
            "type": "str",
            "required": True,
        },
    },
}
| 26 | + |
# The user question and the model used for every chat turn in this flow.
message = "Who is the mayor of the capital of Ontario?"
model = "command-r-plus"

# STEP 2: Check what tools the model wants to use and how

res = co.chat(model=model, message=message, force_single_step=False, tools=[web_search_tool])

# as long as the model sends back tool_calls,
# keep invoking tools and sending the results back to the model
while res.tool_calls:
    print(res.text)  # This will be an observation and a plan with next steps
    tool_results = []
    for call in res.tool_calls:
        # use the `web_search` tool with the search query the model sent back
        tool_results.append({"call": call, "outputs": web_search(call.parameters["query"])})

    # call chat again with tool results; reuse the `model` variable instead of a
    # duplicated "command-r-plus" literal so the model is configured in one place
    res = co.chat(
        model=model,
        chat_history=res.chat_history,
        message="",  # empty on purpose: tool results, not new user text, drive this turn
        force_single_step=False,
        tools=[web_search_tool],
        tool_results=tool_results,
    )

print(res.text)  # "The mayor of Toronto, the capital of Ontario is Olivia Chow"
| 48 | + |
| 49 | + |
| 50 | +# tool descriptions that the model has access to |
# tool descriptions that the model has access to
tools = [
    {
        "name": "query_daily_sales_report",
        "description": "Connects to a database to retrieve overall sales volumes and sales information for a given day.",
        "parameter_definitions": {
            "day": {
                "description": "Retrieves sales data for this day, formatted as YYYY-MM-DD.",
                "type": "str",
                "required": True,
            },
        },
    },
    {
        "name": "query_product_catalog",
        # typo fix: original read "Connects to a a product catalog"
        "description": "Connects to a product catalog with information about all the products being sold, including categories, prices, and stock levels.",
        "parameter_definitions": {
            "category": {
                "description": "Retrieves product information data for all products in this category.",
                "type": "str",
                "required": True,
            },
        },
    },
]
| 63 | + |
# preamble containing instructions about the task and the desired style for the output.
# Passed verbatim to co.chat(preamble=...) below — edit with care, the model reads it.
preamble = """
## Task & Context
You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.

## Style Guide
Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.
"""

# user request (note: this rebinds `message` from the earlier multi-step demo)
message = "Can you provide a sales summary for 29th September 2023, and also give me some details about the products in the 'Electronics' category, for example their prices and stock levels?"
| 75 | + |
# First call (single-step mode): the model only plans which tools to invoke.
response = co.chat(message=message, force_single_step=True, tools=tools, preamble=preamble, model="command-r")
print("The model recommends doing the following tool calls:")
print("\n".join(str(tool_call) for tool_call in response.tool_calls))

tool_results = []
# Iterate over the tool calls generated by the model
for tool_call in response.tool_calls:
    # here is where you would call the tool recommended by the model, using the parameters recommended by the model
    # NOTE(review): this demo only formats a placeholder string — no tool is actually executed.
    output = {"output": f"functions_map[{tool_call.name}]({tool_call.parameters})"}
    # store the output in a list
    outputs = [output]
    # store your tool results in this format
    tool_results.append({"call": tool_call, "outputs": outputs})


print("Tool results that will be fed back to the model in step 4:")
print(json_dumps(tool_results, indent=4))

# Second call: same message plus the tool results so the model composes its final answer.
response = co.chat(message=message, tools=tools, tool_results=tool_results, preamble=preamble, model="command-r", temperature=0.3, force_single_step=True)


print("Final answer:")
print(response.text)
0 commit comments