diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 412c1e8ed..b379dee63 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -87,3 +87,8 @@ jobs: run: | uv run pytest ./tests/test_utils.py if: ${{ success() || failure() }} + + - name: Function type hints utils tests + run: | + uv run pytest ./tests/test_function_type_hints_utils.py + if: ${{ success() || failure() }} diff --git a/README.md b/README.md index c0b14c77b..6da9d4d23 100644 --- a/README.md +++ b/README.md @@ -37,13 +37,13 @@ limitations under the License. ЁЯзСтАНЁЯТ╗ **First-class support for Code Agents**, i.e. agents that write their actions in code (as opposed to "agents being used to write code"). To make it secure, we support executing in sandboxed environments via [E2B](https://e2b.dev/). - On top of this [`CodeAgent`](https://huggingface.co/docs/smolagents/reference/agents#smolagents.CodeAgent) class, we still support the standard [`ToolCallingAgent`](https://huggingface.co/docs/smolagents/reference/agents#smolagents.ToolCallingAgent) that writes actions as JSON/text blobs. -ЁЯдЧ **Hub integrations**: you can share and load tools to/from the Hub, and more is to come! +ЁЯдЧ **Hub integrations**: you can share and load Gradio Spaces as tools to/from the Hub, and more is to come! ЁЯМР **Support for any LLM**: it supports models hosted on the Hub loaded in their `transformers` version or through our inference API, but also supports models from OpenAI, Anthropic and many others via our [LiteLLM](https://www.litellm.ai/) integration. Full documentation can be found [here](https://huggingface.co/docs/smolagents/index). -> [!NOTE] +> [!NOTE] > Check the our [launch blog post](https://huggingface.co/blog/smolagents) to learn more about `smolagents`! ## Quick demo @@ -118,7 +118,7 @@ And commit the changes. To run tests locally, run this command: ```bash -pytest . 
+make test ``` ## Citing smolagents @@ -127,8 +127,8 @@ If you use `smolagents` in your publication, please cite it by using the followi ```bibtex @Misc{smolagents, - title = {`smolagents`: The easiest way to build efficient agentic systems.}, - author = {Aymeric Roucher and Thomas Wolf and Leandro von Werra and Erik Kaunism├дki}, + title = {`smolagents`: a smol library to build great agentic systems.}, + author = {Aymeric Roucher and Albert Villanova del Moral and Thomas Wolf and Leandro von Werra and Erik Kaunism├дki}, howpublished = {\url{https://github.com/huggingface/smolagents}}, year = {2025} } diff --git a/docs/source/en/conceptual_guides/react.md b/docs/source/en/conceptual_guides/react.md index d85c9cad3..417fb8590 100644 --- a/docs/source/en/conceptual_guides/react.md +++ b/docs/source/en/conceptual_guides/react.md @@ -19,10 +19,33 @@ The ReAct framework ([Yao et al., 2022](https://huggingface.co/papers/2210.03629 The name is based on the concatenation of two words, "Reason" and "Act." Indeed, agents following this architecture will solve their task in as many steps as needed, each step consisting of a Reasoning step, then an Action step where it formulates tool calls that will bring it closer to solving the task at hand. -React process involves keeping a memory of past steps. +All agents in `smolagents` are based on the singular `MultiStepAgent` class, which is an abstraction of the ReAct framework. -> [!TIP] -> Read [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) blog post to learn more about multi-step agents. +On a basic level, this class performs actions in a cycle of the following steps, where existing variables and knowledge are incorporated into the agent logs as shown below: + +Initialization: the system prompt is stored in a `SystemPromptStep`, and the user query is logged into a `TaskStep` . 
+ +While loop (ReAct loop): + +- Use `agent.write_inner_memory_from_logs()` to write the agent logs into a list of LLM-readable [chat messages](https://huggingface.co/docs/transformers/en/chat_templating). +- Send these messages to a `Model` object to get its completion. Parse the completion to get the action (a JSON blob for `ToolCallingAgent`, a code snippet for `CodeAgent`). +- Execute the action and logs result into memory (an `ActionStep`). +- At the end of each step, we run all callback functions defined in `agent.step_callbacks` . + +Optionally, when planning is activated, a plan can be periodically revised and stored in a `PlanningStep` . This includes feeding facts about the task at hand to the memory. + +For a `CodeAgent`, it looks like the figure below. + +
+ + +
Here is a video overview of how that works: @@ -39,9 +62,12 @@ Here is a video overview of how that works: ![Framework of a React Agent](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/open-source-llms-as-agents/ReAct.png) -We implement two versions of ToolCallingAgent: -- [`ToolCallingAgent`] generates tool calls as a JSON in its output. -- [`CodeAgent`] is a new type of ToolCallingAgent that generates its tool calls as blobs of code, which works really well for LLMs that have strong coding performance. +We implement two versions of agents: +- [`CodeAgent`] is the preferred type of agent: it generates its tool calls as blobs of code. +- [`ToolCallingAgent`] generates tool calls as a JSON in its output, as is commonly done in agentic frameworks. We incorporate this option because it can be useful in some narrow cases where you can do fine with only one tool call per step: for instance, for web browsing, you need to wait after each action on the page to monitor how the page changes. + +> [!TIP] +> We also provide an option to run agents in one-shot: just pass `single_step=True` when launching the agent, like `agent.run(your_task, single_step=True)` > [!TIP] -> We also provide an option to run agents in one-shot: just pass `single_step=True` when launching the agent, like `agent.run(your_task, single_step=True)` \ No newline at end of file +> Read [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) blog post to learn more about multi-step agents. \ No newline at end of file diff --git a/docs/source/en/guided_tour.md b/docs/source/en/guided_tour.md index dd6a8214f..aebb4e23e 100644 --- a/docs/source/en/guided_tour.md +++ b/docs/source/en/guided_tour.md @@ -27,24 +27,25 @@ To initialize a minimal agent, you need at least these two arguments: - [`TransformersModel`] takes a pre-initialized `transformers` pipeline to run inference on your local machine using `transformers`. 
- [`HfApiModel`] leverages a `huggingface_hub.InferenceClient` under the hood. - [`LiteLLMModel`] lets you call 100+ different models through [LiteLLM](https://docs.litellm.ai/)! + - [`AzureOpenAIServerModel`] allows you to use OpenAI models deployed in [Azure](https://azure.microsoft.com/en-us/products/ai-services/openai-service). - `tools`, a list of `Tools` that the agent can use to solve the task. It can be an empty list. You can also add the default toolbox on top of your `tools` list by defining the optional argument `add_base_tools=True`. -Once you have these two arguments, `tools` and `model`, you can create an agent and run it. You can use any LLM you'd like, either through [Hugging Face API](https://huggingface.co/docs/api-inference/en/index), [transformers](https://github.com/huggingface/transformers/), [ollama](https://ollama.com/), or [LiteLLM](https://www.litellm.ai/). +Once you have these two arguments, `tools` and `model`, you can create an agent and run it. You can use any LLM you'd like, either through [Hugging Face API](https://huggingface.co/docs/api-inference/en/index), [transformers](https://github.com/huggingface/transformers/), [ollama](https://ollama.com/), [LiteLLM](https://www.litellm.ai/), or [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service). Hugging Face API is free to use without a token, but then it will have a rate limitation. -To access gated models or rise your rate limits with a PRO account, you need to set the environment variable `HF_TOKEN` or pass `token` variable upon initialization of `HfApiModel`. +To access gated models or raise your rate limits with a PRO account, you need to set the environment variable `HF_TOKEN` or pass `token` variable upon initialization of `HfApiModel`. 
You can get your token from your [settings page](https://huggingface.co/settings/tokens) ```python from smolagents import CodeAgent, HfApiModel -model_id = "meta-llama/Llama-3.3-70B-Instruct" +model_id = "meta-llama/Llama-3.3-70B-Instruct" -model = HfApiModel(model_id=model_id, token="") +model = HfApiModel(model_id=model_id, token="") # You can choose to not pass any model_id to HfApiModel to use a default free model agent = CodeAgent(tools=[], model=model, add_base_tools=True) agent.run( @@ -55,6 +56,7 @@ agent.run( ```python +# !pip install smolagents[transformers] from smolagents import CodeAgent, TransformersModel model_id = "meta-llama/Llama-3.2-3B-Instruct" @@ -72,6 +74,7 @@ agent.run( To use `LiteLLMModel`, you need to set the environment variable `ANTHROPIC_API_KEY` or `OPENAI_API_KEY`, or pass `api_key` variable upon initialization. ```python +# !pip install smolagents[litellm] from smolagents import CodeAgent, LiteLLMModel model = LiteLLMModel(model_id="anthropic/claude-3-5-sonnet-latest", api_key="YOUR_ANTHROPIC_API_KEY") # Could use 'gpt-4o' @@ -85,6 +88,7 @@ agent.run( ```python +# !pip install smolagents[litellm] from smolagents import CodeAgent, LiteLLMModel model = LiteLLMModel( @@ -100,6 +104,49 @@ agent.run( "Could you give me the 118th number in the Fibonacci sequence?", ) ``` + + + +To connect to Azure OpenAI, you can either use `AzureOpenAIServerModel` directly, or use `LiteLLMModel` and configure it accordingly. + +To initialize an instance of `AzureOpenAIServerModel`, you need to pass your model deployment name and then either pass the `azure_endpoint`, `api_key`, and `api_version` arguments, or set the environment variables `AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_API_KEY`, and `OPENAI_API_VERSION`. 
+ +```python +# !pip install smolagents[openai] +from smolagents import CodeAgent, AzureOpenAIServerModel + +model = AzureOpenAIServerModel(model_id="gpt-4o-mini") +agent = CodeAgent(tools=[], model=model, add_base_tools=True) + +agent.run( + "Could you give me the 118th number in the Fibonacci sequence?", +) +``` + +Similarly, you can configure `LiteLLMModel` to connect to Azure OpenAI as follows: + +- pass your model deployment name as `model_id`, and make sure to prefix it with `azure/` +- make sure to set the environment variable `AZURE_API_VERSION` +- either pass the `api_base` and `api_key` arguments, or set the environment variables `AZURE_API_KEY`, and `AZURE_API_BASE` + +```python +import os +from smolagents import CodeAgent, LiteLLMModel + +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="gpt-35-turbo-16k-deployment" # example of deployment name + +os.environ["AZURE_API_KEY"] = "" # api_key +os.environ["AZURE_API_BASE"] = "" # "https://example-endpoint.openai.azure.com" +os.environ["AZURE_API_VERSION"] = "" # "2024-10-01-preview" + +model = LiteLLMModel(model_id="azure/" + AZURE_OPENAI_CHAT_DEPLOYMENT_NAME) +agent = CodeAgent(tools=[], model=model, add_base_tools=True) + +agent.run( + "Could you give me the 118th number in the Fibonacci sequence?", +) +``` + diff --git a/docs/source/en/index.md b/docs/source/en/index.md index 170c0222b..90f5c7845 100644 --- a/docs/source/en/index.md +++ b/docs/source/en/index.md @@ -29,7 +29,7 @@ This library offers: ЁЯзСтАНЁЯТ╗ **First-class support for Code Agents**, i.e. agents that write their actions in code (as opposed to "agents being used to write code"), [read more here](tutorials/secure_code_execution). -ЁЯдЧ **Hub integrations**: you can share and load tools to/from the Hub, and more is to come! +ЁЯдЧ **Hub integrations**: you can share and load Gradio Spaces as tools to/from the Hub, and more is to come!
diff --git a/docs/source/en/reference/agents.md b/docs/source/en/reference/agents.md index 76b2ecb6b..77a0df176 100644 --- a/docs/source/en/reference/agents.md +++ b/docs/source/en/reference/agents.md @@ -35,7 +35,6 @@ We provide two types of agents, based on the main [`Agent`] class. Both require arguments `model` and list of tools `tools` at initialization. - ### Classes of agents [[autodoc]] MultiStepAgent @@ -44,7 +43,6 @@ Both require arguments `model` and list of tools `tools` at initialization. [[autodoc]] ToolCallingAgent - ### ManagedAgent [[autodoc]] ManagedAgent @@ -55,6 +53,9 @@ Both require arguments `model` and list of tools `tools` at initialization. ### GradioUI +> [!TIP] +> You must have `gradio` installed to use the UI. Please run `pip install smolagents[gradio]` if it's not the case. + [[autodoc]] GradioUI ## Models @@ -99,6 +100,9 @@ print(model([{"role": "user", "content": "Ok!"}], stop_sequences=["great"])) >>> What a ``` +> [!TIP] +> You must have `transformers` and `torch` installed on your machine. Please run `pip install smolagents[transformers]` if it's not the case. + [[autodoc]] TransformersModel ### HfApiModel @@ -142,7 +146,7 @@ print(model(messages)) [[autodoc]] LiteLLMModel -### OpenAiServerModel +### OpenAIServerModel This class lets you call any OpenAIServer compatible model. Here's how you can set it (you can customise the `api_base` url to point to another server): @@ -154,4 +158,29 @@ model = OpenAIServerModel( api_base="https://api.openai.com/v1", api_key=os.environ["OPENAI_API_KEY"], ) -``` \ No newline at end of file +``` + +[[autodoc]] OpenAIServerModel + +### AzureOpenAIServerModel + +`AzureOpenAIServerModel` allows you to connect to any Azure OpenAI deployment. 
+ +Below you can find an example of how to set it up, note that you can omit the `azure_endpoint`, `api_key`, and `api_version` arguments, provided you've set the corresponding environment variables -- `AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_API_KEY`, and `OPENAI_API_VERSION`. + +Pay attention to the lack of an `AZURE_` prefix for `OPENAI_API_VERSION`, this is due to the way the underlying [openai](https://github.com/openai/openai-python) package is designed. + +```py +import os + +from smolagents import AzureOpenAIServerModel + +model = AzureOpenAIServerModel( + model_id = os.environ.get("AZURE_OPENAI_MODEL"), + azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"), + api_key=os.environ.get("AZURE_OPENAI_API_KEY"), + api_version=os.environ.get("OPENAI_API_VERSION") +) +``` + +[[autodoc]] AzureOpenAIServerModel \ No newline at end of file diff --git a/docs/source/en/tutorials/building_good_agents.md b/docs/source/en/tutorials/building_good_agents.md index 6cef92d15..bc5165605 100644 --- a/docs/source/en/tutorials/building_good_agents.md +++ b/docs/source/en/tutorials/building_good_agents.md @@ -273,7 +273,7 @@ image_generation_tool = load_tool("m-ric/text-to-image", trust_remote_code=True) search_tool = DuckDuckGoSearchTool() agent = CodeAgent( - tools=[search_tool], + tools=[search_tool, image_generation_tool], model=HfApiModel("Qwen/Qwen2.5-72B-Instruct"), planning_interval=3 # This is where you activate planning! ) diff --git a/docs/source/en/tutorials/inspect_runs.md b/docs/source/en/tutorials/inspect_runs.md index 021cf7ba6..1fef9be55 100644 --- a/docs/source/en/tutorials/inspect_runs.md +++ b/docs/source/en/tutorials/inspect_runs.md @@ -34,7 +34,14 @@ We've adopted the [OpenTelemetry](https://opentelemetry.io/) standard for instru This means that you can just run some instrumentation code, then run your agents normally, and everything gets logged into your platform. -Here's how it goes: +Here's how it then looks like on the platform: + +
+ +
+ + +### Setting up telemetry with Arize AI Phoenix First install the required packages. Here we install [Phoenix by Arize AI](https://github.com/Arize-ai/phoenix) because that's a good solution to collect and inspect the logs, but there are other OpenTelemetry-compatible platforms that you could use for this collection & inspection part. ```shell @@ -97,7 +104,8 @@ manager_agent.run( "If the US keeps its 2024 growth rate, how many years will it take for the GDP to double?" ) ``` -And you can then navigate to `http://0.0.0.0:6006/projects/` to inspect your run! +Voil├а! +You can then navigate to `http://0.0.0.0:6006/projects/` to inspect your run! diff --git a/docs/source/en/tutorials/secure_code_execution.md b/docs/source/en/tutorials/secure_code_execution.md index 60887f63a..daa8ee900 100644 --- a/docs/source/en/tutorials/secure_code_execution.md +++ b/docs/source/en/tutorials/secure_code_execution.md @@ -60,7 +60,7 @@ For maximum security, you can use our integration with E2B to run code in a sand For this, you will need to setup your E2B account and set your `E2B_API_KEY` in your environment variables. Head to [E2B's quickstart documentation](https://e2b.dev/docs/quickstart) for more information. -Then you can install it with `pip install e2b-code-interpreter python-dotenv`. +Then you can install it with `pip install "smolagents[e2b]"`. Now you're set! diff --git a/docs/source/hi/_config.py b/docs/source/hi/_config.py new file mode 100644 index 000000000..81f6de049 --- /dev/null +++ b/docs/source/hi/_config.py @@ -0,0 +1,14 @@ +# docstyle-ignore +INSTALL_CONTENT = """ +# Installation +! pip install smolagents +# To install from source instead of the last release, comment the command above and uncomment the following one. +# ! 
pip install git+https://github.com/huggingface/smolagents.git +""" + +notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}] +black_avoid_patterns = { + "{processor_class}": "FakeProcessorClass", + "{model_class}": "FakeModelClass", + "{object_class}": "FakeObjectClass", +} diff --git a/docs/source/hi/_toctree.yml b/docs/source/hi/_toctree.yml new file mode 100644 index 000000000..653085af0 --- /dev/null +++ b/docs/source/hi/_toctree.yml @@ -0,0 +1,36 @@ +- title: Get started + sections: + - local: index + title: ЁЯдЧ Agents + - local: guided_tour + title: рдЧрд╛рдЗрдбреЗрдб рдЯреВрд░ +- title: Tutorials + sections: + - local: tutorials/building_good_agents + title: тЬи рдЕрдЪреНрдЫреЗ Agents рдХрд╛ рдирд┐рд░реНрдорд╛рдг + - local: tutorials/inspect_runs + title: ЁЯУК OpenTelemetry рдХреЗ рд╕рд╛рде runs рдХрд╛ рдирд┐рд░реАрдХреНрд╖рдг + - local: tutorials/tools + title: ЁЯЫая╕П Tools - in-depth guide + - local: tutorials/secure_code_execution + title: ЁЯЫбя╕П E2B рдХреЗ рд╕рд╛рде рдЕрдкрдиреЗ рдХреЛрдб рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рдХреЛ рд╕реБрд░рдХреНрд╖рд┐рдд рдХрд░реЗрдВ +- title: Conceptual guides + sections: + - local: conceptual_guides/intro_agents + title: ЁЯдЦ Agentic рд╕рд┐рд╕реНрдЯрдо рдХрд╛ рдкрд░рд┐рдЪрдп + - local: conceptual_guides/react + title: ЁЯдФ рдорд▓реНрдЯреА-рд╕реНрдЯреЗрдк рдПрдЬреЗрдВрдЯ рдХреИрд╕реЗ рдХрд╛рдо рдХрд░рддреЗ рд╣реИрдВ? 
+- title: Examples + sections: + - local: examples/text_to_sql + title: рд╕реЗрд▓реНрдл рдХрд░реЗрдХреНрдЯрд┐рдВрдЧ Text-to-SQL + - local: examples/rag + title: рдПрдЬреЗрдВрдЯрд┐рдХ RAG рдХреЗ рд╕рд╛рде рдЕрдкрдиреА рдЬреНрдЮрд╛рди рдЖрдзрд╛рд░рд┐рдд рдХреЛ рдорд╛рд╕реНрдЯрд░ рдХрд░реЗрдВ + - local: examples/multiagents + title: рдПрдХ рдмрд╣реБ-рдПрдЬреЗрдВрдЯ рдкреНрд░рдгрд╛рд▓реА рдХрд╛ рдЖрдпреЛрдЬрди рдХрд░реЗрдВ +- title: Reference + sections: + - local: reference/agents + title: рдПрдЬреЗрдВрдЯ рд╕реЗ рд╕рдВрдмрдВрдзрд┐рдд рдСрдмреНрдЬреЗрдХреНрдЯреНрд╕ + - local: reference/tools + title: рдЯреВрд▓реНрд╕ рд╕реЗ рд╕рдВрдмрдВрдзрд┐рдд рдСрдмреНрдЬреЗрдХреНрдЯреНрд╕ diff --git a/docs/source/hi/conceptual_guides/intro_agents.md b/docs/source/hi/conceptual_guides/intro_agents.md new file mode 100644 index 000000000..15b93798e --- /dev/null +++ b/docs/source/hi/conceptual_guides/intro_agents.md @@ -0,0 +1,115 @@ + +# Agents рдХрд╛ рдкрд░рд┐рдЪрдп + +## ЁЯдФ Agents рдХреНрдпрд╛ рд╣реИрдВ? 
+ +AI рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рд╡рд╛рд▓реА рдХрд┐рд╕реА рднреА рдХреБрд╢рд▓ рдкреНрд░рдгрд╛рд▓реА рдХреЛ LLM рдХреЛ рд╡рд╛рд╕реНрддрд╡рд┐рдХ рджреБрдирд┐рдпрд╛ рддрдХ рдХрд┐рд╕реА рдкреНрд░рдХрд╛рд░ рдХреА рдкрд╣реБрдВрдЪ рдкреНрд░рджрд╛рди рдХрд░рдиреЗ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрдЧреА: рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП рдмрд╛рд╣рд░реА рдЬрд╛рдирдХрд╛рд░реА рдкреНрд░рд╛рдкреНрдд рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдПрдХ рдЦреЛрдЬ рдЯреВрд▓ рдХреЛ рдХреЙрд▓ рдХрд░рдиреЗ рдХреА рд╕рдВрднрд╛рд╡рдирд╛, рдпрд╛ рдХрд┐рд╕реА рдХрд╛рд░реНрдп рдХреЛ рд╣рд▓ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдХреБрдЫ рдкреНрд░реЛрдЧреНрд░рд╛рдо рдкрд░ рдХрд╛рд░реНрдп рдХрд░рдиреЗ рдХреАред рджреВрд╕рд░реЗ рд╢рдмреНрджреЛрдВ рдореЗрдВ, LLM рдореЗрдВ ***agency*** рд╣реЛрдиреА рдЪрд╛рд╣рд┐рдПред рдПрдЬреЗрдВрдЯрд┐рдХ рдкреНрд░реЛрдЧреНрд░рд╛рдо LLM рдХреЗ рд▓рд┐рдП рдмрд╛рд╣рд░реА рджреБрдирд┐рдпрд╛ рдХрд╛ рдкреНрд░рд╡реЗрд╢ рджреНрд╡рд╛рд░ рд╣реИрдВред + +> [!TIP] +> AI Agents рд╡реЗ **рдкреНрд░реЛрдЧреНрд░рд╛рдо рд╣реИрдВ рдЬрд╣рд╛рдВ LLM рдЖрдЙрдЯрдкреБрдЯ рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдХреЛ рдирд┐рдпрдВрддреНрд░рд┐рдд рдХрд░рддреЗ рд╣реИрдВ**ред + +LLM рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рд╡рд╛рд▓реА рдХреЛрдИ рднреА рдкреНрд░рдгрд╛рд▓реА LLM рдЖрдЙрдЯрдкреБрдЯ рдХреЛ рдХреЛрдб рдореЗрдВ рдПрдХреАрдХреГрдд рдХрд░реЗрдЧреАред рдХреЛрдб рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдкрд░ LLM рдХреЗ рдЗрдирдкреБрдЯ рдХрд╛ рдкреНрд░рднрд╛рд╡ рд╕рд┐рд╕реНрдЯрдо рдореЗрдВ LLM рдХреА рдПрдЬреЗрдВрд╕реА рдХрд╛ рд╕реНрддрд░ рд╣реИред + +рдзреНрдпрд╛рди рджреЗрдВ рдХрд┐ рдЗрд╕ рдкрд░рд┐рднрд╛рд╖рд╛ рдХреЗ рд╕рд╛рде, "agent" рдПрдХ рдЕрд▓рдЧ, 0 рдпрд╛ 1 рдкрд░рд┐рднрд╛рд╖рд╛ рдирд╣реАрдВ рд╣реИ: рдЗрд╕рдХреЗ рдмрдЬрд╛рдп, "agency" рдПрдХ рдирд┐рд░рдВрддрд░ рд╕реНрдкреЗрдХреНрдЯреНрд░рдо рдкрд░ рд╡рд┐рдХрд╕рд┐рдд рд╣реЛрддреА рд╣реИ, рдЬреИрд╕реЗ-рдЬреИрд╕реЗ рдЖрдк рдЕрдкрдиреЗ рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдкрд░ LLM рдХреЛ рдЕрдзрд┐рдХ рдпрд╛ рдХрдо рд╢рдХреНрддрд┐ рджреЗрддреЗ рд╣реИрдВред + 
+рдиреАрдЪреЗ рджреА рдЧрдИ рддрд╛рд▓рд┐рдХрд╛ рдореЗрдВ рджреЗрдЦреЗрдВ рдХрд┐ рдХреИрд╕реЗ рдПрдЬреЗрдВрд╕реА рд╡рд┐рднрд┐рдиреНрди рдкреНрд░рдгрд╛рд▓рд┐рдпреЛрдВ рдореЗрдВ рднрд┐рдиреНрди рд╣реЛ рд╕рдХрддреА рд╣реИ: + +| рдПрдЬреЗрдВрд╕реА рд╕реНрддрд░ | рд╡рд┐рд╡рд░рдг | рдЗрд╕реЗ рдХреНрдпрд╛ рдХрд╣рд╛ рдЬрд╛рддрд╛ рд╣реИ | рдЙрджрд╛рд╣рд░рдг рдкреИрдЯрд░реНрди | +|------------|---------|-------------------|----------------| +| тШЖтШЖтШЖ | LLM рдЖрдЙрдЯрдкреБрдЯ рдХрд╛ рдкреНрд░реЛрдЧреНрд░рд╛рдо рдкреНрд░рд╡рд╛рд╣ рдкрд░ рдХреЛрдИ рдкреНрд░рднрд╛рд╡ рдирд╣реАрдВ | рд╕рд░рд▓ рдкреНрд░реЛрд╕реЗрд╕рд░ | `process_llm_output(llm_response)` | +| тШЕтШЖтШЖ | LLM рдЖрдЙрдЯрдкреБрдЯ if/else рд╕реНрд╡рд┐рдЪ рдирд┐рд░реНрдзрд╛рд░рд┐рдд рдХрд░рддрд╛ рд╣реИ | рд░рд╛рдЙрдЯрд░ | `if llm_decision(): path_a() else: path_b()` | +| тШЕтШЕтШЖ | LLM рдЖрдЙрдЯрдкреБрдЯ рдлрдВрдХреНрд╢рди рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рдирд┐рд░реНрдзрд╛рд░рд┐рдд рдХрд░рддрд╛ рд╣реИ | рдЯреВрд▓ рдХреЙрд▓рд░ | `run_function(llm_chosen_tool, llm_chosen_args)` | +| тШЕтШЕтШЕ | LLM рдЖрдЙрдЯрдкреБрдЯ рдкреБрдирд░рд╛рд╡реГрддреНрддрд┐ рдФрд░ рдкреНрд░реЛрдЧреНрд░рд╛рдо рдХреА рдирд┐рд░рдВрддрд░рддрд╛ рдХреЛ рдирд┐рдпрдВрддреНрд░рд┐рдд рдХрд░рддрд╛ рд╣реИ | рдорд▓реНрдЯреА-рд╕реНрдЯреЗрдк рдПрдЬреЗрдВрдЯ | `while llm_should_continue(): execute_next_step()` | +| тШЕтШЕтШЕ | рдПрдХ рдПрдЬреЗрдВрдЯрд┐рдХ рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рджреВрд╕рд░реЗ рдПрдЬреЗрдВрдЯрд┐рдХ рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдХреЛ рд╢реБрд░реВ рдХрд░ рд╕рдХрддрд╛ рд╣реИ | рдорд▓реНрдЯреА-рдПрдЬреЗрдВрдЯ | `if llm_trigger(): execute_agent()` | + +рдорд▓реНрдЯреА-рд╕реНрдЯреЗрдк agent рдХреА рдпрд╣ рдХреЛрдб рд╕рдВрд░рдЪрдирд╛ рд╣реИ: + +```python +memory = [user_defined_task] +while llm_should_continue(memory): # рдпрд╣ рд▓реВрдк рдорд▓реНрдЯреА-рд╕реНрдЯреЗрдк рднрд╛рдЧ рд╣реИ + action = llm_get_next_action(memory) # рдпрд╣ рдЯреВрд▓-рдХреЙрд▓рд┐рдВрдЧ рднрд╛рдЧ рд╣реИ + observations = execute_action(action) + memory += [action, 
observations] +``` + +рдпрд╣ рдПрдЬреЗрдВрдЯрд┐рдХ рд╕рд┐рд╕реНрдЯрдо рдПрдХ рд▓реВрдк рдореЗрдВ рдЪрд▓рддрд╛ рд╣реИ, рдкреНрд░рддреНрдпреЗрдХ рдЪрд░рдг рдореЗрдВ рдПрдХ рдирдИ рдХреНрд░рд┐рдпрд╛ рдХреЛ рд╢реБрд░реВ рдХрд░рддрд╛ рд╣реИ (рдХреНрд░рд┐рдпрд╛ рдореЗрдВ рдХреБрдЫ рдкреВрд░реНрд╡-рдирд┐рд░реНрдзрд╛рд░рд┐рдд *tools* рдХреЛ рдХреЙрд▓ рдХрд░рдирд╛ рд╢рд╛рдорд┐рд▓ рд╣реЛ рд╕рдХрддрд╛ рд╣реИ рдЬреЛ рдХреЗрд╡рд▓ рдлрдВрдХреНрд╢рдВрд╕ рд╣реИрдВ), рдЬрдм рддрдХ рдХрд┐ рдЙрд╕рдХреЗ рдЕрд╡рд▓реЛрдХрди рд╕реЗ рдпрд╣ рд╕реНрдкрд╖реНрдЯ рди рд╣реЛ рдЬрд╛рдП рдХрд┐ рджрд┐рдП рдЧрдП рдХрд╛рд░реНрдп рдХреЛ рд╣рд▓ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдПрдХ рд╕рдВрддреЛрд╖рдЬрдирдХ рд╕реНрдерд┐рддрд┐ рдкреНрд░рд╛рдкреНрдд рдХрд░ рд▓реА рдЧрдИ рд╣реИред + +## тЬЕ Agents рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрдм рдХрд░реЗрдВ / тЫФ рдХрдм рдЙрдирд╕реЗ рдмрдЪреЗрдВ + +Agents рддрдм рдЙрдкрдпреЛрдЧреА рд╣реЛрддреЗ рд╣реИрдВ рдЬрдм рдЖрдкрдХреЛ рдХрд┐рд╕реА рдРрдк рдХреЗ рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдХреЛ рдирд┐рд░реНрдзрд╛рд░рд┐рдд рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП LLM рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИред рд▓реЗрдХрд┐рди рд╡реЗ рдЕрдХреНрд╕рд░ рдЬрд░реВрд░рдд рд╕реЗ рдЬреНрдпрд╛рджрд╛ рд╣реЛрддреЗ рд╣реИрдВред рд╕рд╡рд╛рд▓ рдпрд╣ рд╣реИ рдХрд┐, рдХреНрдпрд╛ рдореБрдЭреЗ рд╡рд╛рд╕реНрддрд╡ рдореЗрдВ рджрд┐рдП рдЧрдП рдХрд╛рд░реНрдп рдХреЛ рдХреБрд╢рд▓рддрд╛рдкреВрд░реНрд╡рдХ рд╣рд▓ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдореЗрдВ рд▓рдЪреАрд▓реЗрдкрди рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реИ? 
+рдпрджрд┐ рдкреВрд░реНрд╡-рдирд┐рд░реНрдзрд╛рд░рд┐рдд рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдмрд╣реБрдд рдмрд╛рд░ рд╡рд┐рдлрд▓ рд╣реЛрддрд╛ рд╣реИ, рддреЛ рдЗрд╕рдХрд╛ рдорддрд▓рдм рд╣реИ рдХрд┐ рдЖрдкрдХреЛ рдЕрдзрд┐рдХ рд▓рдЪреАрд▓реЗрдкрди рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реИред + +рдЖрдЗрдП рдПрдХ рдЙрджрд╛рд╣рд░рдг рд▓реЗрддреЗ рд╣реИрдВ: рдорд╛рди рд▓реАрдЬрд┐рдП рдЖрдк рдПрдХ рдРрдк рдмрдирд╛ рд░рд╣реЗ рд╣реИрдВ рдЬреЛ рдПрдХ рд╕рд░реНрдлрд┐рдВрдЧ рдЯреНрд░рд┐рдк рд╡реЗрдмрд╕рд╛рдЗрдЯ рдкрд░ рдЧреНрд░рд╛рд╣рдХ рдЕрдиреБрд░реЛрдзреЛрдВ рдХреЛ рд╕рдВрднрд╛рд▓рддрд╛ рд╣реИред + +рдЖрдк рдкрд╣рд▓реЗ рд╕реЗ рдЬрд╛рди рд╕рдХрддреЗ рд╣реИрдВ рдХрд┐ рдЕрдиреБрд░реЛрдз 2 рдореЗрдВ рд╕реЗ рдХрд┐рд╕реА рдПрдХ рд╢реНрд░реЗрдгреА рдореЗрдВ рдЖрдПрдВрдЧреЗ (рдЙрдкрдпреЛрдЧрдХрд░реНрддрд╛ рдХреА рдкрд╕рдВрдж рдХреЗ рдЖрдзрд╛рд░ рдкрд░), рдФрд░ рдЖрдкрдХреЗ рдкрд╛рд╕ рдЗрди 2 рдорд╛рдорд▓реЛрдВ рдореЗрдВ рд╕реЗ рдкреНрд░рддреНрдпреЗрдХ рдХреЗ рд▓рд┐рдП рдПрдХ рдкреВрд░реНрд╡-рдирд┐рд░реНрдзрд╛рд░рд┐рдд рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рд╣реИред + +1. рдЯреНрд░рд┐рдк рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдХреБрдЫ рдЬрд╛рдирдХрд╛рд░реА рдЪрд╛рд╣рд┐рдП? тЗТ рдЙрдиреНрд╣реЗрдВ рдЕрдкрдиреЗ рдиреЙрд▓реЗрдЬ рдмреЗрд╕ рдореЗрдВ рдЦреЛрдЬ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдПрдХ рд╕рд░реНрдЪ рдмрд╛рд░ рддрдХ рдкрд╣реБрдВрдЪ рджреЗрдВ +2. рд╕реЗрд▓реНрд╕ рдЯреАрдо рд╕реЗ рдмрд╛рдд рдХрд░рдирд╛ рдЪрд╛рд╣рддреЗ рд╣реИрдВ? тЗТ рдЙрдиреНрд╣реЗрдВ рдПрдХ рд╕рдВрдкрд░реНрдХ рдлреЙрд░реНрдо рдореЗрдВ рдЯрд╛рдЗрдк рдХрд░рдиреЗ рджреЗрдВред + +рдпрджрд┐ рд╡рд╣ рдирд┐рд░реНрдзрд╛рд░рдгрд╛рддреНрдордХ рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рд╕рднреА рдкреНрд░рд╢реНрдиреЛрдВ рдХреЗ рд▓рд┐рдП рдлрд┐рдЯ рдмреИрдарддрд╛ рд╣реИ, рддреЛ рдмреЗрд╢рдХ рдмрд╕ рд╕рдм рдХреБрдЫ рдХреЛрдб рдХрд░реЗрдВ! 
рдпрд╣ рдЖрдкрдХреЛ рдПрдХ 100% рд╡рд┐рд╢реНрд╡рд╕рдиреАрдп рд╕рд┐рд╕реНрдЯрдо рджреЗрдЧрд╛ рдФрд░ рдПрд▓рдПрд▓рдПрдо рджреНрд╡рд╛рд░рд╛ рдЕрдирдкреЗрдХреНрд╖рд┐рдд рдХрд╛рд░реНрдпрдкреНрд░рд╡рд╛рд╣ рдореЗрдВ рд╣рд╕реНрддрдХреНрд╖реЗрдк рдХрд░рдиреЗ рд╕реЗ рддреНрд░реБрдЯрд┐рдпреЛрдВ рдХрд╛ рдХреЛрдИ рдЬреЛрдЦрд┐рдо рдирд╣реАрдВ рд╣реЛрдЧрд╛ред рд╕рд╛рдзрд╛рд░рдгрддрд╛ рдФрд░ рдордЬрдмреВрддреА рдХреЗ рд▓рд┐рдП, рд╕рд▓рд╛рд╣ рджреА рдЬрд╛рддреА рд╣реИ рдХрд┐ рдПрдЬреЗрдВрдЯрд┐рдХ рд╡реНрдпрд╡рд╣рд╛рд░ рдХрд╛ рдЙрдкрдпреЛрдЧ рди рдХрд┐рдпрд╛ рдЬрд╛рдПред + +рд▓реЗрдХрд┐рди рдХреНрдпрд╛ рд╣реЛрдЧрд╛ рдЕрдЧрд░ рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдХреЛ рдкрд╣рд▓реЗ рд╕реЗ рдЗрддрдиреА рдЕрдЪреНрдЫреА рддрд░рд╣ рд╕реЗ рдирд┐рд░реНрдзрд╛рд░рд┐рдд рдирд╣реАрдВ рдХрд┐рдпрд╛ рдЬрд╛ рд╕рдХрддрд╛? + +рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП, рдПрдХ рдЙрдкрдпреЛрдЧрдХрд░реНрддрд╛ рдкреВрдЫрдирд╛ рдЪрд╛рд╣рддрд╛ рд╣реИ: `"рдореИрдВ рд╕реЛрдорд╡рд╛рд░ рдХреЛ рдЖ рд╕рдХрддрд╛ рд╣реВрдВ, рд▓реЗрдХрд┐рди рдореИрдВ рдЕрдкрдирд╛ рдкрд╛рд╕рдкреЛрд░реНрдЯ рднреВрд▓ рдЧрдпрд╛ рдЬрд┐рд╕рд╕реЗ рдореБрдЭреЗ рдмреБрдзрд╡рд╛рд░ рддрдХ рджреЗрд░ рд╣реЛ рд╕рдХрддреА рд╣реИ, рдХреНрдпрд╛ рдЖрдк рдореБрдЭреЗ рдФрд░ рдореЗрд░реА рдЪреАрдЬреЛрдВ рдХреЛ рдордВрдЧрд▓рд╡рд╛рд░ рд╕реБрдмрд╣ рд╕рд░реНрдл рдХрд░рдиреЗ рд▓реЗ рдЬрд╛ рд╕рдХрддреЗ рд╣реИрдВ, рдХреНрдпрд╛ рдореБрдЭреЗ рдХреИрдВрд╕рд▓реЗрд╢рди рдЗрдВрд╢реНрдпреЛрд░реЗрдВрд╕ рдорд┐рд▓ рд╕рдХрддрд╛ рд╣реИ?"` рдпрд╣ рдкреНрд░рд╢реНрди рдХрдИ рдХрд╛рд░рдХреЛрдВ рдкрд░ рдирд┐рд░реНрднрд░ рдХрд░рддрд╛ рд╣реИ, рдФрд░ рд╢рд╛рдпрдж рдКрдкрд░ рджрд┐рдП рдЧрдП рдкреВрд░реНрд╡-рдирд┐рд░реНрдзрд╛рд░рд┐рдд рдорд╛рдирджрдВрдбреЛрдВ рдореЗрдВ рд╕реЗ рдХреЛрдИ рднреА рдЗрд╕ рдЕрдиреБрд░реЛрдз рдХреЗ рд▓рд┐рдП рдкрд░реНрдпрд╛рдкреНрдд рдирд╣реАрдВ рд╣реЛрдЧрд╛ред + +рдпрджрд┐ рдкреВрд░реНрд╡-рдирд┐рд░реНрдзрд╛рд░рд┐рдд рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдмрд╣реБрдд рдмрд╛рд░ рд╡рд┐рдлрд▓ рд╣реЛрддрд╛ рд╣реИ, рддреЛ рдЗрд╕рдХрд╛ рдорддрд▓рдм рд╣реИ рдХрд┐ рдЖрдкрдХреЛ 
рдЕрдзрд┐рдХ рд▓рдЪреАрд▓реЗрдкрди рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реИред + +рдпрд╣реАрдВ рдкрд░ рдПрдХ рдПрдЬреЗрдВрдЯрд┐рдХ рд╕реЗрдЯрдЕрдк рдорджрдж рдХрд░рддрд╛ рд╣реИред + +рдКрдкрд░ рджрд┐рдП рдЧрдП рдЙрджрд╛рд╣рд░рдг рдореЗрдВ, рдЖрдк рдмрд╕ рдПрдХ рдорд▓реНрдЯреА-рд╕реНрдЯреЗрдк agent рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВ рдЬрд┐рд╕рдХреЗ рдкрд╛рд╕ рдореМрд╕рдо рдкреВрд░реНрд╡рд╛рдиреБрдорд╛рди рдХреЗ рд▓рд┐рдП рдПрдХ рдореМрд╕рдо API, рдпрд╛рддреНрд░рд╛ рдХреА рджреВрд░реА рдЬрд╛рдирдиреЗ рдХреЗ рд▓рд┐рдП рдХреЗ рд▓рд┐рдП Google Maps API, рдПрдХ рдХрд░реНрдордЪрд╛рд░реА рдЙрдкрд▓рдмреНрдзрддрд╛ рдбреИрд╢рдмреЛрд░реНрдб рдФрд░ рдЖрдкрдХреЗ рдиреЙрд▓реЗрдЬ рдмреЗрд╕ рдкрд░ рдПрдХ RAG рд╕рд┐рд╕реНрдЯрдо рддрдХ рдкрд╣реБрдВрдЪ рд╣реИред + +рд╣рд╛рд▓ рд╣реА рддрдХ, рдХрдВрдкреНрдпреВрдЯрд░ рдкреНрд░реЛрдЧреНрд░рд╛рдо рдкреВрд░реНрд╡-рдирд┐рд░реНрдзрд╛рд░рд┐рдд рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рддрдХ рд╕реАрдорд┐рдд рдереЗ, if/else рд╕реНрд╡рд┐рдЪ рдХрд╛ +рдвреЗрд░ рд▓рдЧрд╛рдХрд╛рд░ рдЬрдЯрд┐рд▓рддрд╛ рдХреЛ рд╕рдВрднрд╛рд▓рдиреЗ рдХрд╛ рдкреНрд░рдпрд╛рд╕ рдХрд░ рд░рд╣реЗ рдереЗред рд╡реЗ рдмреЗрд╣рдж рд╕рдВрдХреАрд░реНрдг рдХрд╛рд░реНрдпреЛрдВ рдкрд░ рдХреЗрдВрджреНрд░рд┐рдд рдереЗ, рдЬреИрд╕реЗ "рдЗрди рд╕рдВрдЦреНрдпрд╛рдУрдВ рдХрд╛ рдпреЛрдЧ рдирд┐рдХрд╛рд▓реЗрдВ" рдпрд╛ "рдЗрд╕ рдЧреНрд░рд╛рдлрд╝ рдореЗрдВ рд╕рдмрд╕реЗ рдЫреЛрдЯрд╛ рд░рд╛рд╕реНрддрд╛ рдЦреЛрдЬреЗрдВ"ред рд▓реЗрдХрд┐рди рд╡рд╛рд╕реНрддрд╡ рдореЗрдВ, рдЕрдзрд┐рдХрд╛рдВрд╢ рд╡рд╛рд╕реНрддрд╡рд┐рдХ рдЬреАрд╡рди рдХреЗ рдХрд╛рд░реНрдп, рдЬреИрд╕реЗ рдКрдкрд░ рджрд┐рдпрд╛ рдЧрдпрд╛ рд╣рдорд╛рд░рд╛ рдпрд╛рддреНрд░рд╛ рдЙрджрд╛рд╣рд░рдг, рдкреВрд░реНрд╡-рдирд┐рд░реНрдзрд╛рд░рд┐рдд рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдореЗрдВ рдлрд┐рдЯ рдирд╣реАрдВ рд╣реЛрддреЗ рд╣реИрдВред рдПрдЬреЗрдВрдЯрд┐рдХ рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЛрдЧреНрд░рд╛рдо рдХреЗ рд▓рд┐рдП рд╡рд╛рд╕реНрддрд╡рд┐рдХ рджреБрдирд┐рдпрд╛ рдХреЗ рдХрд╛рд░реНрдпреЛрдВ рдХреА рд╡рд┐рд╢рд╛рд▓ рджреБрдирд┐рдпрд╛ рдЦреЛрд▓рддреЗ рд╣реИрдВ! 
+ +## рдХреНрдпреЛрдВ `smolagents`? + +рдХреБрдЫ рд▓реЛ-рд▓реЗрд╡рд▓ рдПрдЬреЗрдВрдЯрд┐рдХ рдЙрдкрдпреЛрдЧ рдХреЗ рдорд╛рдорд▓реЛрдВ рдХреЗ рд▓рд┐рдП, рдЬреИрд╕реЗ рдЪреЗрди рдпрд╛ рд░рд╛рдЙрдЯрд░, рдЖрдк рд╕рднреА рдХреЛрдб рдЦреБрдж рд▓рд┐рдЦ рд╕рдХрддреЗ рд╣реИрдВред рдЖрдк рдЗрд╕ рддрд░рд╣ рд╕реЗ рдмрд╣реБрдд рдмреЗрд╣рддрд░ рд╣реЛрдВрдЧреЗ, рдХреНрдпреЛрдВрдХрд┐ рдпрд╣ рдЖрдкрдХреЛ рдЕрдкрдиреЗ рд╕рд┐рд╕реНрдЯрдо рдХреЛ рдмреЗрд╣рддрд░ рдврдВрдЧ рд╕реЗ рдирд┐рдпрдВрддреНрд░рд┐рдд рдФрд░ рд╕рдордЭрдиреЗ рдХреА рдЕрдиреБрдорддрд┐ рджреЗрдЧрд╛ред + +рд▓реЗрдХрд┐рди рдЬреИрд╕реЗ рд╣реА рдЖрдк рдЕрдзрд┐рдХ рдЬрдЯрд┐рд▓ рд╡реНрдпрд╡рд╣рд╛рд░реЛрдВ рдХреА рдУрд░ рдмрдврд╝рддреЗ рд╣реИрдВ рдЬреИрд╕реЗ рдХрд┐ LLM рдХреЛ рдПрдХ рдлрд╝рдВрдХреНрд╢рди рдХреЙрд▓ рдХрд░рдиреЗ рджреЗрдирд╛ (рдпрд╣ "tool calling" рд╣реИ) рдпрд╛ LLM рдХреЛ рдПрдХ while рд▓реВрдк рдЪрд▓рд╛рдиреЗ рджреЗрдирд╛ ("multi-step agent"), рдХреБрдЫ рдПрдмреНрд╕рдЯреНрд░реИрдХреНрд╢рдиреНрд╕ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИ: +- рдЯреВрд▓ рдХреЙрд▓рд┐рдВрдЧ рдХреЗ рд▓рд┐рдП, рдЖрдкрдХреЛ рдПрдЬреЗрдВрдЯ рдХреЗ рдЖрдЙрдЯрдкреБрдЯ рдХреЛ рдкрд╛рд░реНрд╕ рдХрд░рдиреЗ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИ, рдЗрд╕рд▓рд┐рдП рдЗрд╕ рдЖрдЙрдЯрдкреБрдЯ рдХреЛ рдПрдХ рдкреВрд░реНрд╡-рдирд┐рд░реНрдзрд╛рд░рд┐рдд рдкреНрд░рд╛рд░реВрдк рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИ рдЬреИрд╕реЗ "рд╡рд┐рдЪрд╛рд░: рдореБрдЭреЗ 'get_weather' рдЯреВрд▓ рдХреЙрд▓ рдХрд░рдирд╛ рдЪрд╛рд╣рд┐рдПред рдХреНрд░рд┐рдпрд╛: get_weather(Paris)ред", рдЬрд┐рд╕реЗ рдЖрдк рдПрдХ рдкреВрд░реНрд╡-рдирд┐рд░реНрдзрд╛рд░рд┐рдд рдлрд╝рдВрдХреНрд╢рди рдХреЗ рд╕рд╛рде рдкрд╛рд░реНрд╕ рдХрд░рддреЗ рд╣реИрдВ, рдФрд░ LLM рдХреЛ рджрд┐рдП рдЧрдП рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдХреЛ рдЗрд╕ рдкреНрд░рд╛рд░реВрдк рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рд╕реВрдЪрд┐рдд рдХрд░рдирд╛ рдЪрд╛рд╣рд┐рдПред +- рдПрдХ рдорд▓реНрдЯреА-рд╕реНрдЯреЗрдк рдПрдЬреЗрдВрдЯ рдХреЗ рд▓рд┐рдП рдЬрд╣рд╛рдВ LLM рдЖрдЙрдЯрдкреБрдЯ 
рд▓реВрдк рдХреЛ рдирд┐рд░реНрдзрд╛рд░рд┐рдд рдХрд░рддрд╛ рд╣реИ, рдЖрдкрдХреЛ рдкрд┐рдЫрд▓реЗ рд▓реВрдк рдЗрдЯрд░реЗрд╢рди рдореЗрдВ рдХреНрдпрд╛ рд╣реБрдЖ рдЗрд╕рдХреЗ рдЖрдзрд╛рд░ рдкрд░ LLM рдХреЛ рдПрдХ рдЕрд▓рдЧ рдкреНрд░реЙрдореНрдкреНрдЯ рджреЗрдиреЗ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИ: рдЗрд╕рд▓рд┐рдП рдЖрдкрдХреЛ рдХрд┐рд╕реА рдкреНрд░рдХрд╛рд░ рдХреА рдореЗрдореЛрд░реА рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИред + +рдЗрди рджреЛ рдЙрджрд╛рд╣рд░рдгреЛрдВ рдХреЗ рд╕рд╛рде, рд╣рдордиреЗ рдкрд╣рд▓реЗ рд╣реА рдХреБрдЫ рдЪреАрдЬреЛрдВ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рдХрд╛ рдкрддрд╛ рд▓рдЧрд╛ рд▓рд┐рдпрд╛: + +- рдмреЗрд╢рдХ, рдПрдХ LLM рдЬреЛ рд╕рд┐рд╕реНрдЯрдо рдХреЛ рдкрд╛рд╡рд░ рджреЗрдиреЗ рд╡рд╛рд▓реЗ рдЗрдВрдЬрди рдХреЗ рд░реВрдк рдореЗрдВ рдХрд╛рд░реНрдп рдХрд░рддрд╛ рд╣реИ +- рдПрдЬреЗрдВрдЯ рджреНрд╡рд╛рд░рд╛ рдПрдХреНрд╕реЗрд╕ рдХрд┐рдП рдЬрд╛ рд╕рдХрдиреЗ рд╡рд╛рд▓реЗ рдЯреВрд▓реНрд╕ рдХреА рдПрдХ рд╕реВрдЪреА +- рдПрдХ рдкрд╛рд░реНрд╕рд░ рдЬреЛ LLM рдЖрдЙрдЯрдкреБрдЯ рд╕реЗ рдЯреВрд▓ рдХреЙрд▓ рдХреЛ рдирд┐рдХрд╛рд▓рддрд╛ рд╣реИ +- рдПрдХ рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЛрдореНрдкреНрдЯ рдЬреЛ рдкрд╛рд░реНрд╕рд░ рдХреЗ рд╕рд╛рде рд╕рд┐рдВрдХреНрд░рдирд╛рдЗрдЬрд╝ рд╣реЛрддрд╛ рд╣реИ +- рдПрдХ рдореЗрдореЛрд░реА + +рд▓реЗрдХрд┐рди рд░реБрдХрд┐рдП, рдЪреВрдВрдХрд┐ рд╣рдо рдирд┐рд░реНрдгрдпреЛрдВ рдореЗрдВ LLM рдХреЛ рдЬрдЧрд╣ рджреЗрддреЗ рд╣реИрдВ, рдирд┐рд╢реНрдЪрд┐рдд рд░реВрдк рд╕реЗ рд╡реЗ рдЧрд▓рддрд┐рдпрд╛рдВ рдХрд░реЗрдВрдЧреЗ: рдЗрд╕рд▓рд┐рдП рд╣рдореЗрдВ рдПрд░рд░ рд▓реЙрдЧрд┐рдВрдЧ рдФрд░ рдкреБрдирдГ рдкреНрд░рдпрд╛рд╕ рддрдВрддреНрд░ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реИред + +рдпреЗ рд╕рднреА рддрддреНрд╡ рдПрдХ рдЕрдЪреНрдЫреЗ рдХрд╛рдордХрд╛рдЬреА рд╕рд┐рд╕реНрдЯрдо рдмрдирд╛рдиреЗ рдХреЗ рд▓рд┐рдП рдПрдХ-рджреВрд╕рд░реЗ рд╕реЗ рдШрдирд┐рд╖реНрда рд░реВрдк рд╕реЗ рдЬреБрдбрд╝реЗ рд╣реБрдП рд╣реИрдВред рдпрд╣реА рдХрд╛рд░рдг рд╣реИ рдХрд┐ рд╣рдордиреЗ рддрдп рдХрд┐рдпрд╛ рдХрд┐ рдЗрди рд╕рднреА рдЪреАрдЬреЛрдВ 
рдХреЛ рдПрдХ рд╕рд╛рде рдХрд╛рдо рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдмреБрдирд┐рдпрд╛рджреА рдирд┐рд░реНрдорд╛рдг рдмреНрд▓реЙрдХреНрд╕ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реИред + +## рдХреЛрдб Agents + +рдПрдХ рдорд▓реНрдЯреА-рд╕реНрдЯреЗрдк рдПрдЬреЗрдВрдЯ рдореЗрдВ, рдкреНрд░рддреНрдпреЗрдХ рдЪрд░рдг рдкрд░, LLM рдмрд╛рд╣рд░реА рдЯреВрд▓реНрд╕ рдХреЛ рдХреБрдЫ рдХреЙрд▓ рдХреЗ рд░реВрдк рдореЗрдВ рдПрдХ рдХреНрд░рд┐рдпрд╛ рд▓рд┐рдЦ рд╕рдХрддрд╛ рд╣реИред рдЗрди рдХреНрд░рд┐рдпрд╛рдУрдВ рдХреЛ рд▓рд┐рдЦрдиреЗ рдХреЗ рд▓рд┐рдП рдПрдХ рд╕рд╛рдорд╛рдиреНрдп рд╕реНрд╡рд░реВрдк (Anthropic, OpenAI рдФрд░ рдХрдИ рдЕрдиреНрдп рджреНрд╡рд╛рд░рд╛ рдЙрдкрдпреЛрдЧ рдХрд┐рдпрд╛ рдЬрд╛рддрд╛ рд╣реИ) рдЖрдорддреМрд░ рдкрд░ "рдЯреВрд▓реНрд╕ рдХреЗ рдирд╛рдо рдФрд░ рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рддрд░реНрдХреЛрдВ рдХреЗ JSON рдХреЗ рд░реВрдк рдореЗрдВ рдХреНрд░рд┐рдпрд╛рдПрдВ рд▓рд┐рдЦрдиреЗ" рдХреЗ рд╡рд┐рднрд┐рдиреНрди рд░реВрдк рд╣реЛрддреЗ рд╣реИрдВ, рдЬрд┐рдиреНрд╣реЗрдВ рдЖрдк рдлрд┐рд░ рдкрд╛рд░реНрд╕ рдХрд░рддреЗ рд╣реИрдВ рдпрд╣ рдЬрд╛рдирдиреЗ рдХреЗ рд▓рд┐рдП рдХрд┐ рдХреМрди рд╕рд╛ рдЯреВрд▓ рдХрд┐рди рддрд░реНрдХреЛрдВ рдХреЗ рд╕рд╛рде рдирд┐рд╖реНрдкрд╛рджрд┐рдд рдХрд░рдирд╛ рд╣реИ"ред + +[рдХрдИ](https://huggingface.co/papers/2402.01030) [рд╢реЛрдз](https://huggingface.co/papers/2411.01747) [рдкрддреНрд░реЛрдВ](https://huggingface.co/papers/2401.00812) рдиреЗ рджрд┐рдЦрд╛рдпрд╛ рд╣реИ рдХрд┐ рдХреЛрдб рдореЗрдВ рдЯреВрд▓ рдХреЙрд▓рд┐рдВрдЧ LLM рдХрд╛ рд╣реЛрдирд╛ рдмрд╣реБрдд рдмреЗрд╣рддрд░ рд╣реИред + +рдЗрд╕рдХрд╛ рдХрд╛рд░рдг рдмрд╕ рдпрд╣ рд╣реИ рдХрд┐ *рд╣рдордиреЗ рдЕрдкрдиреА рдХреЛрдб рднрд╛рд╖рд╛рдУрдВ рдХреЛ рд╡рд┐рд╢реЗрд╖ рд░реВрдк рд╕реЗ рдХрдВрдкреНрдпреВрдЯрд░ рджреНрд╡рд╛рд░рд╛ рдХрд┐рдП рдЧрдП рдХрд╛рд░реНрдпреЛрдВ рдХреЛ рд╡реНрдпрдХреНрдд рдХрд░рдиреЗ рдХрд╛ рд╕рд░реНрд╡реЛрддреНрддрдо рд╕рдВрднрд╡ рддрд░реАрдХрд╛ рдмрдирд╛рдиреЗ рдХреЗ рд▓рд┐рдП рддреИрдпрд╛рд░ рдХрд┐рдпрд╛*ред рдпрджрд┐ JSON рд╕реНрдирд┐рдкреЗрдЯреНрд╕ 
рдмреЗрд╣рддрд░ рдЕрднрд┐рд╡реНрдпрдХреНрддрд┐ рд╣реЛрддреЗ, рддреЛ JSON рд╢реАрд░реНрд╖ рдкреНрд░реЛрдЧреНрд░рд╛рдорд┐рдВрдЧ рднрд╛рд╖рд╛ рд╣реЛрддреА рдФрд░ рдкреНрд░реЛрдЧреНрд░рд╛рдорд┐рдВрдЧ рдирд░рдХ рдореЗрдВ рд╣реЛрддреАред + +рдиреАрдЪреЗ рджреА рдЧрдИ рдЫрд╡рд┐, [Executable Code Actions Elicit Better LLM Agents](https://huggingface.co/papers/2402.01030) рд╕реЗ рд▓реА рдЧрдИ рд╣реИ, рдЬреЛ рдХреЛрдб рдореЗрдВ рдХреНрд░рд┐рдпрд╛рдПрдВ рд▓рд┐рдЦрдиреЗ рдХреЗ рдХреБрдЫ рдлрд╛рдпрджреЗ рджрд░реНрд╢рд╛рддреА рд╣реИ: + + + +JSON рдЬреИрд╕реЗ рд╕реНрдирд┐рдкреЗрдЯреНрд╕ рдХреА рдмрдЬрд╛рдп рдХреЛрдб рдореЗрдВ рдХреНрд░рд┐рдпрд╛рдПрдВ рд▓рд┐рдЦрдиреЗ рд╕реЗ рдмреЗрд╣рддрд░ рдкреНрд░рд╛рдкреНрдд рд╣реЛрддрд╛ рд╣реИ: + +- **рдХрдореНрдкреЛрдЬреЗрдмрд┐рд▓рд┐рдЯреА:** рдХреНрдпрд╛ рдЖрдк JSON рдХреНрд░рд┐рдпрд╛рдУрдВ рдХреЛ рдПрдХ-рджреВрд╕рд░реЗ рдХреЗ рднреАрддрд░ рдиреЗрд╕реНрдЯ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдпрд╛ рдмрд╛рдж рдореЗрдВ рдкреБрди: рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП JSON рдХреНрд░рд┐рдпрд╛рдУрдВ рдХрд╛ рдПрдХ рд╕реЗрдЯ рдкрд░рд┐рднрд╛рд╖рд┐рдд рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдЙрд╕реА рддрд░рд╣ рдЬреИрд╕реЗ рдЖрдк рдмрд╕ рдПрдХ рдкрд╛рдпрдерди рдлрдВрдХреНрд╢рди рдкрд░рд┐рднрд╛рд╖рд┐рдд рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ? +- **рдСрдмреНрдЬреЗрдХреНрдЯ рдкреНрд░рдмрдВрдзрди:** рдЖрдк `generate_image` рдЬреИрд╕реА рдХреНрд░рд┐рдпрд╛ рдХреЗ рдЖрдЙрдЯрдкреБрдЯ рдХреЛ JSON рдореЗрдВ рдХреИрд╕реЗ рд╕реНрдЯреЛрд░ рдХрд░рддреЗ рд╣реИрдВ? 
+- **рд╕рд╛рдорд╛рдиреНрдпрддрд╛:** рдХреЛрдб рдХреЛ рд╕рд░рд▓ рд░реВрдк рд╕реЗ рдХреБрдЫ рднреА рд╡реНрдпрдХреНрдд рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдмрдирд╛рдпрд╛ рдЧрдпрд╛ рд╣реИ рдЬреЛ рдЖрдк рдХрдВрдкреНрдпреВрдЯрд░ рд╕реЗ рдХрд░рд╡рд╛ рд╕рдХрддреЗ рд╣реИрдВред +- **LLM рдкреНрд░рд╢рд┐рдХреНрд╖рдг рдбреЗрдЯрд╛ рдореЗрдВ рдкреНрд░рддрд┐рдирд┐рдзрд┐рддреНрд╡:** рдмрд╣реБрдд рд╕рд╛рд░реА рдЧреБрдгрд╡рддреНрддрд╛рдкреВрд░реНрдг рдХреЛрдб рдХреНрд░рд┐рдпрд╛рдПрдВ рдкрд╣рд▓реЗ рд╕реЗ рд╣реА LLM рдХреЗ рдЯреНрд░реЗрдирд┐рдВрдЧ рдбреЗрдЯрд╛ рдореЗрдВ рд╢рд╛рдорд┐рд▓ рд╣реИрдВ рдЬрд┐рд╕рдХрд╛ рдорддрд▓рдм рд╣реИ рдХрд┐ рд╡реЗ рдЗрд╕рдХреЗ рд▓рд┐рдП рдкрд╣рд▓реЗ рд╕реЗ рд╣реА рдкреНрд░рд╢рд┐рдХреНрд╖рд┐рдд рд╣реИрдВ! \ No newline at end of file diff --git a/docs/source/hi/conceptual_guides/react.md b/docs/source/hi/conceptual_guides/react.md new file mode 100644 index 000000000..c36f17cfe --- /dev/null +++ b/docs/source/hi/conceptual_guides/react.md @@ -0,0 +1,47 @@ + +# рдорд▓реНрдЯреА-рд╕реНрдЯреЗрдк рдПрдЬреЗрдВрдЯреНрд╕ рдХреИрд╕реЗ рдХрд╛рдо рдХрд░рддреЗ рд╣реИрдВ? 
+ +ReAct рдлреНрд░реЗрдорд╡рд░реНрдХ ([Yao et al., 2022](https://huggingface.co/papers/2210.03629)) рд╡рд░реНрддрдорд╛рди рдореЗрдВ рдПрдЬреЗрдВрдЯреНрд╕ рдмрдирд╛рдиреЗ рдХрд╛ рдореБрдЦреНрдп рджреГрд╖реНрдЯрд┐рдХреЛрдг рд╣реИред + +рдирд╛рдо рджреЛ рд╢рдмреНрджреЛрдВ, "Reason" (рддрд░реНрдХ) рдФрд░ "Act" (рдХреНрд░рд┐рдпрд╛) рдХреЗ рд╕рдВрдпреЛрдЬрди рдкрд░ рдЖрдзрд╛рд░рд┐рдд рд╣реИред рд╡рд╛рд╕реНрддрд╡ рдореЗрдВ, рдЗрд╕ рдЖрд░реНрдХрд┐рдЯреЗрдХреНрдЪрд░ рдХрд╛ рдкрд╛рд▓рди рдХрд░рдиреЗ рд╡рд╛рд▓реЗ рдПрдЬреЗрдВрдЯ рдЕрдкрдиреЗ рдХрд╛рд░реНрдп рдХреЛ рдЙрддрдиреЗ рдЪрд░рдгреЛрдВ рдореЗрдВ рд╣рд▓ рдХрд░реЗрдВрдЧреЗ рдЬрд┐рддрдиреЗ рдЖрд╡рд╢реНрдпрдХ рд╣реЛрдВ, рдкреНрд░рддреНрдпреЗрдХ рдЪрд░рдг рдореЗрдВ рдПрдХ Reasoning рдХрджрдо рд╣реЛрдЧрд╛, рдлрд┐рд░ рдПрдХ Action рдХрджрдо рд╣реЛрдЧрд╛, рдЬрд╣рд╛рдБ рдпрд╣ рдЯреВрд▓ рдХреЙрд▓реНрд╕ рддреИрдпрд╛рд░ рдХрд░реЗрдЧрд╛ рдЬреЛ рдЙрд╕реЗ рдХрд╛рд░реНрдп рдХреЛ рд╣рд▓ рдХрд░рдиреЗ рдХреЗ рдХрд░реАрдм рд▓реЗ рдЬрд╛рдПрдВрдЧреЗред + +ReAct рдкреНрд░рдХреНрд░рд┐рдпрд╛ рдореЗрдВ рдкрд┐рдЫрд▓реЗ рдЪрд░рдгреЛрдВ рдХреА рдореЗрдореЛрд░реА рд░рдЦрдирд╛ рд╢рд╛рдорд┐рд▓ рд╣реИред + +> [!TIP] +> рдорд▓реНрдЯреА-рд╕реНрдЯреЗрдк рдПрдЬреЗрдВрдЯреНрд╕ рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдЕрдзрд┐рдХ рдЬрд╛рдирдиреЗ рдХреЗ рд▓рд┐рдП [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) рдмреНрд▓реЙрдЧ рдкреЛрд╕реНрдЯ рдкрдврд╝реЗрдВред + +рдпрд╣рд╛рдБ рдПрдХ рд╡реАрдбрд┐рдпреЛ рдУрд╡рд░рд╡реНрдпреВ рд╣реИ рдХрд┐ рдпрд╣ рдХреИрд╕реЗ рдХрд╛рдо рдХрд░рддрд╛ рд╣реИ: + +
+ + +
+ +![ReAct рдПрдЬреЗрдВрдЯ рдХрд╛ рдлреНрд░реЗрдорд╡рд░реНрдХ](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/open-source-llms-as-agents/ReAct.png) + +рд╣рдо рджреЛ рдкреНрд░рдХрд╛рд░ рдХреЗ ToolCallingAgent рдХреЛ рд▓рд╛рдЧреВ рдХрд░рддреЗ рд╣реИрдВ: +- [`ToolCallingAgent`] рдЕрдкрдиреЗ рдЖрдЙрдЯрдкреБрдЯ рдореЗрдВ рдЯреВрд▓ рдХреЙрд▓ рдХреЛ JSON рдХреЗ рд░реВрдк рдореЗрдВ рдЬрдирд░реЗрдЯ рдХрд░рддрд╛ рд╣реИред +- [`CodeAgent`] ToolCallingAgent рдХрд╛ рдПрдХ рдирдпрд╛ рдкреНрд░рдХрд╛рд░ рд╣реИ рдЬреЛ рдЕрдкрдиреЗ рдЯреВрд▓ рдХреЙрд▓ рдХреЛ рдХреЛрдб рдХреЗ рдмреНрд▓реЙрдмреНрд╕ рдХреЗ рд░реВрдк рдореЗрдВ рдЬрдирд░реЗрдЯ рдХрд░рддрд╛ рд╣реИ, рдЬреЛ рдЙрди LLM рдХреЗ рд▓рд┐рдП рд╡рд╛рд╕реНрддрд╡ рдореЗрдВ рдЕрдЪреНрдЫреА рддрд░рд╣ рдХрд╛рдо рдХрд░рддрд╛ рд╣реИ рдЬрд┐рдирдХрд╛ рдХреЛрдбрд┐рдВрдЧ рдкреНрд░рджрд░реНрд╢рди рдордЬрдмреВрдд рд╣реИред + +> [!TIP] +> рд╣рдо рдПрдЬреЗрдВрдЯреНрд╕ рдХреЛ рд╡рди-рд╢реЙрдЯ рдореЗрдВ рдЪрд▓рд╛рдиреЗ рдХрд╛ рд╡рд┐рдХрд▓реНрдк рднреА рдкреНрд░рджрд╛рди рдХрд░рддреЗ рд╣реИрдВ: рдмрд╕ рдПрдЬреЗрдВрдЯ рдХреЛ рд▓реЙрдиреНрдЪ рдХрд░рддреЗ рд╕рдордп `single_step=True` рдкрд╛рд╕ рдХрд░реЗрдВ, рдЬреИрд╕реЗ `agent.run(your_task, single_step=True)` \ No newline at end of file diff --git a/docs/source/hi/examples/multiagents.md b/docs/source/hi/examples/multiagents.md new file mode 100644 index 000000000..33056c8ba --- /dev/null +++ b/docs/source/hi/examples/multiagents.md @@ -0,0 +1,199 @@ + +# рдорд▓реНрдЯреА-рдПрдЬреЗрдВрдЯ рд╕рд┐рд╕реНрдЯрдо рдХрд╛ рдЖрдпреЛрдЬрди рдХрд░реЗрдВ ЁЯдЦЁЯдЭЁЯдЦ + +[[open-in-colab]] + +рдЗрд╕ рдиреЛрдЯрдмреБрдХ рдореЗрдВ рд╣рдо рдПрдХ **рдорд▓реНрдЯреА-рдПрдЬреЗрдВрдЯ рд╡реЗрдм рдмреНрд░рд╛рдЙрдЬрд╝рд░ рдмрдирд╛рдПрдВрдЧреЗ: рдПрдХ рдПрдЬреЗрдВрдЯрд┐рдХ рд╕рд┐рд╕реНрдЯрдо рдЬрд┐рд╕рдореЗрдВ рдХрдИ рдПрдЬреЗрдВрдЯ рд╡реЗрдм рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рд╕рдорд╕реНрдпрд╛рдУрдВ рдХреЛ рд╣рд▓ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рд╕рд╣рдпреЛрдЧ рдХрд░рддреЗ рд╣реИрдВ!** + 
+рдпрд╣ рдПрдХ рд╕рд░рд▓ рд╕рдВрд░рдЪрдирд╛ рд╣реЛрдЧреА, рдЬреЛ рдкреНрд░рдмрдВрдзрд┐рдд рд╡реЗрдм рдЦреЛрдЬ рдПрдЬреЗрдВрдЯ рдХреЛ рд░реИрдк рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП `ManagedAgent` рдСрдмреНрдЬреЗрдХреНрдЯ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рддрд╛ рд╣реИ: + +``` + +----------------+ + | Manager agent | + +----------------+ + | + _______________|______________ + | | + Code interpreter +--------------------------------+ + tool | Managed agent | + | +------------------+ | + | | Web Search agent | | + | +------------------+ | + | | | | + | Web Search tool | | + | Visit webpage tool | + +--------------------------------+ +``` +рдЖрдЗрдП рдЗрд╕ рд╕рд┐рд╕реНрдЯрдо рдХреЛ рд╕реЗрдЯ рдХрд░реЗрдВред + +рдЖрд╡рд╢реНрдпрдХ рдбрд┐рдкреЗрдВрдбреЗрдВрд╕реА рдЗрдВрд╕реНрдЯреЙрд▓ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдиреАрдЪреЗ рджреА рдЧрдИ рд▓рд╛рдЗрди рдЪрд▓рд╛рдПрдВ: + +``` +!pip install markdownify duckduckgo-search smolagents --upgrade -q +``` + +HF Inference API рдХреЛ рдХреЙрд▓ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рд▓реЙрдЧрд┐рди рдХрд░реЗрдВ: + +``` +from huggingface_hub import login + +login() +``` + +тЪбя╕П рд╣рдорд╛рд░рд╛ рдПрдЬреЗрдВрдЯ [Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) рджреНрд╡рд╛рд░рд╛ рд╕рдВрдЪрд╛рд▓рд┐рдд рд╣реЛрдЧрд╛ рдЬреЛ `HfApiModel` рдХреНрд▓рд╛рд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рддрд╛ рд╣реИ рдЬреЛ HF рдХреЗ Inference API рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рддрд╛ рд╣реИ: Inference API рдХрд┐рд╕реА рднреА OS рдореЙрдбрд▓ рдХреЛ рдЬрд▓реНрджреА рдФрд░ рдЖрд╕рд╛рдиреА рд╕реЗ рдЪрд▓рд╛рдиреЗ рдХреА рдЕрдиреБрдорддрд┐ рджреЗрддрд╛ рд╣реИред + +_рдиреЛрдЯ:_ The Inference API рд╡рд┐рднрд┐рдиреНрди рдорд╛рдирджрдВрдбреЛрдВ рдХреЗ рдЖрдзрд╛рд░ рдкрд░ рдореЙрдбрд▓ рд╣реЛрд╕реНрдЯ рдХрд░рддрд╛ рд╣реИ, рдФрд░ рдбрд┐рдкреНрд▓реЙрдп рдХрд┐рдП рдЧрдП рдореЙрдбрд▓ рдмрд┐рдирд╛ рдкреВрд░реНрд╡ рд╕реВрдЪрдирд╛ рдХреЗ рдЕрдкрдбреЗрдЯ рдпрд╛ рдмрджрд▓реЗ рдЬрд╛ рд╕рдХрддреЗ рд╣реИрдВред рдЗрд╕рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдЕрдзрд┐рдХ 
рдЬрд╛рдиреЗрдВ [рдпрд╣рд╛рдВ](https://huggingface.co/docs/api-inference/supported-models)ред + +```py +model_id = "Qwen/Qwen2.5-Coder-32B-Instruct" +``` + +## ЁЯФН рдПрдХ рд╡реЗрдм рд╕рд░реНрдЪ рдЯреВрд▓ рдмрдирд╛рдПрдВ + +рд╡реЗрдм рдмреНрд░рд╛рдЙрдЬрд╝рд┐рдВрдЧ рдХреЗ рд▓рд┐рдП, рд╣рдо рдкрд╣рд▓реЗ рд╕реЗ рдореМрдЬреВрдж [`DuckDuckGoSearchTool`](https://github.com/huggingface/smolagents/blob/main/src/smolagents/default_tools.py#L151-L176) рдЯреВрд▓ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ рдЬреЛ Google search рдХреЗ рд╕рдорд╛рди рд╕реБрд╡рд┐рдзрд╛ рдкреНрд░рджрд╛рди рдХрд░рддрд╛ рд╣реИред + +рд▓реЗрдХрд┐рди рдлрд┐рд░ рд╣рдореЗрдВ `DuckDuckGoSearchTool` рджреНрд╡рд╛рд░рд╛ рдЦреЛрдЬреЗ рдЧрдП рдкреЗрдЬ рдХреЛ рджреЗрдЦрдиреЗ рдореЗрдВ рднреА рд╕рдХреНрд╖рдо рд╣реЛрдиреЗ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрдЧреАред +рдРрд╕рд╛ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рд╣рдо рд▓рд╛рдЗрдмреНрд░реЗрд░реА рдХреЗ рдмрд┐рд▓реНрдЯ-рдЗрди `VisitWebpageTool` рдХреЛ рдЗрдореНрдкреЛрд░реНрдЯ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рд▓реЗрдХрд┐рди рд╣рдо рдЗрд╕реЗ рдлрд┐рд░ рд╕реЗ рдмрдирд╛рдПрдВрдЧреЗ рдпрд╣ рджреЗрдЦрдиреЗ рдХреЗ рд▓рд┐рдП рдХрд┐ рдпрд╣ рдХреИрд╕реЗ рдХрд┐рдпрд╛ рдЬрд╛рддрд╛ рд╣реИред + +рддреЛ рдЖрдЗрдП `markdownify` рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рд╢реБрд░реВ рд╕реЗ рдЕрдкрдирд╛ `VisitWebpageTool` рдЯреВрд▓ рдмрдирд╛рдПрдВред + +```py +import re +import requests +from markdownify import markdownify +from requests.exceptions import RequestException +from smolagents import tool + + +@tool +def visit_webpage(url: str) -> str: + """Visits a webpage at the given URL and returns its content as a markdown string. + + Args: + url: The URL of the webpage to visit. + + Returns: + The content of the webpage converted to Markdown, or an error message if the request fails. 
+ """ + try: + # Send a GET request to the URL + response = requests.get(url) + response.raise_for_status() # Raise an exception for bad status codes + + # Convert the HTML content to Markdown + markdown_content = markdownify(response.text).strip() + + # Remove multiple line breaks + markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content) + + return markdown_content + + except RequestException as e: + return f"Error fetching the webpage: {str(e)}" + except Exception as e: + return f"An unexpected error occurred: {str(e)}" +``` + +рдареАрдХ рд╣реИ, рдЕрдм рдЪрд▓рд┐рдП рд╣рдорд╛рд░реЗ рдЯреВрд▓ рдХреЛ рдЯреЗрд╕реНрдЯ рдХрд░реЗрдВ! + +```py +print(visit_webpage("https://en.wikipedia.org/wiki/Hugging_Face")[:500]) +``` + +## рд╣рдорд╛рд░реА рдорд▓реНрдЯреА-рдПрдЬреЗрдВрдЯ рд╕рд┐рд╕реНрдЯрдо рдХрд╛ рдирд┐рд░реНрдорд╛рдг рдХрд░реЗрдВ ЁЯдЦЁЯдЭЁЯдЦ + +рдЕрдм рдЬрдм рд╣рдорд╛рд░реЗ рдкрд╛рд╕ рд╕рднреА рдЯреВрд▓реНрд╕ `search` рдФрд░ `visit_webpage` рд╣реИрдВ, рд╣рдо рдЙрдирдХрд╛ рдЙрдкрдпреЛрдЧ рд╡реЗрдм рдПрдЬреЗрдВрдЯ рдмрдирд╛рдиреЗ рдХреЗ рд▓рд┐рдП рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред + +рдЗрд╕ рдПрдЬреЗрдВрдЯ рдХреЗ рд▓рд┐рдП рдХреМрди рд╕рд╛ рдХреЙрдиреНрдлрд╝рд┐рдЧрд░реЗрд╢рди рдЪреБрдиреЗрдВ? 
+- рд╡реЗрдм рдмреНрд░рд╛рдЙрдЬрд╝рд┐рдВрдЧ рдПрдХ рд╕рд┐рдВрдЧрд▓-рдЯрд╛рдЗрдорд▓рд╛рдЗрди рдЯрд╛рд╕реНрдХ рд╣реИ рдЬрд┐рд╕реЗ рд╕рдорд╛рдирд╛рдВрддрд░ рдЯреВрд▓ рдХреЙрд▓ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рдирд╣реАрдВ рд╣реИ, рдЗрд╕рд▓рд┐рдП JSON рдЯреВрд▓ рдХреЙрд▓рд┐рдВрдЧ рдЗрд╕рдХреЗ рд▓рд┐рдП рдЕрдЪреНрдЫреА рддрд░рд╣ рдХрд╛рдо рдХрд░рддреА рд╣реИред рдЗрд╕рд▓рд┐рдП рд╣рдо `ToolCallingAgent` рдЪреБрдирддреЗ рд╣реИрдВред
+- рд╕рд╛рде рд╣реА, рдЪреВрдВрдХрд┐ рдХрднреА-рдХрднреА рд╡реЗрдм рд╕рд░реНрдЪ рдореЗрдВ рд╕рд╣реА рдЙрддреНрддрд░ рдЦреЛрдЬрдиреЗ рд╕реЗ рдкрд╣рд▓реЗ рдХрдИ рдкреЗрдЬреЛрдВ рдХреА рд╕рд░реНрдЪ рдХрд░рдиреЗ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИ, рд╣рдо `max_steps` рдХреЛ рдмрдврд╝рд╛рдХрд░ 10 рдХрд░рдирд╛ рдкрд╕рдВрдж рдХрд░рддреЗ рд╣реИрдВред
+
+```py
+from smolagents import (
+    CodeAgent,
+    ToolCallingAgent,
+    HfApiModel,
+    ManagedAgent,
+    DuckDuckGoSearchTool,
+    LiteLLMModel,
+)
+
+model = HfApiModel(model_id)
+
+web_agent = ToolCallingAgent(
+    tools=[DuckDuckGoSearchTool(), visit_webpage],
+    model=model,
+    max_steps=10,
+)
+```
+
+рдлрд┐рд░ рд╣рдо рдЗрд╕ рдПрдЬреЗрдВрдЯ рдХреЛ рдПрдХ `ManagedAgent` рдореЗрдВ рд░реИрдк рдХрд░рддреЗ рд╣реИрдВ рдЬреЛ рдЗрд╕реЗ рдЗрд╕рдХреЗ рдореИрдиреЗрдЬрд░ рдПрдЬреЗрдВрдЯ рджреНрд╡рд╛рд░рд╛ рдХреЙрд▓ рдХрд░рдиреЗ рдпреЛрдЧреНрдп рдмрдирд╛рдПрдЧрд╛ред
+
+```py
+managed_web_agent = ManagedAgent(
+    agent=web_agent,
+    name="search",
+    description="Runs web searches for you. 
Give it your query as an argument.", +) +``` + +рдЕрдВрдд рдореЗрдВ рд╣рдо рдПрдХ рдореИрдиреЗрдЬрд░ рдПрдЬреЗрдВрдЯ рдмрдирд╛рддреЗ рд╣реИрдВ, рдФрд░ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдкрд░ рд╣рдо рдЕрдкрдиреЗ рдореИрдиреЗрдЬреНрдб рдПрдЬреЗрдВрдЯ рдХреЛ рдЗрд╕рдХреЗ `managed_agents` рдЖрд░реНрдЧреБрдореЗрдВрдЯ рдореЗрдВ рдкрд╛рд╕ рдХрд░рддреЗ рд╣реИрдВред + +рдЪреВрдВрдХрд┐ рдпрд╣ рдПрдЬреЗрдВрдЯ рдпреЛрдЬрдирд╛ рдмрдирд╛рдиреЗ рдФрд░ рд╕реЛрдЪрдиреЗ рдХрд╛ рдХрд╛рдо рдХрд░рддрд╛ рд╣реИ, рдЙрдиреНрдирдд рддрд░реНрдХ рд▓рд╛рднрджрд╛рдпрдХ рд╣реЛрдЧрд╛, рдЗрд╕рд▓рд┐рдП `CodeAgent` рд╕рдмрд╕реЗ рдЕрдЪреНрдЫрд╛ рд╡рд┐рдХрд▓реНрдк рд╣реЛрдЧрд╛ред + +рд╕рд╛рде рд╣реА, рд╣рдо рдПрдХ рдРрд╕рд╛ рдкреНрд░рд╢реНрди рдкреВрдЫрдирд╛ рдЪрд╛рд╣рддреЗ рд╣реИрдВ рдЬрд┐рд╕рдореЗрдВ рд╡рд░реНрддрдорд╛рди рд╡рд░реНрд╖ рдФрд░ рдЕрддрд┐рд░рд┐рдХреНрдд рдбреЗрдЯрд╛ рдЧрдгрдирд╛ рд╢рд╛рдорд┐рд▓ рд╣реИ: рдЗрд╕рд▓рд┐рдП рдЖрдЗрдП `additional_authorized_imports=["time", "numpy", "pandas"]` рдЬреЛрдбрд╝реЗрдВ, рдпрджрд┐ рдПрдЬреЗрдВрдЯ рдХреЛ рдЗрди рдкреИрдХреЗрдЬреЛрдВ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛред + +```py +manager_agent = CodeAgent( + tools=[], + model=model, + managed_agents=[managed_web_agent], + additional_authorized_imports=["time", "numpy", "pandas"], +) +``` + +рдмрд╕ рдЗрддрдирд╛ рд╣реА! рдЕрдм рдЪрд▓рд┐рдП рд╣рдорд╛рд░реЗ рд╕рд┐рд╕реНрдЯрдо рдХреЛ рдЪрд▓рд╛рддреЗ рд╣реИрдВ! рд╣рдо рдПрдХ рдРрд╕рд╛ рдкреНрд░рд╢реНрди рдЪреБрдирддреЗ рд╣реИрдВ рдЬрд┐рд╕рдореЗрдВ рдЧрдгрдирд╛ рдФрд░ рд╢реЛрдз рджреЛрдиреЛрдВ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реИред + +```py +answer = manager_agent.run("If LLM training continues to scale up at the current rhythm until 2030, what would be the electric power in GW required to power the biggest training runs by 2030? What would that correspond to, compared to some countries? 
Please provide a source for any numbers used.") +``` + +We get this report as the answer: +``` +Based on current growth projections and energy consumption estimates, if LLM trainings continue to scale up at the +current rhythm until 2030: + +1. The electric power required to power the biggest training runs by 2030 would be approximately 303.74 GW, which +translates to about 2,660,762 GWh/year. + +2. Comparing this to countries' electricity consumption: + - It would be equivalent to about 34% of China's total electricity consumption. + - It would exceed the total electricity consumption of India (184%), Russia (267%), and Japan (291%). + - It would be nearly 9 times the electricity consumption of countries like Italy or Mexico. + +3. Source of numbers: + - The initial estimate of 5 GW for future LLM training comes from AWS CEO Matt Garman. + - The growth projection used a CAGR of 79.80% from market research by Springs. + - Country electricity consumption data is from the U.S. Energy Information Administration, primarily for the year +2021. +``` + +рд▓рдЧрддрд╛ рд╣реИ рдХрд┐ рдпрджрд┐ [рд╕реНрдХреЗрд▓рд┐рдВрдЧ рд╣рд╛рдЗрдкреЛрдерд┐рд╕рд┐рд╕](https://gwern.net/scaling-hypothesis) рд╕рддреНрдп рдмрдиреА рд░рд╣рддреА рд╣реИ рддреЛ рд╣рдореЗрдВ рдХреБрдЫ рдмрдбрд╝реЗ рдкрд╛рд╡рд░рдкреНрд▓рд╛рдВрдЯреНрд╕ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрдЧреАред + +рд╣рдорд╛рд░реЗ рдПрдЬреЗрдВрдЯреНрд╕ рдиреЗ рдХрд╛рд░реНрдп рдХреЛ рд╣рд▓ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдХреБрд╢рд▓рддрд╛рдкреВрд░реНрд╡рдХ рд╕рд╣рдпреЛрдЧ рдХрд┐рдпрд╛! 
тЬЕ + +ЁЯТб рдЖрдк рдЗрд╕ рдСрд░реНрдХреЗрд╕реНрдЯреНрд░реЗрд╢рди рдХреЛ рдЖрд╕рд╛рдиреА рд╕реЗ рдЕрдзрд┐рдХ рдПрдЬреЗрдВрдЯреНрд╕ рдореЗрдВ рд╡рд┐рд╕реНрддрд╛рд░рд┐рдд рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ: рдПрдХ рдХреЛрдб рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рдХрд░рддрд╛ рд╣реИ, рдПрдХ рд╡реЗрдм рд╕рд░реНрдЪ рдХрд░рддрд╛ рд╣реИ, рдПрдХ рдлрд╛рдЗрд▓ рд▓реЛрдбрд┐рдВрдЧ рдХреЛ рд╕рдВрднрд╛рд▓рддрд╛ рд╣реИред diff --git a/docs/source/hi/examples/rag.md b/docs/source/hi/examples/rag.md new file mode 100644 index 000000000..9e7a0e595 --- /dev/null +++ b/docs/source/hi/examples/rag.md @@ -0,0 +1,156 @@ + +# рдПрдЬреЗрдВрдЯрд┐рдХ RAG + +[[open-in-colab]] + +рд░рд┐рдЯреНрд░реАрд╡рд▓-рдСрдЧрдореЗрдВрдЯреЗрдб-рдЬрдирд░реЗрд╢рди (RAG) рд╣реИ "рдПрдХ рдпреВрдЬрд░ рдХреЗ рдкреНрд░рд╢реНрди рдХрд╛ рдЙрддреНрддрд░ рджреЗрдиреЗ рдХреЗ рд▓рд┐рдП LLM рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдирд╛, рд▓реЗрдХрд┐рди рдЙрддреНрддрд░ рдХреЛ рдПрдХ рдиреЙрд▓реЗрдЬ рдмреЗрд╕ рд╕реЗ рдкреНрд░рд╛рдкреНрдд рдЬрд╛рдирдХрд╛рд░реА рдкрд░ рдЖрдзрд╛рд░рд┐рдд рдХрд░рдирд╛"ред рдЗрд╕рдореЗрдВ рд╡реИрдирд┐рд▓рд╛ рдпрд╛ рдлрд╛рдЗрди-рдЯреНрдпреВрдиреНрдб LLM рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рдХреА рддреБрд▓рдирд╛ рдореЗрдВ рдХрдИ рдлрд╛рдпрджреЗ рд╣реИрдВ: рдХреБрдЫ рдирд╛рдо рд▓реЗрдиреЗ рдХреЗ рд▓рд┐рдП, рдпрд╣ рдЙрддреНрддрд░ рдХреЛ рд╕рддреНрдп рддрдереНрдпреЛрдВ рдкрд░ рдЖрдзрд╛рд░рд┐рдд рдХрд░рдиреЗ рдФрд░ рдХрд╛рд▓реНрдкрдирд┐рдХ рдмрд╛рддреЛрдВ рдХреЛ рдХрдо рдХрд░рдиреЗ рдХреА рдЕрдиреБрдорддрд┐ рджреЗрддрд╛ рд╣реИ, рдпрд╣ LLM рдХреЛ рдбреЛрдореЗрди-рд╡рд┐рд╢рд┐рд╖реНрдЯ рдЬреНрдЮрд╛рди рдкреНрд░рджрд╛рди рдХрд░рдиреЗ рдХреА рдЕрдиреБрдорддрд┐ рджреЗрддрд╛ рд╣реИ, рдФрд░ рдпрд╣ рдиреЙрд▓реЗрдЬ рдмреЗрд╕ рд╕реЗ рдЬрд╛рдирдХрд╛рд░реА рддрдХ рдкрд╣реБрдВрдЪ рдХрд╛ рд╕реВрдХреНрд╖реНрдо рдирд┐рдпрдВрддреНрд░рдг рдкреНрд░рджрд╛рди рдХрд░рддрд╛ рд╣реИред + +рд▓реЗрдХрд┐рди рд╡реИрдирд┐рд▓рд╛ RAG рдХреА рд╕реАрдорд╛рдПрдВ рд╣реИрдВ, рд╕рдмрд╕реЗ рдорд╣рддреНрд╡рдкреВрд░реНрдг рдпреЗ рджреЛ: +- рдпрд╣ рдХреЗрд╡рд▓ 
рдПрдХ рд░рд┐рдЯреНрд░реАрд╡рд▓ рд╕реНрдЯреЗрдк рдХрд░рддрд╛ рд╣реИ: рдпрджрд┐ рдкрд░рд┐рдгрд╛рдо рдЦрд░рд╛рдм рд╣реИрдВ, рддреЛ рдЬрдирд░реЗрд╢рди рднреА рдмрджрд▓реЗ рдореЗрдВ рдЦрд░рд╛рдм рд╣реЛрдЧрд╛ред +- рд╕рд┐рдореЗрдВрдЯрд┐рдХ рд╕рдорд╛рдирддрд╛ рдХреА рдЧрдгрдирд╛ рдпреВрдЬрд░ рдХреЗ рдкреНрд░рд╢реНрди рдХреЛ рд╕рдВрджрд░реНрдн рдХреЗ рд░реВрдк рдореЗрдВ рдХрд░рдХреЗ рдХреА рдЬрд╛рддреА рд╣реИ, рдЬреЛ рдЕрдиреБрдХреВрд▓ рдирд╣реАрдВ рд╣реЛ рд╕рдХрддреА: рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП, рдпреВрдЬрд░ рдХрд╛ рдкреНрд░рд╢реНрди рдЕрдХреНрд╕рд░ рдПрдХ рд╕рд╡рд╛рд▓ рд╣реЛрдЧрд╛, рдЬрдмрдХрд┐ рд╕рд╣реА рдЙрддреНрддрд░ рджреЗрдиреЗ рд╡рд╛рд▓рд╛ рдбреЙрдХреНрдпреВрдореЗрдВрдЯ рд╕рдХрд╛рд░рд╛рддреНрдордХ рд╕реНрд╡рд░ рдореЗрдВ рд╣реЛ рд╕рдХрддрд╛ рд╣реИ, рдФрд░ рдЗрд╕рдХрд╛ рд╕рдорд╛рдирддрд╛ рд╕реНрдХреЛрд░ рдЕрдиреНрдп рд╕реНрд░реЛрдд рджрд╕реНрддрд╛рд╡реЗрдЬрд╝реЛрдВ рдХреА рддреБрд▓рдирд╛ рдореЗрдВ рдХрдо рд╣реЛ рд╕рдХрддрд╛ рд╣реИ, рдЬреЛ рдкреНрд░рд╢реНрдирд╡рд╛рдЪрдХ рд╕реНрд╡рд░ рдореЗрдВ рд╣реЛ рд╕рдХрддреЗ рд╣реИрдВред рдЗрд╕рд╕реЗ рд╕рдВрдмрдВрдзрд┐рдд рдЬрд╛рдирдХрд╛рд░реА рдХреЛ рдЪреВрдХрдиреЗ рдХрд╛ рдЬреЛрдЦрд┐рдо рд╣реЛрддрд╛ рд╣реИред + +рд╣рдо рдПрдХ RAG рдПрдЬреЗрдВрдЯ рдмрдирд╛рдХрд░ рдЗрди рд╕рдорд╕реНрдпрд╛рдУрдВ рдХреЛ рдХрдо рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ: рдмрд╣реБрдд рд╕рд░рд▓ рддрд░реАрдХреЗ рд╕реЗ, рдПрдХ рд░рд┐рдЯреНрд░реАрд╡рд░ рдЯреВрд▓ рд╕реЗ рд▓реИрд╕ рдПрдЬреЗрдВрдЯ! + +рдпрд╣ рдПрдЬреЗрдВрдЯ рдХрд░реЗрдЧрд╛: тЬЕ рд╕реНрд╡рдпрдВ рдХреНрд╡реЗрд░реА рддреИрдпрд╛рд░ рдХрд░реЗрдЧрд╛ рдФрд░ тЬЕ рдЖрд╡рд╢реНрдпрдХрддрд╛ рдкрдбрд╝рдиреЗ рдкрд░ рдкреБрдирдГ-рдкреНрд░рд╛рдкреНрддрд┐ рдХреЗ рд▓рд┐рдП рд╕рдореАрдХреНрд╖рд╛ рдХрд░реЗрдЧрд╛ред + +рдЗрд╕рд▓рд┐рдП рдпрд╣ рд╕рд╣рдЬ рд░реВрдк рд╕реЗ рдХреБрдЫ рдЙрдиреНрдирдд RAG рддрдХрдиреАрдХреЛрдВ рдХреЛ рдкреНрд░рд╛рдкреНрдд рдХрд░ рд▓реЗрдирд╛ рдЪрд╛рд╣рд┐рдП! 
+- рд╕рд┐рдореЗрдВрдЯрд┐рдХ рдЦреЛрдЬ рдореЗрдВ рд╕реАрдзреЗ рдпреВрдЬрд░ рдХреНрд╡реЗрд░реА рдХрд╛ рд╕рдВрджрд░реНрдн рдХреЗ рд░реВрдк рдореЗрдВ рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рдХреЗ рдмрдЬрд╛рдп, рдПрдЬреЗрдВрдЯ рд╕реНрд╡рдпрдВ рдПрдХ рд╕рдВрджрд░реНрдн рд╡рд╛рдХреНрдп рддреИрдпрд╛рд░ рдХрд░рддрд╛ рд╣реИ рдЬреЛ рд▓рдХреНрд╖рд┐рдд рдбреЙрдХреНрдпреВрдореЗрдВрдЯреНрд╕ рдХреЗ рдХрд░реАрдм рд╣реЛ рд╕рдХрддрд╛ рд╣реИ, рдЬреИрд╕рд╛ рдХрд┐ [HyDE](https://huggingface.co/papers/2212.10496) рдореЗрдВ рдХрд┐рдпрд╛ рдЧрдпрд╛ рд╣реИред +рдПрдЬреЗрдВрдЯ рдЬрдирд░реЗрдЯ рдХрд┐рдП рдЧрдП рд╕реНрдирд┐рдкреЗрдЯреНрд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддрд╛ рд╣реИ рдФрд░ рдЖрд╡рд╢реНрдпрдХрддрд╛ рдкрдбрд╝рдиреЗ рдкрд░ рдкреБрдирдГ-рдкреНрд░рд╛рдкреНрддрд┐ рдХрд░ рд╕рдХрддрд╛ рд╣реИ, рдЬреИрд╕рд╛ рдХрд┐ [Self-Query](https://docs.llamaindex.ai/en/stable/examples/evaluation/RetryQuery/) рдореЗрдВ рдХрд┐рдпрд╛ рдЧрдпрд╛ рд╣реИред + +рдЪрд▓рд┐рдП рдЗрд╕ рд╕рд┐рд╕реНрдЯрдо рдХреЛ рдмрдирд╛рддреЗ рд╣реИрдВред ЁЯЫая╕П + +рдЖрд╡рд╢реНрдпрдХ рдбрд┐рдкреЗрдВрдбреЗрдВрд╕реА рдЗрдВрд╕реНрдЯреЙрд▓ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдиреАрдЪреЗ рджреА рдЧрдИ рд▓рд╛рдЗрди рдЪрд▓рд╛рдПрдВред +```bash +!pip install smolagents pandas langchain langchain-community sentence-transformers rank_bm25 --upgrade -q +``` +HF Inference API рдХреЛ рдХреЙрд▓ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рдЖрдкрдХреЛ рдЕрдкрдиреЗ рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рд╡реЗрд░рд┐рдПрдмрд▓ `HF_TOKEN` рдХреЗ рд░реВрдк рдореЗрдВ рдПрдХ рд╡реИрдз рдЯреЛрдХрди рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрдЧреАред +рд╣рдо рдЗрд╕реЗ рд▓реЛрдб рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП python-dotenv рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рддреЗ рд╣реИрдВред +```py +from dotenv import load_dotenv +load_dotenv() +``` + +рд╣рдо рдкрд╣рд▓реЗ рдПрдХ рдиреЙрд▓реЗрдЬ рдмреЗрд╕ рд▓реЛрдб рдХрд░рддреЗ рд╣реИрдВ рдЬрд┐рд╕ рдкрд░ рд╣рдо RAG рдХреЛ рд▓рд╛рдЧреВ рдХрд░рдирд╛ рдЪрд╛рд╣рддреЗ рд╣реИрдВ: рдпрд╣ рдбреЗрдЯрд╛ рд╕реЗрдЯ Hugging Face рдХреЗ рдХрдИ 
рд▓рд╛рдЗрдмреНрд░реЗрд░реА рдХреЗ рдбреЙрдХреНрдпреВрдореЗрдВрдЯ рдкреГрд╖реНрдареЛрдВ рдХрд╛ рд╕рдВрдХрд▓рди рд╣реИ, рдЬрд┐рдиреНрд╣реЗрдВ Markdown рдореЗрдВ рд╕реНрдЯреЛрд░ рдХрд┐рдпрд╛ рдЧрдпрд╛ рд╣реИред рд╣рдо рдХреЗрд╡рд▓ `transformers` рд▓рд╛рдЗрдмреНрд░реЗрд░реА рдХреЗ рджрд╕реНрддрд╛рд╡реЗрдЬрд╝реЛрдВ рдХреЛ рд░рдЦреЗрдВрдЧреЗред + +рдлрд┐рд░ рдбреЗрдЯрд╛рд╕реЗрдЯ рдХреЛ рдкреНрд░реЛрд╕реЗрд╕ рдХрд░рдХреЗ рдФрд░ рдЗрд╕реЗ рдПрдХ рд╡реЗрдХреНрдЯрд░ рдбреЗрдЯрд╛рдмреЗрд╕ рдореЗрдВ рд╕реНрдЯреЛрд░ рдХрд░рдХреЗ рдиреЙрд▓реЗрдЬ рдмреЗрд╕ рддреИрдпрд╛рд░ рдХрд░реЗрдВ рдЬрд┐рд╕реЗ рд░рд┐рдЯреНрд░реАрд╡рд░ рджреНрд╡рд╛рд░рд╛ рдЙрдкрдпреЛрдЧ рдХрд┐рдпрд╛ рдЬрд╛рдПрдЧрд╛ред + +рд╣рдо [LangChain](https://python.langchain.com/docs/introduction/) рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рддреЗ рд╣реИрдВ рдХреНрдпреЛрдВрдХрд┐ рдЗрд╕рдореЗрдВ рдЙрддреНрдХреГрд╖реНрдЯ рд╡реЗрдХреНрдЯрд░ рдбреЗрдЯрд╛рдмреЗрд╕ рдЙрдкрдпреЛрдЧрд┐рддрд╛рдПрдВ рд╣реИрдВред + +```py +import datasets +from langchain.docstore.document import Document +from langchain.text_splitter import RecursiveCharacterTextSplitter +from langchain_community.retrievers import BM25Retriever + +knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train") +knowledge_base = knowledge_base.filter(lambda row: row["source"].startswith("huggingface/transformers")) + +source_docs = [ + Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]}) + for doc in knowledge_base +] + +text_splitter = RecursiveCharacterTextSplitter( + chunk_size=500, + chunk_overlap=50, + add_start_index=True, + strip_whitespace=True, + separators=["\n\n", "\n", ".", " ", ""], +) +docs_processed = text_splitter.split_documents(source_docs) +``` + +рдЕрдм рдбреЙрдХреНрдпреВрдореЗрдВрдЯреНрд╕ рддреИрдпрд╛рд░ рд╣реИрдВред + +рддреЛ рдЪрд▓рд┐рдП рдЕрдкрдирд╛ рдПрдЬреЗрдВрдЯрд┐рдХ RAG рд╕рд┐рд╕реНрдЯрдо рдмрдирд╛рдПрдВ! 
+ +ЁЯСЙ рд╣рдореЗрдВ рдХреЗрд╡рд▓ рдПрдХ RetrieverTool рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реИ рдЬрд┐рд╕рдХрд╛ рдЙрдкрдпреЛрдЧ рд╣рдорд╛рд░рд╛ рдПрдЬреЗрдВрдЯ рдиреЙрд▓реЗрдЬ рдмреЗрд╕ рд╕реЗ рдЬрд╛рдирдХрд╛рд░реА рдкреНрд░рд╛рдкреНрдд рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдХрд░ рд╕рдХрддрд╛ рд╣реИред + +рдЪреВрдВрдХрд┐ рд╣рдореЗрдВ рдЯреВрд▓ рдХреЗ рдПрдЯреНрд░реАрдмреНрдпреВрдЯ рдХреЗ рд░реВрдк рдореЗрдВ рдПрдХ vectordb рдЬреЛрдбрд╝рдиреЗ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реИ, рд╣рдо рд╕рд░рд▓ рдЯреВрд▓ рдХрдВрд╕реНрдЯреНрд░рдХреНрдЯрд░ рдХреЛ `@tool` рдбреЗрдХреЛрд░реЗрдЯрд░ рдХреЗ рд╕рд╛рде рд╕реАрдзреЗ рдЙрдкрдпреЛрдЧ рдирд╣реАрдВ рдХрд░ рд╕рдХрддреЗ: рдЗрд╕рд▓рд┐рдП рд╣рдо [tools tutorial](../tutorials/tools) рдореЗрдВ рд╣рд╛рдЗрд▓рд╛рдЗрдЯ рдХрд┐рдП рдЧрдП рд╕реЗрдЯрдЕрдк рдХрд╛ рдкрд╛рд▓рди рдХрд░реЗрдВрдЧреЗред + +```py +from smolagents import Tool + +class RetrieverTool(Tool): + name = "retriever" + description = "Uses semantic search to retrieve the parts of transformers documentation that could be most relevant to answer your query." + inputs = { + "query": { + "type": "string", + "description": "The query to perform. This should be semantically close to your target documents. 
Use the affirmative form rather than a question.", + } + } + output_type = "string" + + def __init__(self, docs, **kwargs): + super().__init__(**kwargs) + self.retriever = BM25Retriever.from_documents( + docs, k=10 + ) + + def forward(self, query: str) -> str: + assert isinstance(query, str), "Your search query must be a string" + + docs = self.retriever.invoke( + query, + ) + return "\nRetrieved documents:\n" + "".join( + [ + f"\n\n===== Document {str(i)} =====\n" + doc.page_content + for i, doc in enumerate(docs) + ] + ) + +retriever_tool = RetrieverTool(docs_processed) +``` +рд╣рдордиреЗ BM25 рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд┐рдпрд╛ рд╣реИ, рдЬреЛ рдПрдХ рдХреНрд▓рд╛рд╕рд┐рдХ рд░рд┐рдЯреНрд░реАрд╡рд▓ рд╡рд┐рдзрд┐ рд╣реИ, рдХреНрдпреЛрдВрдХрд┐ рдЗрд╕реЗ рд╕реЗрдЯрдЕрдк рдХрд░рдирд╛ рдмрд╣реБрдд рдЖрд╕рд╛рди рд╣реИред +рд░рд┐рдЯреНрд░реАрд╡рд▓ рд╕рдЯреАрдХрддрд╛ рдореЗрдВ рд╕реБрдзрд╛рд░ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рдЖрдк BM25 рдХреЛ рдбреЙрдХреНрдпреВрдореЗрдВрдЯреНрд╕ рдХреЗ рд▓рд┐рдП рд╡реЗрдХреНрдЯрд░ рдкреНрд░рддрд┐рдирд┐рдзрд┐рддреНрд╡ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рд╕рд┐рдореЗрдВрдЯрд┐рдХ рдЦреЛрдЬ рд╕реЗ рдмрджрд▓ рд╕рдХрддреЗ рд╣реИрдВ: рдЗрд╕ рдкреНрд░рдХрд╛рд░ рдЖрдк рдПрдХ рдЕрдЪреНрдЫрд╛ рдПрдореНрдмреЗрдбрд┐рдВрдЧ рдореЙрдбрд▓ рдЪреБрдирдиреЗ рдХреЗ рд▓рд┐рдП [MTEB Leaderboard](https://huggingface.co/spaces/mteb/leaderboard) рдкрд░ рдЬрд╛ рд╕рдХрддреЗ рд╣реИрдВред + +рдЕрдм рдпрд╣ рд╕реАрдзрд╛ рд╣реИ рдХрд┐ рдПрдХ рдПрдЬреЗрдВрдЯ рдмрдирд╛рдпрд╛ рдЬрд╛рдП рдЬреЛ рдЗрд╕ `retriever_tool` рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░реЗрдЧрд╛! 
+
+
+рдПрдЬреЗрдВрдЯ рдХреЛ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдкрд░ рдЗрди рдЖрд░реНрдЧреБрдореЗрдВрдЯреНрд╕ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрдЧреА:
+- `tools`: рдЯреВрд▓реНрд╕ рдХреА рдПрдХ рд╕реВрдЪреА рдЬрд┐рдиреНрд╣реЗрдВ рдПрдЬреЗрдВрдЯ рдХреЙрд▓ рдХрд░ рд╕рдХреЗрдЧрд╛ред
+- `model`: LLM рдЬреЛ рдПрдЬреЗрдВрдЯ рдХреЛ рдкрд╛рд╡рд░ рджреЗрддрд╛ рд╣реИред
+рд╣рдорд╛рд░рд╛ `model` рдПрдХ рдХреЙрд▓реЗрдмрд▓ рд╣реЛрдирд╛ рдЪрд╛рд╣рд┐рдП рдЬреЛ рдЗрдирдкреБрдЯ рдХреЗ рд░реВрдк рдореЗрдВ рд╕рдВрджреЗрд╢реЛрдВ рдХреА рдПрдХ рд╕реВрдЪреА рд▓реЗрддрд╛ рд╣реИ рдФрд░ рдЯреЗрдХреНрд╕реНрдЯ рд▓реМрдЯрд╛рддрд╛ рд╣реИред рдЗрд╕реЗ рдПрдХ stop_sequences рдЖрд░реНрдЧреБрдореЗрдВрдЯ рднреА рд╕реНрд╡реАрдХрд╛рд░ рдХрд░рдиреЗ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реИ рдЬреЛ рдмрддрд╛рддрд╛ рд╣реИ рдХрд┐ рдЬрдирд░реЗрд╢рди рдХрдм рд░реЛрдХрдиреА рд╣реИред рд╕реБрд╡рд┐рдзрд╛ рдХреЗ рд▓рд┐рдП, рд╣рдо рд╕реАрдзреЗ рдкреИрдХреЗрдЬ рдореЗрдВ рдкреНрд░рджрд╛рди рдХреА рдЧрдИ HfApiModel рдХреНрд▓рд╛рд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рддреЗ рд╣реИрдВ рддрд╛рдХрд┐ рдПрдХ LLM рдЗрдВрдЬрди рдорд┐рд▓ рд╕рдХреЗ рдЬреЛ Hugging Face рдХреЗ Inference API рдХреЛ рдХреЙрд▓ рдХрд░рддрд╛ рд╣реИред
+
+рдФрд░ рд╣рдо [meta-llama/Llama-3.3-70B-Instruct](https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct) рдХрд╛ рдЙрдкрдпреЛрдЧ llm рдЗрдВрдЬрди рдХреЗ рд░реВрдк рдореЗрдВ рдХрд░рддреЗ рд╣реИрдВ рдХреНрдпреЛрдВрдХрд┐:
+- рдЗрд╕рдореЗрдВ рд▓рдВрдмрд╛ 128k рдХреЙрдиреНрдЯреЗрдХреНрд╕реНрдЯ рд╣реИ, рдЬреЛ рд▓рдВрдмреЗ рд╕реНрд░реЛрдд рджрд╕реНрддрд╛рд╡реЗрдЬреЛрдВ рдХреЛ рдкреНрд░реЛрд╕реЗрд╕ рдХрд░рдиреЗ рдореЗрдВ рдорджрджрдЧрд╛рд░ рд╣реИ
+- рдпрд╣ рд╣рд░ рд╕рдордп HF рдХреЗ Inference API рдкрд░ рдореБрдлреНрдд рдореЗрдВ рдЙрдкрд▓рдмреНрдз рд╣реИ! 
+ +_рдиреЛрдЯ:_ Inference API рд╡рд┐рднрд┐рдиреНрди рдорд╛рдирджрдВрдбреЛрдВ рдХреЗ рдЖрдзрд╛рд░ рдкрд░ рдореЙрдбрд▓ рд╣реЛрд╕реНрдЯ рдХрд░рддрд╛ рд╣реИ, рдФрд░ рдбрд┐рдкреНрд▓реЙрдп рдХрд┐рдП рдЧрдП рдореЙрдбрд▓ рдмрд┐рдирд╛ рдкреВрд░реНрд╡ рд╕реВрдЪрдирд╛ рдХреЗ рдЕрдкрдбреЗрдЯ рдпрд╛ рдмрджрд▓реЗ рдЬрд╛ рд╕рдХрддреЗ рд╣реИрдВред рдЗрд╕рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдЕрдзрд┐рдХ рдЬрд╛рдиреЗрдВ [рдпрд╣рд╛рдВ](https://huggingface.co/docs/api-inference/supported-models) рдкрдврд╝реЗрдВред + +```py +from smolagents import HfApiModel, CodeAgent + +agent = CodeAgent( + tools=[retriever_tool], model=HfApiModel("meta-llama/Llama-3.3-70B-Instruct"), max_steps=4, verbosity_level=2 +) +``` + +CodeAgent рдХреЛ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬ рдХрд░рдиреЗ рдкрд░, рдЗрд╕реЗ рд╕реНрд╡рдЪрд╛рд▓рд┐рдд рд░реВрдк рд╕реЗ рдПрдХ рдбрд┐рдлрд╝реЙрд▓реНрдЯ рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рджрд┐рдпрд╛ рдЧрдпрд╛ рд╣реИ рдЬреЛ LLM рдЗрдВрдЬрди рдХреЛ рдЪрд░рдг-рджрд░-рдЪрд░рдг рдкреНрд░реЛрд╕реЗрд╕ рдХрд░рдиреЗ рдФрд░ рдХреЛрдб рд╕реНрдирд┐рдкреЗрдЯреНрд╕ рдХреЗ рд░реВрдк рдореЗрдВ рдЯреВрд▓ рдХреЙрд▓ рдЬрдирд░реЗрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдХрд╣рддрд╛ рд╣реИ, рд▓реЗрдХрд┐рди рдЖрдк рдЖрд╡рд╢реНрдпрдХрддрд╛рдиреБрд╕рд╛рд░ рдЗрд╕ рдкреНрд░реЙрдореНрдкреНрдЯ рдЯреЗрдореНрдкрд▓реЗрдЯ рдХреЛ рдЕрдкрдиреЗ рд╕реЗ рдмрджрд▓ рд╕рдХрддреЗ рд╣реИрдВред + +рдЬрдм CodeAgent рдХрд╛ `.run()` рдореЗрдердб рд▓реЙрдиреНрдЪ рдХрд┐рдпрд╛ рдЬрд╛рддрд╛ рд╣реИ, рддреЛ рдПрдЬреЗрдВрдЯ LLM рдЗрдВрдЬрди рдХреЛ рдХреЙрд▓ рдХрд░рдиреЗ рдХрд╛ рдХрд╛рд░реНрдп рдХрд░рддрд╛ рд╣реИ, рдФрд░ рдЯреВрд▓ рдХреЙрд▓реНрд╕ рдХреЛ рдирд┐рд╖реНрдкрд╛рджрд┐рдд рдХрд░рддрд╛ рд╣реИ, рдпрд╣ рд╕рдм рдПрдХ рд▓реВрдк рдореЗрдВ рд╣реЛрддрд╛ рд╣реИ, рдЬреЛ рддрдм рддрдХ рдЪрд▓рддрд╛ рд╣реИ рдЬрдм рддрдХ рдЯреВрд▓ final_answer рдХреЗ рд╕рд╛рде рдЕрдВрддрд┐рдо рдЙрддреНрддрд░ рдХреЗ рд░реВрдк рдореЗрдВ рдирд╣реАрдВ рдмреБрд▓рд╛рдпрд╛ рдЬрд╛рддрд╛ред + +```py +agent_output = agent.run("For a transformers model training, 
which is slower, the forward or the backward pass?") + +print("Final output:") +print(agent_output) +``` + + diff --git a/docs/source/hi/examples/text_to_sql.md b/docs/source/hi/examples/text_to_sql.md new file mode 100644 index 000000000..213821ac8 --- /dev/null +++ b/docs/source/hi/examples/text_to_sql.md @@ -0,0 +1,203 @@ + +# Text-to-SQL + +[[open-in-colab]] + +рдЗрд╕ рдЯреНрдпреВрдЯреЛрд░рд┐рдпрд▓ рдореЗрдВ, рд╣рдо рджреЗрдЦреЗрдВрдЧреЗ рдХрд┐ рдХреИрд╕реЗ `smolagents` рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рдПрдХ рдПрдЬреЗрдВрдЯ рдХреЛ SQL рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рд▓рд╛рдЧреВ рдХрд┐рдпрд╛ рдЬрд╛ рд╕рдХрддрд╛ рд╣реИред + +> рдЖрдЗрдП рд╕рдмрд╕реЗ рдорд╣рддреНрд╡рдкреВрд░реНрдг рдкреНрд░рд╢реНрди рд╕реЗ рд╢реБрд░реВ рдХрд░реЗрдВ: рдЗрд╕реЗ рд╕рд╛рдзрд╛рд░рдг рдХреНрдпреЛрдВ рдирд╣реАрдВ рд░рдЦреЗрдВ рдФрд░ рдПрдХ рд╕рд╛рдорд╛рдиреНрдп text-to-SQL рдкрд╛рдЗрдкрд▓рд╛рдЗрди рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░реЗрдВ? + +рдПрдХ рд╕рд╛рдорд╛рдиреНрдп text-to-SQL рдкрд╛рдЗрдкрд▓рд╛рдЗрди рдХрдордЬреЛрд░ рд╣реЛрддреА рд╣реИ, рдХреНрдпреЛрдВрдХрд┐ рдЙрддреНрдкрдиреНрди SQL рдХреНрд╡реЗрд░реА рдЧрд▓рдд рд╣реЛ рд╕рдХрддреА рд╣реИред рдЗрд╕рд╕реЗ рднреА рдмреБрд░реА рдмрд╛рдд рдпрд╣ рд╣реИ рдХрд┐ рдХреНрд╡реЗрд░реА рдЧрд▓рдд рд╣реЛ рд╕рдХрддреА рд╣реИ, рд▓реЗрдХрд┐рди рдХреЛрдИ рдПрд░рд░ рдирд╣реАрдВ рджрд┐рдЦрд╛рдПрдЧреА, рдмрд▓реНрдХрд┐ рдмрд┐рдирд╛ рдХрд┐рд╕реА рдЕрд▓рд╛рд░реНрдо рдХреЗ рдЧрд▓рдд/рдмреЗрдХрд╛рд░ рдЖрдЙрдЯрдкреБрдЯ рджреЗ рд╕рдХрддреА рд╣реИред + + +ЁЯСЙ рдЗрд╕рдХреЗ рдмрдЬрд╛рдп, рдПрдХ рдПрдЬреЗрдВрдЯ рд╕рд┐рд╕реНрдЯрдо рдЖрдЙрдЯрдкреБрдЯ рдХрд╛ рдЧрдВрднреАрд░рддрд╛ рд╕реЗ рдирд┐рд░реАрдХреНрд╖рдг рдХрд░ рд╕рдХрддрд╛ рд╣реИ рдФрд░ рддрдп рдХрд░ рд╕рдХрддрд╛ рд╣реИ рдХрд┐ рдХреНрд╡реЗрд░реА рдХреЛ рдмрджрд▓рдиреЗ рдХреА рдЬрд░реВрд░рдд рд╣реИ рдпрд╛ рдирд╣реАрдВ, рдЗрд╕ рдкреНрд░рдХрд╛рд░ рдЗрд╕реЗ рдмреЗрд╣рддрд░ рдкреНрд░рджрд░реНрд╢рди рдореЗрдВ рдорджрдж рдорд┐рд▓рддреА рд╣реИред + +рдЖрдЗрдП рдЗрд╕ рдПрдЬреЗрдВрдЯ рдХреЛ 
рдмрдирд╛рдПрдВ! ЁЯТк + +рдкрд╣рд▓реЗ, рд╣рдо SQL рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рд╕реЗрдЯрдЕрдк рдХрд░рддреЗ рд╣реИрдВ: +```py +from sqlalchemy import ( + create_engine, + MetaData, + Table, + Column, + String, + Integer, + Float, + insert, + inspect, + text, +) + +engine = create_engine("sqlite:///:memory:") +metadata_obj = MetaData() + +# create city SQL table +table_name = "receipts" +receipts = Table( + table_name, + metadata_obj, + Column("receipt_id", Integer, primary_key=True), + Column("customer_name", String(16), primary_key=True), + Column("price", Float), + Column("tip", Float), +) +metadata_obj.create_all(engine) + +rows = [ + {"receipt_id": 1, "customer_name": "Alan Payne", "price": 12.06, "tip": 1.20}, + {"receipt_id": 2, "customer_name": "Alex Mason", "price": 23.86, "tip": 0.24}, + {"receipt_id": 3, "customer_name": "Woodrow Wilson", "price": 53.43, "tip": 5.43}, + {"receipt_id": 4, "customer_name": "Margaret James", "price": 21.11, "tip": 1.00}, +] +for row in rows: + stmt = insert(receipts).values(**row) + with engine.begin() as connection: + cursor = connection.execute(stmt) +``` + +### Agent рдмрдирд╛рдПрдВ + +рдЕрдм рдЖрдЗрдП рд╣рдорд╛рд░реА SQL рдЯреЗрдмрд▓ рдХреЛ рдПрдХ рдЯреВрд▓ рджреНрд╡рд╛рд░рд╛ рдкреБрдирд░реНрдкреНрд░рд╛рдкреНрдд рдХрд░рдиреЗ рдпреЛрдЧреНрдп рдмрдирд╛рдПрдВред + +рдЯреВрд▓ рдХрд╛ рд╡рд┐рд╡рд░рдг рд╡рд┐рд╢реЗрд╖рддрд╛ рдПрдЬреЗрдВрдЯ рд╕рд┐рд╕реНрдЯрдо рджреНрд╡рд╛рд░рд╛ LLM рдХреЗ prompt рдореЗрдВ рдПрдореНрдмреЗрдб рдХрд┐рдпрд╛ рдЬрд╛рдПрдЧрд╛: рдпрд╣ LLM рдХреЛ рдЯреВрд▓ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдЬрд╛рдирдХрд╛рд░реА рджреЗрддрд╛ рд╣реИред рдпрд╣реАрдВ рдкрд░ рд╣рдо SQL рдЯреЗрдмрд▓ рдХрд╛ рд╡рд░реНрдгрди рдХрд░рдирд╛ рдЪрд╛рд╣рддреЗ рд╣реИрдВред + +```py +inspector = inspect(engine) +columns_info = [(col["name"], col["type"]) for col in inspector.get_columns("receipts")] + +table_description = "Columns:\n" + "\n".join([f" - {name}: {col_type}" for name, col_type in 
columns_info]) +print(table_description) +``` + +```text +Columns: + - receipt_id: INTEGER + - customer_name: VARCHAR(16) + - price: FLOAT + - tip: FLOAT +``` + +рдЕрдм рдЖрдЗрдП рд╣рдорд╛рд░рд╛ рдЯреВрд▓ рдмрдирд╛рдПрдВред рдЗрд╕реЗ рдирд┐рдореНрдирд▓рд┐рдЦрд┐рдд рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реИ: (рдЕрдзрд┐рдХ рдЬрд╛рдирдХрд╛рд░реА рдХреЗ рд▓рд┐рдП [рдЯреВрд▓ doc](../tutorials/tools) рдкрдврд╝реЗрдВ) +- рдПрдХ рдбреЙрдХрд╕реНрдЯреНрд░рд┐рдВрдЧ рдЬрд┐рд╕рдореЗрдВ рдЖрд░реНрдЧреНрдпреБрдореЗрдВрдЯреНрд╕ рдХреА рд╕реВрдЪреА рд╡рд╛рд▓рд╛ `Args:` рднрд╛рдЧ рд╣реЛред +- рдЗрдирдкреБрдЯ рдФрд░ рдЖрдЙрдЯрдкреБрдЯ рджреЛрдиреЛрдВ рдкрд░ рдЯрд╛рдЗрдк рд╣рд┐рдВрдЯреНрд╕ред + +```py +from smolagents import tool + +@tool +def sql_engine(query: str) -> str: + """ + Allows you to perform SQL queries on the table. Returns a string representation of the result. + The table is named 'receipts'. Its description is as follows: + Columns: + - receipt_id: INTEGER + - customer_name: VARCHAR(16) + - price: FLOAT + - tip: FLOAT + + Args: + query: The query to perform. This should be correct SQL. 
+ """ + output = "" + with engine.connect() as con: + rows = con.execute(text(query)) + for row in rows: + output += "\n" + str(row) + return output +``` + +рдЕрдм рдЖрдЗрдП рдПрдХ рдПрдЬреЗрдВрдЯ рдмрдирд╛рдПрдВ рдЬреЛ рдЗрд╕ рдЯреВрд▓ рдХрд╛ рд▓рд╛рдн рдЙрдард╛рддрд╛ рд╣реИред + +рд╣рдо `CodeAgent` рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рддреЗ рд╣реИрдВ, рдЬреЛ smolagents рдХрд╛ рдореБрдЦреНрдп рдПрдЬреЗрдВрдЯ рдХреНрд▓рд╛рд╕ рд╣реИ: рдПрдХ рдПрдЬреЗрдВрдЯ рдЬреЛ рдХреЛрдб рдореЗрдВ рдПрдХреНрд╢рди рд▓рд┐рдЦрддрд╛ рд╣реИ рдФрд░ ReAct рдлреНрд░реЗрдорд╡рд░реНрдХ рдХреЗ рдЕрдиреБрд╕рд╛рд░ рдкрд┐рдЫрд▓реЗ рдЖрдЙрдЯрдкреБрдЯ рдкрд░ рдкреБрдирд░рд╛рд╡реГрддреНрддрд┐ рдХрд░ рд╕рдХрддрд╛ рд╣реИред + +рдореЙрдбрд▓ рд╡рд╣ LLM рд╣реИ рдЬреЛ рдПрдЬреЗрдВрдЯ рд╕рд┐рд╕реНрдЯрдо рдХреЛ рд╕рдВрдЪрд╛рд▓рд┐рдд рдХрд░рддрд╛ рд╣реИред `HfApiModel` рдЖрдкрдХреЛ HF рдХреЗ Inference API рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ LLM рдХреЛ рдХреЙрд▓ рдХрд░рдиреЗ рдХреА рдЕрдиреБрдорддрд┐ рджреЗрддрд╛ рд╣реИ, рдпрд╛ рддреЛ рд╕рд░реНрд╡рд░рд▓реЗрд╕ рдпрд╛ рдбреЗрдбрд┐рдХреЗрдЯреЗрдб рдПрдВрдбрдкреЙрдЗрдВрдЯ рдХреЗ рдорд╛рдзреНрдпрдо рд╕реЗ, рд▓реЗрдХрд┐рди рдЖрдк рдХрд┐рд╕реА рднреА рдкреНрд░реЛрдкреНрд░рд╛рдЗрдЯрд░реА API рдХрд╛ рднреА рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред + +```py +from smolagents import CodeAgent, HfApiModel + +agent = CodeAgent( + tools=[sql_engine], + model=HfApiModel("meta-llama/Meta-Llama-3.1-8B-Instruct"), +) +agent.run("Can you give me the name of the client who got the most expensive receipt?") +``` + +### рд▓реЗрд╡рд▓ 2: рдЯреЗрдмрд▓ рдЬреЙрдЗрдиреНрд╕ + +рдЕрдм рдЖрдЗрдП рдЗрд╕реЗ рдФрд░ рдЪреБрдиреМрддреАрдкреВрд░реНрдг рдмрдирд╛рдПрдВ! 
рд╣рдо рдЪрд╛рд╣рддреЗ рд╣реИрдВ рдХрд┐ рд╣рдорд╛рд░рд╛ рдПрдЬреЗрдВрдЯ рдХрдИ рдЯреЗрдмрд▓реНрд╕ рдХреЗ рдмреАрдЪ рдЬреЙрдЗрди рдХреЛ рд╕рдВрднрд╛рд▓ рд╕рдХреЗред
+
+рддреЛ рдЖрдЗрдП рд╣рдо рдкреНрд░рддреНрдпреЗрдХ receipt_id рдХреЗ рд▓рд┐рдП рд╡реЗрдЯрд░реНрд╕ рдХреЗ рдирд╛рдо рд░рд┐рдХреЙрд░реНрдб рдХрд░рдиреЗ рд╡рд╛рд▓реА рдПрдХ рджреВрд╕рд░реА рдЯреЗрдмрд▓ рдмрдирд╛рддреЗ рд╣реИрдВ!
+
+```py
+table_name = "waiters"
+receipts = Table(
+    table_name,
+    metadata_obj,
+    Column("receipt_id", Integer, primary_key=True),
+    Column("waiter_name", String(16), primary_key=True),
+)
+metadata_obj.create_all(engine)
+
+rows = [
+    {"receipt_id": 1, "waiter_name": "Corey Johnson"},
+    {"receipt_id": 2, "waiter_name": "Michael Watts"},
+    {"receipt_id": 3, "waiter_name": "Michael Watts"},
+    {"receipt_id": 4, "waiter_name": "Margaret James"},
+]
+for row in rows:
+    stmt = insert(receipts).values(**row)
+    with engine.begin() as connection:
+        cursor = connection.execute(stmt)
+```
+рдЪреВрдВрдХрд┐ рд╣рдордиреЗ рдЯреЗрдмрд▓ рдХреЛ рдмрджрд▓ рджрд┐рдпрд╛ рд╣реИ, рд╣рдо LLM рдХреЛ рдЗрд╕ рдЯреЗрдмрд▓ рдХреА рдЬрд╛рдирдХрд╛рд░реА рдХрд╛ рдЙрдЪрд┐рдд рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рджреЗрдиреЗ рдХреЗ рд▓рд┐рдП рдЗрд╕ рдЯреЗрдмрд▓ рдХреЗ рд╡рд┐рд╡рд░рдг рдХреЗ рд╕рд╛рде `sql_engine` рдХреЛ рдЕрдкрдбреЗрдЯ рдХрд░рддреЗ рд╣реИрдВред
+
+```py
+updated_description = """Allows you to perform SQL queries on the table. Beware that this tool's output is a string representation of the execution output.
+It can use the following tables:""" + +inspector = inspect(engine) +for table in ["receipts", "waiters"]: + columns_info = [(col["name"], col["type"]) for col in inspector.get_columns(table)] + + table_description = f"Table '{table}':\n" + + table_description += "Columns:\n" + "\n".join([f" - {name}: {col_type}" for name, col_type in columns_info]) + updated_description += "\n\n" + table_description + +print(updated_description) +``` +рдЪреВрдВрдХрд┐ рдпрд╣ рд░рд┐рдХреНрд╡реЗрд╕реНрдЯ рдкрд┐рдЫрд▓реЗ рд╡рд╛рд▓реЗ рд╕реЗ рдереЛрдбрд╝реА рдХрдард┐рди рд╣реИ, рд╣рдо LLM рдЗрдВрдЬрди рдХреЛ рдЕрдзрд┐рдХ рд╢рдХреНрддрд┐рд╢рд╛рд▓реА [Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рд╕реНрд╡рд┐рдЪ рдХрд░реЗрдВрдЧреЗ! + +```py +sql_engine.description = updated_description + +agent = CodeAgent( + tools=[sql_engine], + model=HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct"), +) + +agent.run("Which waiter got more total money from tips?") +``` +рдпрд╣ рд╕реАрдзреЗ рдХрд╛рдо рдХрд░рддрд╛ рд╣реИ! рд╕реЗрдЯрдЕрдк рдЖрд╢реНрдЪрд░реНрдпрдЬрдирдХ рд░реВрдк рд╕реЗ рд╕рд░рд▓ рдерд╛, рд╣реИ рдирд╛? + +рдпрд╣ рдЙрджрд╛рд╣рд░рдг рдкреВрд░рд╛ рд╣реЛ рдЧрдпрд╛! рд╣рдордиреЗ рдЗрди рдЕрд╡рдзрд╛рд░рдгрд╛рдУрдВ рдХреЛ рдЫреБрдЖ рд╣реИ: +- рдирдП рдЯреВрд▓реНрд╕ рдХрд╛ рдирд┐рд░реНрдорд╛рдгред +- рдЯреВрд▓ рдХреЗ рд╡рд┐рд╡рд░рдг рдХреЛ рдЕрдкрдбреЗрдЯ рдХрд░рдирд╛ред +- рдПрдХ рдордЬрдмреВрдд LLM рдореЗрдВ рд╕реНрд╡рд┐рдЪ рдХрд░рдиреЗ рд╕реЗ рдПрдЬреЗрдВрдЯ рдХреА рддрд░реНрдХрд╢рдХреНрддрд┐ рдореЗрдВ рдорджрдж рдорд┐рд▓рддреА рд╣реИред + +тЬЕ рдЕрдм рдЖрдк рд╡рд╣ text-to-SQL рд╕рд┐рд╕реНрдЯрдо рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВ рдЬрд┐рд╕рдХрд╛ рдЖрдкрдиреЗ рд╣рдореЗрд╢рд╛ рд╕рдкрдирд╛ рджреЗрдЦрд╛ рд╣реИ! 
тЬи \ No newline at end of file diff --git a/docs/source/hi/guided_tour.md b/docs/source/hi/guided_tour.md new file mode 100644 index 000000000..24cb71d03 --- /dev/null +++ b/docs/source/hi/guided_tour.md @@ -0,0 +1,360 @@ + +# Agents - рдЧрд╛рдЗрдбреЗрдб рдЯреВрд░ + +[[open-in-colab]] + +рдЗрд╕ рдЧрд╛рдЗрдбреЗрдб рд╡рд┐рдЬрд┐рдЯ рдореЗрдВ, рдЖрдк рд╕реАрдЦреЗрдВрдЧреЗ рдХрд┐ рдПрдХ рдПрдЬреЗрдВрдЯ рдХреИрд╕реЗ рдмрдирд╛рдПрдВ, рдЗрд╕реЗ рдХреИрд╕реЗ рдЪрд▓рд╛рдПрдВ, рдФрд░ рдЕрдкрдиреЗ рдпреВрдЬ-рдХреЗрд╕ рдХреЗ рд▓рд┐рдП рдмреЗрд╣рддрд░ рдХрд╛рдо рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдЗрд╕реЗ рдХреИрд╕реЗ рдХрд╕реНрдЯрдорд╛рдЗрдЬрд╝ рдХрд░реЗрдВред + +### рдЕрдкрдирд╛ Agent рдмрдирд╛рдирд╛ + +рдПрдХ рдорд┐рдирд┐рдорд▓ рдПрдЬреЗрдВрдЯ рдХреЛ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬрд╝ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рдЖрдкрдХреЛ рдХрдо рд╕реЗ рдХрдо рдЗрди рджреЛ рдЖрд░реНрдЧреНрдпреВрдореЗрдВрдЯреНрд╕ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реИ: + +- `model`, рдЖрдкрдХреЗ рдПрдЬреЗрдВрдЯ рдХреЛ рдкрд╛рд╡рд░ рджреЗрдиреЗ рдХреЗ рд▓рд┐рдП рдПрдХ рдЯреЗрдХреНрд╕реНрдЯ-рдЬрдирд░реЗрд╢рди рдореЙрдбрд▓ - рдХреНрдпреЛрдВрдХрд┐ рдПрдЬреЗрдВрдЯ рдПрдХ рд╕рд┐рдВрдкрд▓ LLM рд╕реЗ рдЕрд▓рдЧ рд╣реИ, рдпрд╣ рдПрдХ рд╕рд┐рд╕реНрдЯрдо рд╣реИ рдЬреЛ LLM рдХреЛ рдЕрдкрдиреЗ рдЗрдВрдЬрди рдХреЗ рд░реВрдк рдореЗрдВ рдЙрдкрдпреЛрдЧ рдХрд░рддрд╛ рд╣реИред рдЖрдк рдЗрдирдореЗрдВ рд╕реЗ рдХреЛрдИ рднреА рд╡рд┐рдХрд▓реНрдк рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ: + - [`TransformersModel`] `transformers` рдкрд╛рдЗрдкрд▓рд╛рдЗрди рдХреЛ рдкрд╣рд▓реЗ рд╕реЗ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬрд╝ рдХрд░рддрд╛ рд╣реИ рдЬреЛ `transformers` рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рдЖрдкрдХреА рд▓реЛрдХрд▓ рдорд╢реАрди рдкрд░ рдЗрдиреНрдлрд░реЗрдВрд╕ рдЪрд▓рд╛рдиреЗ рдХреЗ рд▓рд┐рдП рд╣реЛрддрд╛ рд╣реИред + - [`HfApiModel`] рдЕрдВрджрд░ рд╕реЗ `huggingface_hub.InferenceClient` рдХрд╛ рд▓рд╛рдн рдЙрдард╛рддрд╛ рд╣реИред + - [`LiteLLMModel`] рдЖрдкрдХреЛ [LiteLLM](https://docs.litellm.ai/) рдХреЗ рдорд╛рдзреНрдпрдо рд╕реЗ 
100+ рдЕрд▓рдЧ-рдЕрд▓рдЧ рдореЙрдбрд▓реНрд╕ рдХреЛ рдХреЙрд▓ рдХрд░рдиреЗ рджреЗрддрд╛ рд╣реИ! + +- `tools`, `Tools` рдХреА рдПрдХ рд▓рд┐рд╕реНрдЯ рдЬрд┐рд╕реЗ рдПрдЬреЗрдВрдЯ рдЯрд╛рд╕реНрдХ рдХреЛ рд╣рд▓ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддрд╛ рд╣реИред рдпрд╣ рдПрдХ рдЦрд╛рд▓реА рд▓рд┐рд╕реНрдЯ рд╣реЛ рд╕рдХрддреА рд╣реИред рдЖрдк рдСрдкреНрд╢рдирд▓ рдЖрд░реНрдЧреНрдпреВрдореЗрдВрдЯ `add_base_tools=True` рдХреЛ рдкрд░рд┐рднрд╛рд╖рд┐рдд рдХрд░рдХреЗ рдЕрдкрдиреА `tools` рд▓рд┐рд╕реНрдЯ рдХреЗ рдКрдкрд░ рдбрд┐рдлрд╝реЙрд▓реНрдЯ рдЯреВрд▓рдмреЙрдХреНрд╕ рднреА рдЬреЛрдбрд╝ рд╕рдХрддреЗ рд╣реИрдВред + +рдПрдХ рдмрд╛рд░ рдЬрдм рдЖрдкрдХреЗ рдкрд╛рд╕ рдпреЗ рджреЛ рдЖрд░реНрдЧреНрдпреВрдореЗрдВрдЯреНрд╕, `tools` рдФрд░ `model` рд╣реИрдВ, рддреЛ рдЖрдк рдПрдХ рдПрдЬреЗрдВрдЯ рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВ рдФрд░ рдЗрд╕реЗ рдЪрд▓рд╛ рд╕рдХрддреЗ рд╣реИрдВред рдЖрдк рдХреЛрдИ рднреА LLM рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдпрд╛ рддреЛ [Hugging Face API](https://huggingface.co/docs/api-inference/en/index), [transformers](https://github.com/huggingface/transformers/), [ollama](https://ollama.com/), рдпрд╛ [LiteLLM](https://www.litellm.ai/) рдХреЗ рдорд╛рдзреНрдпрдо рд╕реЗред + + + + +Hugging Face API рдЯреЛрдХрди рдХреЗ рдмрд┐рдирд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдореБрдлреНрдд рд╣реИ, рд▓реЗрдХрд┐рди рдлрд┐рд░ рдЗрд╕рдореЗрдВ рд░реЗрдЯ рд▓рд┐рдорд┐рдЯреЗрд╢рди рд╣реЛрдЧреАред + +рдЧреЗрдЯреЗрдб рдореЙрдбрд▓реНрд╕ рддрдХ рдкрд╣реБрдВрдЪрдиреЗ рдпрд╛ PRO рдЕрдХрд╛рдЙрдВрдЯ рдХреЗ рд╕рд╛рде рдЕрдкрдиреА рд░реЗрдЯ рд▓рд┐рдорд┐рдЯреНрд╕ рдмрдврд╝рд╛рдиреЗ рдХреЗ рд▓рд┐рдП, рдЖрдкрдХреЛ рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рд╡реЗрд░рд┐рдПрдмрд▓ `HF_TOKEN` рд╕реЗрдЯ рдХрд░рдирд╛ рд╣реЛрдЧрд╛ рдпрд╛ `HfApiModel` рдХреЗ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдкрд░ `token` рд╡реЗрд░рд┐рдПрдмрд▓ рдкрд╛рд╕ рдХрд░рдирд╛ рд╣реЛрдЧрд╛ред + +```python +from smolagents import CodeAgent, HfApiModel + +model_id = 
"meta-llama/Llama-3.3-70B-Instruct" + +model = HfApiModel(model_id=model_id, token="") +agent = CodeAgent(tools=[], model=model, add_base_tools=True) + +agent.run( + "Could you give me the 118th number in the Fibonacci sequence?", +) +``` + + + +```python +from smolagents import CodeAgent, TransformersModel + +model_id = "meta-llama/Llama-3.2-3B-Instruct" + +model = TransformersModel(model_id=model_id) +agent = CodeAgent(tools=[], model=model, add_base_tools=True) + +agent.run( + "Could you give me the 118th number in the Fibonacci sequence?", +) +``` + + + +`LiteLLMModel` рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рдЖрдкрдХреЛ рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рд╡реЗрд░рд┐рдПрдмрд▓ `ANTHROPIC_API_KEY` рдпрд╛ `OPENAI_API_KEY` рд╕реЗрдЯ рдХрд░рдирд╛ рд╣реЛрдЧрд╛, рдпрд╛ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдкрд░ `api_key` рд╡реЗрд░рд┐рдПрдмрд▓ рдкрд╛рд╕ рдХрд░рдирд╛ рд╣реЛрдЧрд╛ред + +```python +from smolagents import CodeAgent, LiteLLMModel + +model = LiteLLMModel(model_id="anthropic/claude-3-5-sonnet-latest", api_key="YOUR_ANTHROPIC_API_KEY") # Could use 'gpt-4o' +agent = CodeAgent(tools=[], model=model, add_base_tools=True) + +agent.run( + "Could you give me the 118th number in the Fibonacci sequence?", +) +``` + + + +```python +from smolagents import CodeAgent, LiteLLMModel + +model = LiteLLMModel( + model_id="ollama_chat/llama3.2", # This model is a bit weak for agentic behaviours though + api_base="http://localhost:11434", # replace with 127.0.0.1:11434 or remote open-ai compatible server if necessary + api_key="YOUR_API_KEY" # replace with API key if necessary + num_ctx=8192 # ollama default is 2048 which will fail horribly. 8192 works for easy tasks, more is better. Check https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator to calculate how much VRAM this will need for the selected model. 
+) + +agent = CodeAgent(tools=[], model=model, add_base_tools=True) + +agent.run( + "Could you give me the 118th number in the Fibonacci sequence?", +) +``` + + + +#### CodeAgent рдФрд░ ToolCallingAgent + +[`CodeAgent`] рд╣рдорд╛рд░рд╛ рдбрд┐рдлрд╝реЙрд▓реНрдЯ рдПрдЬреЗрдВрдЯ рд╣реИред рдпрд╣ рд╣рд░ рд╕реНрдЯреЗрдк рдкрд░ рдкрд╛рдпрдерди рдХреЛрдб рд╕реНрдирд┐рдкреЗрдЯреНрд╕ рд▓рд┐рдЦреЗрдЧрд╛ рдФрд░ рдПрдХреНрдЬреАрдХреНрдпреВрдЯ рдХрд░реЗрдЧрд╛ред + +рдбрд┐рдлрд╝реЙрд▓реНрдЯ рд░реВрдк рд╕реЗ, рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рдЖрдкрдХреЗ рд▓реЛрдХрд▓ рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рдореЗрдВ рдХрд┐рдпрд╛ рдЬрд╛рддрд╛ рд╣реИред +рдпрд╣ рд╕реБрд░рдХреНрд╖рд┐рдд рд╣реЛрдирд╛ рдЪрд╛рд╣рд┐рдП рдХреНрдпреЛрдВрдХрд┐ рдХреЗрд╡рд▓ рд╡рд╣реА рдлрд╝рдВрдХреНрд╢рдВрд╕ рдХреЙрд▓ рдХрд┐рдП рдЬрд╛ рд╕рдХрддреЗ рд╣реИрдВ рдЬреЛ рдЖрдкрдиреЗ рдкреНрд░рджрд╛рди рдХрд┐рдП рд╣реИрдВ (рд╡рд┐рд╢реЗрд╖ рд░реВрдк рд╕реЗ рдпрджрд┐ рдпрд╣ рдХреЗрд╡рд▓ Hugging Face рдЯреВрд▓реНрд╕ рд╣реИрдВ) рдФрд░ рдкреВрд░реНрд╡-рдкрд░рд┐рднрд╛рд╖рд┐рдд рд╕реБрд░рдХреНрд╖рд┐рдд рдлрд╝рдВрдХреНрд╢рдВрд╕ рдЬреИрд╕реЗ `print` рдпрд╛ `math` рдореЙрдбреНрдпреВрд▓ рд╕реЗ рдлрд╝рдВрдХреНрд╢рдВрд╕, рдЗрд╕рд▓рд┐рдП рдЖрдк рдкрд╣рд▓реЗ рд╕реЗ рд╣реА рд╕реАрдорд┐рдд рд╣реИрдВ рдХрд┐ рдХреНрдпрд╛ рдПрдХреНрдЬреАрдХреНрдпреВрдЯ рдХрд┐рдпрд╛ рдЬрд╛ рд╕рдХрддрд╛ рд╣реИред + +рдкрд╛рдпрдерди рдЗрдВрдЯрд░рдкреНрд░реЗрдЯрд░ рдбрд┐рдлрд╝реЙрд▓реНрдЯ рд░реВрдк рд╕реЗ рд╕реЗрдл рд▓рд┐рд╕реНрдЯ рдХреЗ рдмрд╛рд╣рд░ рдЗрдореНрдкреЛрд░реНрдЯ рдХреА рдЕрдиреБрдорддрд┐ рдирд╣реАрдВ рджреЗрддрд╛ рд╣реИ, рдЗрд╕рд▓рд┐рдП рд╕рдмрд╕реЗ рд╕реНрдкрд╖реНрдЯ рдЕрдЯреИрдХ рд╕рдорд╕реНрдпрд╛ рдирд╣реАрдВ рд╣реЛрдиреА рдЪрд╛рд╣рд┐рдПред +рдЖрдк рдЕрдкрдиреЗ [`CodeAgent`] рдХреЗ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдкрд░ рдЖрд░реНрдЧреНрдпреВрдореЗрдВрдЯ `additional_authorized_imports` рдореЗрдВ рд╕реНрдЯреНрд░рд┐рдВрдЧреНрд╕ рдХреА рд▓рд┐рд╕реНрдЯ рдХреЗ рд░реВрдк рдореЗрдВ рдЕрддрд┐рд░рд┐рдХреНрдд рдореЙрдбреНрдпреВрд▓реНрд╕ рдХреЛ 
рдЕрдзрд┐рдХреГрдд рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред + +```py +model = HfApiModel() +agent = CodeAgent(tools=[], model=model, additional_authorized_imports=['requests', 'bs4']) +agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?") +``` + +> [!WARNING] +> LLM рдЖрд░реНрдмрд┐рдЯреНрд░рд░реА рдХреЛрдб рдЬрдирд░реЗрдЯ рдХрд░ рд╕рдХрддрд╛ рд╣реИ рдЬреЛ рдлрд┐рд░ рдПрдХреНрдЬреАрдХреНрдпреВрдЯ рдХрд┐рдпрд╛ рдЬрд╛рдПрдЧрд╛: рдХреЛрдИ рдЕрд╕реБрд░рдХреНрд╖рд┐рдд рдЗрдореНрдкреЛрд░реНрдЯ рди рдЬреЛрдбрд╝реЗрдВ! + +рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рдХрд┐рд╕реА рднреА рдХреЛрдб рдкрд░ рд░реБрдХ рдЬрд╛рдПрдЧрд╛ рдЬреЛ рдПрдХ рдЕрд╡реИрдз рдСрдкрд░реЗрд╢рди рдХрд░рдиреЗ рдХрд╛ рдкреНрд░рдпрд╛рд╕ рдХрд░рддрд╛ рд╣реИ рдпрд╛ рдпрджрд┐ рдПрдЬреЗрдВрдЯ рджреНрд╡рд╛рд░рд╛ рдЬрдирд░реЗрдЯ рдХрд┐рдП рдЧрдП рдХреЛрдб рдореЗрдВ рдПрдХ рд░реЗрдЧреБрд▓рд░ рдкрд╛рдпрдерди рдПрд░рд░ рд╣реИред + +рдЖрдк [E2B рдХреЛрдб рдПрдХреНрдЬреАрдХреНрдпреВрдЯрд░](https://e2b.dev/docs#what-is-e2-b) рдХрд╛ рдЙрдкрдпреЛрдЧ рд▓реЛрдХрд▓ рдкрд╛рдпрдерди рдЗрдВрдЯрд░рдкреНрд░реЗрдЯрд░ рдХреЗ рдмрдЬрд╛рдп рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдкрд╣рд▓реЗ [`E2B_API_KEY` рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рд╡реЗрд░рд┐рдПрдмрд▓ рд╕реЗрдЯ рдХрд░рдХреЗ](https://e2b.dev/dashboard?tab=keys) рдФрд░ рдлрд┐рд░ рдПрдЬреЗрдВрдЯ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдкрд░ `use_e2b_executor=True` рдкрд╛рд╕ рдХрд░рдХреЗред + +> [!TIP] +> рдХреЛрдб рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдФрд░ рдЬрд╛рдиреЗрдВ [рдЗрд╕ рдЯреНрдпреВрдЯреЛрд░рд┐рдпрд▓ рдореЗрдВ](tutorials/secure_code_execution)ред + +рд╣рдо JSON-рдЬреИрд╕реЗ рдмреНрд▓реЙрдмреНрд╕ рдХреЗ рд░реВрдк рдореЗрдВ рдПрдХреНрд╢рди рд▓рд┐рдЦрдиреЗ рдХреЗ рд╡реНрдпрд╛рдкрдХ рд░реВрдк рд╕реЗ рдЙрдкрдпреЛрдЧ рдХрд┐рдП рдЬрд╛рдиреЗ рд╡рд╛рд▓реЗ рддрд░реАрдХреЗ рдХрд╛ рднреА рд╕рдорд░реНрдерди рдХрд░рддреЗ рд╣реИрдВ: рдпрд╣ [`ToolCallingAgent`] рд╣реИ, рдпрд╣ рдмрд╣реБрдд рдХреБрдЫ [`CodeAgent`] рдХреА рддрд░рд╣ рд╣реА 
рдХрд╛рдо рдХрд░рддрд╛ рд╣реИ, рдмреЗрд╢рдХ `additional_authorized_imports` рдХреЗ рдмрд┐рдирд╛ рдХреНрдпреЛрдВрдХрд┐ рдпрд╣ рдХреЛрдб рдПрдХреНрдЬреАрдХреНрдпреВрдЯ рдирд╣реАрдВ рдХрд░рддрд╛ред + +```py +from smolagents import ToolCallingAgent + +agent = ToolCallingAgent(tools=[], model=model) +agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?") +``` + +### рдПрдЬреЗрдВрдЯ рд░рди рдХрд╛ рдирд┐рд░реАрдХреНрд╖рдг + +рд░рди рдХреЗ рдмрд╛рдж рдХреНрдпрд╛ рд╣реБрдЖ рдпрд╣ рдЬрд╛рдВрдЪрдиреЗ рдХреЗ рд▓рд┐рдП рдпрд╣рд╛рдБ рдХреБрдЫ рдЙрдкрдпреЛрдЧреА рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯреНрд╕ рд╣реИрдВ: +- `agent.logs` рдПрдЬреЗрдВрдЯ рдХреЗ рдлрд╛рдЗрди-рдЧреНрд░реЗрдиреНрдб рд▓реЙрдЧреНрд╕ рдХреЛ рд╕реНрдЯреЛрд░ рдХрд░рддрд╛ рд╣реИред рдПрдЬреЗрдВрдЯ рдХреЗ рд░рди рдХреЗ рд╣рд░ рд╕реНрдЯреЗрдк рдкрд░, рд╕рдм рдХреБрдЫ рдПрдХ рдбрд┐рдХреНрд╢рдирд░реА рдореЗрдВ рд╕реНрдЯреЛрд░ рдХрд┐рдпрд╛ рдЬрд╛рддрд╛ рд╣реИ рдЬреЛ рдлрд┐рд░ `agent.logs` рдореЗрдВ рдЬреЛрдбрд╝рд╛ рдЬрд╛рддрд╛ рд╣реИред +- `agent.write_inner_memory_from_logs()` рдЪрд▓рд╛рдиреЗ рд╕реЗ LLM рдХреЗ рд▓рд┐рдП рдПрдЬреЗрдВрдЯ рдХреЗ рд▓реЙрдЧреНрд╕ рдХреА рдПрдХ рдЗрдирд░ рдореЗрдореЛрд░реА рдмрдирддреА рд╣реИ, рдЪреИрдЯ рдореИрд╕реЗрдЬ рдХреА рд▓рд┐рд╕реНрдЯ рдХреЗ рд░реВрдк рдореЗрдВред рдпрд╣ рдореЗрдердб рд▓реЙрдЧ рдХреЗ рдкреНрд░рддреНрдпреЗрдХ рд╕реНрдЯреЗрдк рдкрд░ рдЬрд╛рддрд╛ рд╣реИ рдФрд░ рдХреЗрд╡рд▓ рд╡рд╣реА рд╕реНрдЯреЛрд░ рдХрд░рддрд╛ рд╣реИ рдЬрд┐рд╕рдореЗрдВ рдпрд╣ рдПрдХ рдореИрд╕реЗрдЬ рдХреЗ рд░реВрдк рдореЗрдВ рд░реБрдЪрд┐ рд░рдЦрддрд╛ рд╣реИ: рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП, рдпрд╣ рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдФрд░ рдЯрд╛рд╕реНрдХ рдХреЛ рдЕрд▓рдЧ-рдЕрд▓рдЧ рдореИрд╕реЗрдЬ рдХреЗ рд░реВрдк рдореЗрдВ рд╕реЗрд╡ рдХрд░реЗрдЧрд╛, рдлрд┐рд░ рдкреНрд░рддреНрдпреЗрдХ рд╕реНрдЯреЗрдк рдХреЗ рд▓рд┐рдП рдпрд╣ LLM рдЖрдЙрдЯрдкреБрдЯ рдХреЛ рдПрдХ рдореИрд╕реЗрдЬ рдХреЗ рд░реВрдк рдореЗрдВ рдФрд░ рдЯреВрд▓ рдХреЙрд▓ рдЖрдЙрдЯрдкреБрдЯ рдХреЛ 
рджреВрд╕рд░реЗ рдореИрд╕реЗрдЬ рдХреЗ рд░реВрдк рдореЗрдВ рд╕реНрдЯреЛрд░ рдХрд░реЗрдЧрд╛ред + +## рдЯреВрд▓реНрд╕ + +рдЯреВрд▓ рдПрдХ рдПрдЯреЙрдорд┐рдХ рдлрд╝рдВрдХреНрд╢рди рд╣реИ рдЬрд┐рд╕реЗ рдПрдЬреЗрдВрдЯ рджреНрд╡рд╛рд░рд╛ рдЙрдкрдпреЛрдЧ рдХрд┐рдпрд╛ рдЬрд╛рддрд╛ рд╣реИред LLM рджреНрд╡рд╛рд░рд╛ рдЙрдкрдпреЛрдЧ рдХрд┐рдП рдЬрд╛рдиреЗ рдХреЗ рд▓рд┐рдП, рдЗрд╕реЗ рдХреБрдЫ рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯреНрд╕ рдХреА рднреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИ рдЬреЛ рдЗрд╕рдХреА API рдмрдирд╛рддреЗ рд╣реИрдВ рдФрд░ LLM рдХреЛ рдпрд╣ рдмрддрд╛рдиреЗ рдХреЗ рд▓рд┐рдП рдЙрдкрдпреЛрдЧ рдХрд┐рдП рдЬрд╛рдПрдВрдЧреЗ рдХрд┐ рдЗрд╕ рдЯреВрд▓ рдХреЛ рдХреИрд╕реЗ рдХреЙрд▓ рдХрд░реЗрдВ: +- рдПрдХ рдирд╛рдо +- рдПрдХ рд╡рд┐рд╡рд░рдг +- рдЗрдирдкреБрдЯ рдкреНрд░рдХрд╛рд░ рдФрд░ рд╡рд┐рд╡рд░рдг +- рдПрдХ рдЖрдЙрдЯрдкреБрдЯ рдкреНрд░рдХрд╛рд░ + +рдЖрдк рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП [`PythonInterpreterTool`] рдХреЛ рдЪреЗрдХ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ: рдЗрд╕рдореЗрдВ рдПрдХ рдирд╛рдо, рд╡рд┐рд╡рд░рдг, рдЗрдирдкреБрдЯ рд╡рд┐рд╡рд░рдг, рдПрдХ рдЖрдЙрдЯрдкреБрдЯ рдкреНрд░рдХрд╛рд░, рдФрд░ рдПрдХреНрд╢рди рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдПрдХ `forward` рдореЗрдердб рд╣реИред + +рдЬрдм рдПрдЬреЗрдВрдЯ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬрд╝ рдХрд┐рдпрд╛ рдЬрд╛рддрд╛ рд╣реИ, рдЯреВрд▓ рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯреНрд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдПрдХ рдЯреВрд▓ рд╡рд┐рд╡рд░рдг рдЬрдирд░реЗрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдХрд┐рдпрд╛ рдЬрд╛рддрд╛ рд╣реИ рдЬреЛ рдПрдЬреЗрдВрдЯ рдХреЗ рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдореЗрдВ рдмреЗрдХ рдХрд┐рдпрд╛ рдЬрд╛рддрд╛ рд╣реИред рдпрд╣ рдПрдЬреЗрдВрдЯ рдХреЛ рдмрддрд╛рддрд╛ рд╣реИ рдХрд┐ рд╡рд╣ рдХреМрди рд╕реЗ рдЯреВрд▓реНрд╕ рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддрд╛ рд╣реИ рдФрд░ рдХреНрдпреЛрдВред + +### рдбрд┐рдлрд╝реЙрд▓реНрдЯ рдЯреВрд▓рдмреЙрдХреНрд╕ + +Transformers рдПрдЬреЗрдВрдЯреНрд╕ рдХреЛ рд╕рд╢рдХреНрдд рдмрдирд╛рдиреЗ рдХреЗ рд▓рд┐рдП рдПрдХ рдбрд┐рдлрд╝реЙрд▓реНрдЯ рдЯреВрд▓рдмреЙрдХреНрд╕ рдХреЗ 
рд╕рд╛рде рдЖрддрд╛ рд╣реИ, рдЬрд┐рд╕реЗ рдЖрдк рдЖрд░реНрдЧреНрдпреВрдореЗрдВрдЯ `add_base_tools = True` рдХреЗ рд╕рд╛рде рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдореЗрдВ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдкрд░ рдЬреЛрдбрд╝ рд╕рдХрддреЗ рд╣реИрдВ: + +- **DuckDuckGo рд╡реЗрдм рд╕рд░реНрдЪ**: DuckDuckGo рдмреНрд░рд╛рдЙрдЬрд╝рд░ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рд╡реЗрдм рд╕рд░реНрдЪ рдХрд░рддрд╛ рд╣реИред +- **рдкрд╛рдпрдерди рдХреЛрдб рдЗрдВрдЯрд░рдкреНрд░реЗрдЯрд░**: рдЖрдкрдХрд╛ LLM рдЬрдирд░реЗрдЯреЗрдб рдкрд╛рдпрдерди рдХреЛрдб рдПрдХ рд╕реБрд░рдХреНрд╖рд┐рдд рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рдореЗрдВ рдЪрд▓рд╛рддрд╛ рд╣реИред рдпрд╣ рдЯреВрд▓ [`ToolCallingAgent`] рдореЗрдВ рдХреЗрд╡рд▓ рддрднреА рдЬреЛрдбрд╝рд╛ рдЬрд╛рдПрдЧрд╛ рдЬрдм рдЖрдк рдЗрд╕реЗ `add_base_tools=True` рдХреЗ рд╕рд╛рде рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬрд╝ рдХрд░рддреЗ рд╣реИрдВ, рдХреНрдпреЛрдВрдХрд┐ рдХреЛрдб-рдмреЗрд╕реНрдб рдПрдЬреЗрдВрдЯ рдкрд╣рд▓реЗ рд╕реЗ рд╣реА рдиреЗрдЯрд┐рд╡ рд░реВрдк рд╕реЗ рдкрд╛рдпрдерди рдХреЛрдб рдПрдХреНрдЬреАрдХреНрдпреВрдЯ рдХрд░ рд╕рдХрддрд╛ рд╣реИ +- **рдЯреНрд░рд╛рдВрд╕рдХреНрд░рд╛рдЗрдмрд░**: Whisper-Turbo рдкрд░ рдмрдирд╛рдпрд╛ рдЧрдпрд╛ рдПрдХ рд╕реНрдкреАрдЪ-рдЯреВ-рдЯреЗрдХреНрд╕реНрдЯ рдкрд╛рдЗрдкрд▓рд╛рдЗрди рдЬреЛ рдСрдбрд┐рдпреЛ рдХреЛ рдЯреЗрдХреНрд╕реНрдЯ рдореЗрдВ рдЯреНрд░рд╛рдВрд╕рдХреНрд░рд╛рдЗрдм рдХрд░рддрд╛ рд╣реИред + +рдЖрдк рдореИрдиреНрдпреБрдЕрд▓ рд░реВрдк рд╕реЗ рдПрдХ рдЯреВрд▓ рдХрд╛ рдЙрдкрдпреЛрдЧ рдЙрд╕рдХреЗ рдЖрд░реНрдЧреНрдпреВрдореЗрдВрдЯреНрд╕ рдХреЗ рд╕рд╛рде рдХреЙрд▓ рдХрд░рдХреЗ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред + +```python +from smolagents import DuckDuckGoSearchTool + +search_tool = DuckDuckGoSearchTool() +print(search_tool("Who's the current president of Russia?")) +``` + +### рдЕрдкрдиреЗ рдХрд╕реНрдЯрдо рдЯреВрд▓ рдмрдирд╛рдПрдВ + +рдЖрдк рдРрд╕реЗ рдЙрдкрдпреЛрдЧ рдХреЗ рдорд╛рдорд▓реЛрдВ рдХреЗ рд▓рд┐рдП рдЕрдкрдиреЗ рдЦреБрдж рдХреЗ рдЯреВрд▓ рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВ рдЬреЛ Hugging Face рдХреЗ 
рдбрд┐рдлрд╝реЙрд▓реНрдЯ рдЯреВрд▓реНрд╕ рджреНрд╡рд╛рд░рд╛ рдХрд╡рд░ рдирд╣реАрдВ рдХрд┐рдП рдЧрдП рд╣реИрдВред +рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП, рдЪрд▓рд┐рдП рдПрдХ рдЯреВрд▓ рдмрдирд╛рддреЗ рд╣реИрдВ рдЬреЛ рджрд┐рдП рдЧрдП рдХрд╛рд░реНрдп (task) рдХреЗ рд▓рд┐рдП рд╣рдм рд╕реЗ рд╕рдмрд╕реЗ рдЕрдзрд┐рдХ рдбрд╛рдЙрдирд▓реЛрдб рдХрд┐рдП рдЧрдП рдореЙрдбрд▓ рдХреЛ рд░рд┐рдЯрд░реНрди рдХрд░рддрд╛ рд╣реИред + +рдЖрдк рдиреАрдЪреЗ рджрд┐рдП рдЧрдП рдХреЛрдб рд╕реЗ рд╢реБрд░реБрдЖрдд рдХрд░реЗрдВрдЧреЗред + +```python +from huggingface_hub import list_models + +task = "text-classification" + +most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) +print(most_downloaded_model.id) +``` + +рдпрд╣ рдХреЛрдб рдЖрд╕рд╛рдиреА рд╕реЗ рдЯреВрд▓ рдореЗрдВ рдмрджрд▓рд╛ рдЬрд╛ рд╕рдХрддрд╛ рд╣реИ, рдмрд╕ рдЗрд╕реЗ рдПрдХ рдлрд╝рдВрдХреНрд╢рди рдореЗрдВ рд░реИрдк рдХрд░реЗрдВ рдФрд░ `tool` рдбреЗрдХреЛрд░реЗрдЯрд░ рдЬреЛрдбрд╝реЗрдВ: +рдпрд╣ рдЯреВрд▓ рдмрдирд╛рдиреЗ рдХрд╛ рдПрдХрдорд╛рддреНрд░ рддрд░реАрдХрд╛ рдирд╣реАрдВ рд╣реИ: рдЖрдк рдЗрд╕реЗ рд╕реАрдзреЗ [`Tool`] рдХрд╛ рд╕рдмрдХреНрд▓рд╛рд╕ рдмрдирд╛рдХрд░ рднреА рдкрд░рд┐рднрд╛рд╖рд┐рдд рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдЬреЛ рдЖрдкрдХреЛ рдЕрдзрд┐рдХ рд▓рдЪреАрд▓рд╛рдкрди рдкреНрд░рджрд╛рди рдХрд░рддрд╛ рд╣реИ, рдЬреИрд╕реЗ рднрд╛рд░реА рдХреНрд▓рд╛рд╕ рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯреНрд╕ рдХреЛ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬрд╝ рдХрд░рдиреЗ рдХреА рд╕рдВрднрд╛рд╡рдирд╛ред + +рдЪрд▓реЛ рджреЗрдЦрддреЗ рд╣реИрдВ рдХрд┐ рдпрд╣ рджреЛрдиреЛрдВ рд╡рд┐рдХрд▓реНрдкреЛрдВ рдХреЗ рд▓рд┐рдП рдХреИрд╕реЗ рдХрд╛рдо рдХрд░рддрд╛ рд╣реИ: + + + + +```py +from smolagents import tool + +@tool +def model_download_tool(task: str) -> str: + """ + This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. + It returns the name of the checkpoint. + + Args: + task: The task for which to get the download count. 
+ """ + most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) + return most_downloaded_model.id +``` + +рдлрд╝рдВрдХреНрд╢рди рдХреЛ рдЪрд╛рд╣рд┐рдП: +- рдПрдХ рд╕реНрдкрд╖реНрдЯ рдирд╛рдо: рдирд╛рдо рдЯреВрд▓ рдХреЗ рдХрд╛рд░реНрдп рдХреЛ рд╕реНрдкрд╖реНрдЯ рд░реВрдк рд╕реЗ рдмрддрд╛рдиреЗ рд╡рд╛рд▓рд╛ рд╣реЛрдирд╛ рдЪрд╛рд╣рд┐рдП рддрд╛рдХрд┐ рдЗрд╕реЗ рдЪрд▓рд╛рдиреЗ рд╡рд╛рд▓реЗ LLM рдХреЛ рдЖрд╕рд╛рдиреА рд╣реЛред рдЪреВрдВрдХрд┐ рдпрд╣ рдЯреВрд▓ рдХрд╛рд░реНрдп рдХреЗ рд▓рд┐рдП рд╕рдмрд╕реЗ рдЕрдзрд┐рдХ рдбрд╛рдЙрдирд▓реЛрдб рдХрд┐рдП рдЧрдП рдореЙрдбрд▓ рдХреЛ рд▓реМрдЯрд╛рддрд╛ рд╣реИ, рдЗрд╕рдХрд╛ рдирд╛рдо `model_download_tool` рд░рдЦрд╛ рдЧрдпрд╛ рд╣реИред +- рдЗрдирдкреБрдЯ рдФрд░ рдЖрдЙрдЯрдкреБрдЯ рдкрд░ рдЯрд╛рдЗрдк рд╣рд┐рдВрдЯреНрд╕ред +- рдПрдХ рд╡рд┐рд╡рд░рдг: рдЗрд╕рдореЗрдВ 'Args:' рднрд╛рдЧ рд╢рд╛рдорд┐рд▓ рд╣реЛрдирд╛ рдЪрд╛рд╣рд┐рдП, рдЬрд┐рд╕рдореЗрдВ рдкреНрд░рддреНрдпреЗрдХ рдЖрд░реНрдЧреНрдпреБрдореЗрдВрдЯ рдХрд╛ рд╡рд░реНрдгрди (рдмрд┐рдирд╛ рдЯрд╛рдЗрдк рд╕рдВрдХреЗрдд рдХреЗ) рдХрд┐рдпрд╛ рдЧрдпрд╛ рд╣реЛред рдпрд╣ рд╡рд┐рд╡рд░рдг рдПрдХ рдирд┐рд░реНрджреЗрд╢ рдореИрдиреБрдЕрд▓ рдХреА рддрд░рд╣ рд╣реЛрддрд╛ рд╣реИ рдЬреЛ LLM рдХреЛ рдЯреВрд▓ рдЪрд▓рд╛рдиреЗ рдореЗрдВ рдорджрдж рдХрд░рддрд╛ рд╣реИред рдЗрд╕реЗ рдЕрдирджреЗрдЦрд╛ рди рдХрд░реЗрдВред +рдЗрди рд╕рднреА рддрддреНрд╡реЛрдВ рдХреЛ рдПрдЬреЗрдВрдЯ рдХреА рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдореЗрдВ рд╕реНрд╡рдЪрд╛рд▓рд┐рдд рд░реВрдк рд╕реЗ рд╢рд╛рдорд┐рд▓ рдХрд┐рдпрд╛ рдЬрд╛рдПрдЧрд╛: рдЗрд╕рд▓рд┐рдП рдЗрдиреНрд╣реЗрдВ рдпрдерд╛рд╕рдВрднрд╡ рд╕реНрдкрд╖реНрдЯ рдмрдирд╛рдиреЗ рдХрд╛ рдкреНрд░рдпрд╛рд╕ рдХрд░реЗрдВ! 
+ +> [!TIP] +> рдпрд╣ рдкрд░рд┐рднрд╛рд╖рд╛ рдкреНрд░рд╛рд░реВрдк `apply_chat_template` рдореЗрдВ рдЙрдкрдпреЛрдЧ рдХреА рдЧрдИ рдЯреВрд▓ рд╕реНрдХреАрдорд╛ рдЬреИрд╕рд╛ рд╣реА рд╣реИ, рдХреЗрд╡рд▓ рдЕрддрд┐рд░рд┐рдХреНрдд `tool` рдбреЗрдХреЛрд░реЗрдЯрд░ рдЬреЛрдбрд╝рд╛ рдЧрдпрд╛ рд╣реИ: рд╣рдорд╛рд░реЗ рдЯреВрд▓ рдЙрдкрдпреЛрдЧ API рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдЕрдзрд┐рдХ рдкрдврд╝реЗрдВ [рдпрд╣рд╛рдБ](https://huggingface.co/blog/unified-tool-use#passing-tools-to-a-chat-template)ред + + + +```py +from smolagents import Tool + +class ModelDownloadTool(Tool): + name = "model_download_tool" + description = "This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. It returns the name of the checkpoint." + inputs = {"task": {"type": "string", "description": "The task for which to get the download count."}} + output_type = "string" + + def forward(self, task: str) -> str: + most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) + return most_downloaded_model.id +``` + +рд╕рдмрдХреНрд▓рд╛рд╕ рдХреЛ рдирд┐рдореНрдирд▓рд┐рдЦрд┐рдд рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯреНрд╕ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИ: +- рдПрдХ рд╕реНрдкрд╖реНрдЯ `name`: рдирд╛рдо рдЯреВрд▓ рдХреЗ рдХрд╛рд░реНрдп рдХреЛ рд╕реНрдкрд╖реНрдЯ рд░реВрдк рд╕реЗ рдмрддрд╛рдиреЗ рд╡рд╛рд▓рд╛ рд╣реЛрдирд╛ рдЪрд╛рд╣рд┐рдПред +- рдПрдХ `description`: рдпрд╣ рднреА LLM рдХреЗ рд▓рд┐рдП рдирд┐рд░реНрджреЗрд╢ рдореИрдиреБрдЕрд▓ рдХреА рддрд░рд╣ рдХрд╛рдо рдХрд░рддрд╛ рд╣реИред +- рдЗрдирдкреБрдЯ рдкреНрд░рдХрд╛рд░ рдФрд░ рдЙрдирдХреЗ рд╡рд┐рд╡рд░рдгред +- рдЖрдЙрдЯрдкреБрдЯ рдкреНрд░рдХрд╛рд░ред +рдЗрди рд╕рднреА рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯреНрд╕ рдХреЛ рдПрдЬреЗрдВрдЯ рдХреА рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдореЗрдВ рд╕реНрд╡рдЪрд╛рд▓рд┐рдд рд░реВрдк рд╕реЗ рд╢рд╛рдорд┐рд▓ рдХрд┐рдпрд╛ рдЬрд╛рдПрдЧрд╛, рдЗрдиреНрд╣реЗрдВ рд╕реНрдкрд╖реНрдЯ рдФрд░ рд╡рд┐рд╕реНрддреГрдд рдмрдирд╛рдПрдВред + + + + +рдЖрдк 
рд╕реАрдзреЗ рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдХреЛ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬрд╝ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ: +```py +from smolagents import CodeAgent, HfApiModel +agent = CodeAgent(tools=[model_download_tool], model=HfApiModel()) +agent.run( + "Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?" +) +``` + +рд▓реЙрдЧреНрд╕ рдЗрд╕ рдкреНрд░рдХрд╛рд░ рд╣реЛрдВрдЧреЗ: +```text +тХнтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФА New run тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтХо +тФВ тФВ +тФВ Can you give me the name of the model that has the most downloads in the 'text-to-video' тФВ +тФВ task on the Hugging Face Hub? тФВ +тФВ тФВ +тХ░тФА HfApiModel - Qwen/Qwen2.5-Coder-32B-Instruct тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтХп +тФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБ Step 0 тФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБ +тХнтФА Executing this code: тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтХо +тФВ 1 model_name = model_download_tool(task="text-to-video") тФВ +тФВ 2 print(model_name) тФВ +тХ░тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтХп +Execution logs: +ByteDance/AnimateDiff-Lightning + +Out: None +[Step 0: 
Duration 0.27 seconds| Input tokens: 2,069 | Output tokens: 60] +тФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБ Step 1 тФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБтФБ +тХнтФА Executing this code: тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтХо +тФВ 1 final_answer("ByteDance/AnimateDiff-Lightning") тФВ +тХ░тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтХп +Out - Final answer: ByteDance/AnimateDiff-Lightning +[Step 1: Duration 0.10 seconds| Input tokens: 4,288 | Output tokens: 148] +Out[20]: 'ByteDance/AnimateDiff-Lightning' +``` + + [!TIP] +> рдЯреВрд▓реНрд╕ рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдЕрдзрд┐рдХ рдкрдврд╝реЗрдВ [dedicated tutorial](./tutorials/tools#рдЯреВрд▓-рдХреНрдпрд╛-рд╣реИ-рдФрд░-рдЗрд╕реЗ-рдХреИрд╕реЗ-рдмрдирд╛рдПрдВ) рдореЗрдВред + +## рдорд▓реНрдЯреА-рдПрдЬреЗрдВрдЯреНрд╕ + +Microsoft рдХреЗ рдлреНрд░реЗрдорд╡рд░реНрдХ [Autogen](https://huggingface.co/papers/2308.08155) рдХреЗ рд╕рд╛рде рдорд▓реНрдЯреА-рдПрдЬреЗрдВрдЯ рд╕рд┐рд╕реНрдЯрдореНрд╕ рдХреА рд╢реБрд░реБрдЖрдд рд╣реБрдИред + +рдЗрд╕ рдкреНрд░рдХрд╛рд░ рдХреЗ рдлреНрд░реЗрдорд╡рд░реНрдХ рдореЗрдВ, рдЖрдкрдХреЗ рдХрд╛рд░реНрдп рдХреЛ рд╣рд▓ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдХрдИ рдПрдЬреЗрдВрдЯреНрд╕ рдПрдХ рд╕рд╛рде рдХрд╛рдо рдХрд░рддреЗ рд╣реИрдВ, рди рдХрд┐ рдХреЗрд╡рд▓ рдПрдХред +рдпрд╣ рдЕрдзрд┐рдХрд╛рдВрд╢ рдмреЗрдВрдЪрдорд╛рд░реНрдХреНрд╕ рдкрд░ рдмреЗрд╣рддрд░ рдкреНрд░рджрд░реНрд╢рди рджреЗрддрд╛ рд╣реИред рдЗрд╕рдХрд╛ рдХрд╛рд░рдг рдпрд╣ рд╣реИ рдХрд┐ рдХрдИ 
рдХрд╛рд░реНрдпреЛрдВ рдХреЗ рд▓рд┐рдП, рдПрдХ рд╕рд░реНрд╡-рд╕рдорд╛рд╡реЗрд╢реА рдкреНрд░рдгрд╛рд▓реА рдХреЗ рдмрдЬрд╛рдп, рдЖрдк рдЙрдк-рдХрд╛рд░реНрдпреЛрдВ рдкрд░ рд╡рд┐рд╢реЗрд╖рдЬреНрдЮрддрд╛ рд░рдЦрдиреЗ рд╡рд╛рд▓реА рдЗрдХрд╛рдЗрдпреЛрдВ рдХреЛ рдкрд╕рдВрдж рдХрд░реЗрдВрдЧреЗред рдЗрд╕ рддрд░рд╣, рдЕрд▓рдЧ-рдЕрд▓рдЧ рдЯреВрд▓ рд╕реЗрдЯреНрд╕ рдФрд░ рдореЗрдореЛрд░реА рд╡рд╛рд▓реЗ рдПрдЬреЗрдВрдЯреНрд╕ рдХреЗ рдкрд╛рд╕ рд╡рд┐рд╢реЗрд╖рдХрд░рдг рдХреА рдЕрдзрд┐рдХ рдХреБрд╢рд▓рддрд╛ рд╣реЛрддреА рд╣реИред рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП, рдХреЛрдб рдЙрддреНрдкрдиреНрди рдХрд░рдиреЗ рд╡рд╛рд▓реЗ рдПрдЬреЗрдВрдЯ рдХреА рдореЗрдореЛрд░реА рдХреЛ рд╡реЗрдм рд╕рд░реНрдЪ рдПрдЬреЗрдВрдЯ рджреНрд╡рд╛рд░рд╛ рджреЗрдЦреЗ рдЧрдП рд╡реЗрдмрдкреЗрдЬреЛрдВ рдХреА рд╕рднреА рд╕рд╛рдордЧреНрд░реА рд╕реЗ рдХреНрдпреЛрдВ рднрд░реЗрдВ? рдЗрдиреНрд╣реЗрдВ рдЕрд▓рдЧ рд░рдЦрдирд╛ рдмреЗрд╣рддрд░ рд╣реИред + +рдЖрдк `smolagents` рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рдЖрд╕рд╛рдиреА рд╕реЗ рд╢реНрд░реЗрдгреАрдмрджреНрдз рдорд▓реНрдЯреА-рдПрдЬреЗрдВрдЯ рд╕рд┐рд╕реНрдЯрдореНрд╕ рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВред + +рдРрд╕рд╛ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рдПрдЬреЗрдВрдЯ рдХреЛ [`ManagedAgent`] рдСрдмреНрдЬреЗрдХреНрдЯ рдореЗрдВ рд╕рдорд╛рд╣рд┐рдд рдХрд░реЗрдВред рдпрд╣ рдСрдмреНрдЬреЗрдХреНрдЯ `agent`, `name`, рдФрд░ рдПрдХ `description` рдЬреИрд╕реЗ рддрд░реНрдХреЛрдВ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИ, рдЬреЛ рдлрд┐рд░ рдореИрдиреЗрдЬрд░ рдПрдЬреЗрдВрдЯ рдХреА рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдореЗрдВ рдПрдореНрдмреЗрдб рдХрд┐рдпрд╛ рдЬрд╛рддрд╛ рд╣реИ + +рдпрд╣рд╛рдВ рдПрдХ рдПрдЬреЗрдВрдЯ рдмрдирд╛рдиреЗ рдХрд╛ рдЙрджрд╛рд╣рд░рдг рджрд┐рдпрд╛ рдЧрдпрд╛ рд╣реИ рдЬреЛ рд╣рдорд╛рд░реЗ [`DuckDuckGoSearchTool`] рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рдПрдХ рд╡рд┐рд╢рд┐рд╖реНрдЯ рд╡реЗрдм рдЦреЛрдЬ рдПрдЬреЗрдВрдЯ рдХреЛ рдкреНрд░рдмрдВрдзрд┐рдд рдХрд░рддрд╛ рд╣реИред + +```py +from smolagents import CodeAgent, HfApiModel, DuckDuckGoSearchTool, 
ManagedAgent + +model = HfApiModel() + +web_agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=model) + +managed_web_agent = ManagedAgent( + agent=web_agent, + name="web_search", + description="Runs web searches for you. Give it your query as an argument." +) + +manager_agent = CodeAgent( + tools=[], model=model, managed_agents=[managed_web_agent] +) + +manager_agent.run("Who is the CEO of Hugging Face?") +``` + +> [!TIP] +> рдХреБрд╢рд▓ рдорд▓реНрдЯреА-рдПрдЬреЗрдВрдЯ рдЗрдВрдкреНрд▓реАрдореЗрдВрдЯреЗрд╢рди рдХрд╛ рдПрдХ рд╡рд┐рд╕реНрддреГрдд рдЙрджрд╛рд╣рд░рдг рджреЗрдЦрдиреЗ рдХреЗ рд▓рд┐рдП, [рдХреИрд╕реЗ рд╣рдордиреЗ рдЕрдкрдиреЗ рдорд▓реНрдЯреА-рдПрдЬреЗрдВрдЯ рд╕рд┐рд╕реНрдЯрдо рдХреЛ GAIA рд▓реАрдбрд░рдмреЛрд░реНрдб рдХреЗ рд╢реАрд░реНрд╖ рдкрд░ рдкрд╣реБрдВрдЪрд╛рдпрд╛](https://huggingface.co/blog/beating-gaia) рдкрд░ рдЬрд╛рдПрдВред + + +## рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рд╕реЗ рдмрд╛рдд рдХрд░реЗрдВ рдФрд░ рдЙрд╕рдХреЗ рд╡рд┐рдЪрд╛рд░реЛрдВ рдХреЛ рдПрдХ рд╢рд╛рдирджрд╛рд░ Gradio рдЗрдВрдЯрд░рдлреЗрд╕ рдореЗрдВ рд╡рд┐рдЬрд╝реБрдЕрд▓рд╛рдЗрдЬрд╝ рдХрд░реЗрдВ + +рдЖрдк `GradioUI` рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдХреЛ рдЗрдВрдЯрд░реИрдХреНрдЯрд┐рд╡ рддрд░реАрдХреЗ рд╕реЗ рдХрд╛рд░реНрдп рд╕реМрдВрдк рд╕рдХрддреЗ рд╣реИрдВ рдФрд░ рдЙрд╕рдХреЗ рд╕реЛрдЪрдиреЗ рдФрд░ рдирд┐рд╖реНрдкрд╛рджрди рдХреА рдкреНрд░рдХреНрд░рд┐рдпрд╛ рдХреЛ рджреЗрдЦ рд╕рдХрддреЗ рд╣реИрдВред рдиреАрдЪреЗ рдПрдХ рдЙрджрд╛рд╣рд░рдг рджрд┐рдпрд╛ рдЧрдпрд╛ рд╣реИ: + +```py +from smolagents import ( + load_tool, + CodeAgent, + HfApiModel, + GradioUI +) + +# Import tool from Hub +image_generation_tool = load_tool("m-ric/text-to-image", trust_remote_code=True) + +model = HfApiModel(model_id) + +# Initialize the agent with the image generation tool +agent = CodeAgent(tools=[image_generation_tool], model=model) + +GradioUI(agent).launch() +``` + +рдЕрдВрджрд░реВрдиреА рддреМрд░ рдкрд░, рдЬрдм рдпреВрдЬрд░ рдПрдХ рдирдпрд╛ рдЙрддреНрддрд░ рдЯрд╛рдЗрдк 
рдХрд░рддрд╛ рд╣реИ, рддреЛ рдПрдЬреЗрдВрдЯ рдХреЛ `agent.run(user_request, reset=False)` рдХреЗ рд╕рд╛рде рд▓реЙрдиреНрдЪ рдХрд┐рдпрд╛ рдЬрд╛рддрд╛ рд╣реИред +рдпрд╣рд╛рдБ `reset=False` рдлреНрд▓реИрдЧ рдХрд╛ рдорддрд▓рдм рд╣реИ рдХрд┐ рдПрдЬреЗрдВрдЯ рдХреА рдореЗрдореЛрд░реА рдЗрд╕ рдирдП рдХрд╛рд░реНрдп рдХреЛ рд▓реЙрдиреНрдЪ рдХрд░рдиреЗ рд╕реЗ рдкрд╣рд▓реЗ рдХреНрд▓рд┐рдпрд░ рдирд╣реАрдВ рд╣реЛрддреА, рдЬрд┐рд╕рд╕реЗ рдмрд╛рддрдЪреАрдд рдЬрд╛рд░реА рд░рд╣рддреА рд╣реИред + +рдЖрдк рдЗрд╕ `reset=False` рдЖрд░реНрдЧреНрдпреБрдореЗрдВрдЯ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд┐рд╕реА рднреА рдЕрдиреНрдп рдПрдЬреЗрдВрдЯрд┐рдХ рдПрдкреНрд▓рд┐рдХреЗрд╢рди рдореЗрдВ рдмрд╛рддрдЪреАрдд рдЬрд╛рд░реА рд░рдЦрдиреЗ рдХреЗ рд▓рд┐рдП рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред + +## рдЕрдЧрд▓реЗ рдХрджрдо + +рдЕрдзрд┐рдХ рдЧрд╣рди рдЙрдкрдпреЛрдЧ рдХреЗ рд▓рд┐рдП, рдЖрдк рд╣рдорд╛рд░реЗ рдЯреНрдпреВрдЯреЛрд░рд┐рдпрд▓реНрд╕ рджреЗрдЦ рд╕рдХрддреЗ рд╣реИрдВ: +- [рд╣рдорд╛рд░реЗ рдХреЛрдб рдПрдЬреЗрдВрдЯреНрд╕ рдХреИрд╕реЗ рдХрд╛рдо рдХрд░рддреЗ рд╣реИрдВ рдЗрд╕рдХрд╛ рд╡рд┐рд╡рд░рдг](./tutorials/secure_code_execution) +- [рдЕрдЪреНрдЫреЗ рдПрдЬреЗрдВрдЯреНрд╕ рдмрдирд╛рдиреЗ рдХреЗ рд▓рд┐рдП рдпрд╣ рдЧрд╛рдЗрдб](./tutorials/building_good_agents) +- [рдЯреВрд▓ рдЙрдкрдпреЛрдЧ рдХреЗ рд▓рд┐рдП рдЗрди-рдбреЗрдкреНрде рдЧрд╛рдЗрдб ](./tutorials/building_good_agents)ред diff --git a/docs/source/hi/index.md b/docs/source/hi/index.md new file mode 100644 index 000000000..533b3b62d --- /dev/null +++ b/docs/source/hi/index.md @@ -0,0 +1,54 @@ + + +# `smolagents` + +
+ +
+ +рдпрд╣ рд▓рд╛рдЗрдмреНрд░реЗрд░реА рдкрд╛рд╡рд░рдлреБрд▓ рдПрдЬреЗрдВрдЯреНрд╕ рдмрдирд╛рдиреЗ рдХреЗ рд▓рд┐рдП рд╕рдмрд╕реЗ рд╕рд░рд▓ рдлреНрд░реЗрдорд╡рд░реНрдХ рд╣реИ! рд╡реИрд╕реЗ, "рдПрдЬреЗрдВрдЯреНрд╕" рд╣реИрдВ рдХреНрдпрд╛? рд╣рдо рдЕрдкрдиреА рдкрд░рд┐рднрд╛рд╖рд╛ [рдЗрд╕ рдкреЗрдЬ рдкрд░](conceptual_guides/intro_agents) рдкреНрд░рджрд╛рди рдХрд░рддреЗ рд╣реИрдВ, рдЬрд╣рд╛рдБ рдЖрдкрдХреЛ рдпрд╣ рднреА рдкрддрд╛ рдЪрд▓реЗрдЧрд╛ рдХрд┐ рдЗрдиреНрд╣реЗрдВ рдХрдм рдЙрдкрдпреЛрдЧ рдХрд░реЗрдВ рдпрд╛ рди рдХрд░реЗрдВ (рд╕реНрдкреЙрдЗрд▓рд░: рдЖрдк рдЕрдХреНрд╕рд░ рдПрдЬреЗрдВрдЯреНрд╕ рдХреЗ рдмрд┐рдирд╛ рдмреЗрд╣рддрд░ рдХрд╛рдо рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ)ред + +рдпрд╣ рд▓рд╛рдЗрдмреНрд░реЗрд░реА рдкреНрд░рджрд╛рди рдХрд░рддреА рд╣реИ: + +тЬи **рд╕рд░рд▓рддрд╛**: Agents рдХрд╛ рд▓реЙрдЬрд┐рдХ рд▓рдЧрднрдЧ рдПрдХ рд╣рдЬрд╛рд░ рд▓рд╛рдЗрдиреНрд╕ рдСрдлрд╝ рдХреЛрдб рдореЗрдВ рд╕рдорд╛рд╣рд┐рдд рд╣реИред рд╣рдордиреЗ рд░реЙ рдХреЛрдб рдХреЗ рдКрдкрд░ рдПрдмреНрд╕реНрдЯреНрд░реИрдХреНрд╢рди рдХреЛ рдиреНрдпреВрдирддрдо рдЖрдХрд╛рд░ рдореЗрдВ рд░рдЦрд╛ рд╣реИ! + +ЁЯМР **рд╕рднреА LLM рдХреЗ рд▓рд┐рдП рд╕рдкреЛрд░реНрдЯ**: рдпрд╣ рд╣рдм рдкрд░ рд╣реЛрд╕реНрдЯ рдХрд┐рдП рдЧрдП рдореЙрдбрд▓реНрд╕ рдХреЛ рдЙрдирдХреЗ `transformers` рд╡рд░реНрдЬрди рдореЗрдВ рдпрд╛ рд╣рдорд╛рд░реЗ рдЗрдиреНрдлрд░реЗрдВрд╕ API рдХреЗ рдорд╛рдзреНрдпрдо рд╕реЗ рд╕рдкреЛрд░реНрдЯ рдХрд░рддрд╛ рд╣реИ, рд╕рд╛рде рд╣реА OpenAI, Anthropic рд╕реЗ рднреА... 
рдХрд┐рд╕реА рднреА LLM рд╕реЗ рдПрдЬреЗрдВрдЯ рдХреЛ рдкрд╛рд╡рд░ рдХрд░рдирд╛ рд╡рд╛рд╕реНрддрд╡ рдореЗрдВ рдЖрд╕рд╛рди рд╣реИред
+
+ЁЯзСтАНЁЯТ╗ **рдХреЛрдб Agents рдХреЗ рд▓рд┐рдП рдлрд░реНрд╕реНрдЯ-рдХреНрд▓рд╛рд╕ рд╕рдкреЛрд░реНрдЯ**, рдпрд╛рдиреА рдРрд╕реЗ рдПрдЬреЗрдВрдЯреНрд╕ рдЬреЛ рдЕрдкрдиреА рдПрдХреНрд╢рдиреНрд╕ рдХреЛ рдХреЛрдб рдореЗрдВ рд▓рд┐рдЦрддреЗ рд╣реИрдВ (рдХреЛрдб рд▓рд┐рдЦрдиреЗ рдХреЗ рд▓рд┐рдП рдЙрдкрдпреЛрдЧ рдХрд┐рдП рдЬрд╛рдиреЗ рд╡рд╛рд▓реЗ рдПрдЬреЗрдВрдЯреНрд╕ рдХреЗ рд╡рд┐рдкрд░реАрдд), [рдпрд╣рд╛рдБ рдФрд░ рдкрдврд╝реЗрдВ](tutorials/secure_code_execution)ред
+
+ЁЯдЧ **рд╣рдм рдЗрдВрдЯреАрдЧреНрд░реЗрд╢рди**: рдЖрдк рдЯреВрд▓реНрд╕ рдХреЛ рд╣рдм рдкрд░ рд╢реЗрдпрд░ рдФрд░ рд▓реЛрдб рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдФрд░ рдЖрдЧреЗ рдФрд░ рднреА рдмрд╣реБрдд рдХреБрдЫ рдЖрдиреЗ рд╡рд╛рд▓рд╛ рд╣реИ!
+
+
+
рдЧрд╛рдЗрдбреЗрдб рдЯреВрд░
+

рдмреЗрд╕рд┐рдХреНрд╕ рд╕реАрдЦреЗрдВ рдФрд░ рдПрдЬреЗрдВрдЯреНрд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рдореЗрдВ рдкрд░рд┐рдЪрд┐рдд рд╣реЛрдВред рдпрджрд┐ рдЖрдк рдкрд╣рд▓реА рдмрд╛рд░ рдПрдЬреЗрдВрдЯреНрд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░ рд░рд╣реЗ рд╣реИрдВ рддреЛ рдпрд╣рд╛рдБ рд╕реЗ рд╢реБрд░реВ рдХрд░реЗрдВ!

+
+
рд╣рд╛рдЙ-рдЯреВ рдЧрд╛рдЗрдбреНрд╕
+

рдПрдХ рд╡рд┐рд╢рд┐рд╖реНрдЯ рд▓рдХреНрд╖реНрдп рдкреНрд░рд╛рдкреНрдд рдХрд░рдиреЗ рдореЗрдВ рдорджрдж рдХреЗ рд▓рд┐рдП рдЧрд╛рдЗрдб: SQL рдХреНрд╡реЗрд░реА рдЬрдирд░реЗрдЯ рдФрд░ рдЯреЗрд╕реНрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдПрдЬреЗрдВрдЯ рдмрдирд╛рдПрдВ!

+
+
рдХреЙрдиреНрд╕реЗрдкреНрдЪреБрдЕрд▓ рдЧрд╛рдЗрдбреНрд╕
+

рдорд╣рддреНрд╡рдкреВрд░реНрдг рд╡рд┐рд╖рдпреЛрдВ рдХреА рдмреЗрд╣рддрд░ рд╕рдордЭ рдмрдирд╛рдиреЗ рдХреЗ рд▓рд┐рдП рдЙрдЪреНрдЪ-рд╕реНрддрд░реАрдп рд╡реНрдпрд╛рдЦреНрдпрд╛рдПрдВред

+
+
рдЯреНрдпреВрдЯреЛрд░рд┐рдпрд▓реНрд╕
+

рдПрдЬреЗрдВрдЯреНрд╕ рдмрдирд╛рдиреЗ рдХреЗ рдорд╣рддреНрд╡рдкреВрд░реНрдг рдкрд╣рд▓реБрдУрдВ рдХреЛ рдХрд╡рд░ рдХрд░рдиреЗ рд╡рд╛рд▓реЗ рдЯреНрдпреВрдЯреЛрд░рд┐рдпрд▓реНрд╕ред

+
+
+
\ No newline at end of file diff --git a/docs/source/hi/reference/agents.md b/docs/source/hi/reference/agents.md new file mode 100644 index 000000000..11b461e79 --- /dev/null +++ b/docs/source/hi/reference/agents.md @@ -0,0 +1,156 @@ + +# Agents + + + +Smolagents рдПрдХ experimental API рд╣реИ рдЬреЛ рдХрд┐рд╕реА рднреА рд╕рдордп рдмрджрд▓ рд╕рдХрддрд╛ рд╣реИред рдПрдЬреЗрдВрдЯреНрд╕ рджреНрд╡рд╛рд░рд╛ рд▓реМрдЯрд╛рдП рдЧрдП рдкрд░рд┐рдгрд╛рдо рднрд┐рдиреНрди рд╣реЛ рд╕рдХрддреЗ рд╣реИрдВ рдХреНрдпреЛрдВрдХрд┐ APIs рдпрд╛ underlying рдореЙрдбрд▓ рдмрджрд▓рдиреЗ рдХреА рд╕рдВрднрд╛рд╡рдирд╛ рд░рдЦрддреЗ рд╣реИрдВред + + + +Agents рдФрд░ tools рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдЕрдзрд┐рдХ рдЬрд╛рдирдиреЗ рдХреЗ рд▓рд┐рдП [introductory guide](../index) рдкрдврд╝рдирд╛ рд╕реБрдирд┐рд╢реНрдЪрд┐рдд рдХрд░реЗрдВред +рдпрд╣ рдкреЗрдЬ underlying рдХреНрд▓рд╛рд╕реЗрдЬ рдХреЗ рд▓рд┐рдП API docs рдХреЛ рд╢рд╛рдорд┐рд▓ рдХрд░рддрд╛ рд╣реИред + +## Agents + +рд╣рдорд╛рд░реЗ рдПрдЬреЗрдВрдЯреНрд╕ [`MultiStepAgent`] рд╕реЗ рдЗрдирд╣реЗрд░рд┐рдЯ рдХрд░рддреЗ рд╣реИрдВ, рдЬрд┐рд╕рдХрд╛ рдЕрд░реНрде рд╣реИ рдХрд┐ рд╡реЗ рдХрдИ рдЪрд░рдгреЛрдВ рдореЗрдВ рдХрд╛рд░реНрдп рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдкреНрд░рддреНрдпреЗрдХ рдЪрд░рдг рдореЗрдВ рдПрдХ рд╡рд┐рдЪрд╛рд░, рдлрд┐рд░ рдПрдХ рдЯреВрд▓ рдХреЙрд▓ рдФрд░ рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рд╢рд╛рдорд┐рд▓ рд╣реЛрддрд╛ рд╣реИред [рдЗрд╕ рдХреЙрдиреНрд╕реЗрдкреНрдЪреБрдЕрд▓ рдЧрд╛рдЗрдб](../conceptual_guides/react) рдореЗрдВ рдЕрдзрд┐рдХ рдкрдврд╝реЗрдВред + +рд╣рдо рдореБрдЦреНрдп [`Agent`] рдХреНрд▓рд╛рд╕ рдкрд░ рдЖрдзрд╛рд░рд┐рдд рджреЛ рдкреНрд░рдХрд╛рд░ рдХреЗ рдПрдЬреЗрдВрдЯреНрд╕ рдкреНрд░рджрд╛рди рдХрд░рддреЗ рд╣реИрдВред + - [`CodeAgent`] рдбрд┐рдлрд╝реЙрд▓реНрдЯ рдПрдЬреЗрдВрдЯ рд╣реИ, рдпрд╣ рдЕрдкрдиреЗ рдЯреВрд▓ рдХреЙрд▓реНрд╕ рдХреЛ Python рдХреЛрдб рдореЗрдВ рд▓рд┐рдЦрддрд╛ рд╣реИред + - [`ToolCallingAgent`] рдЕрдкрдиреЗ рдЯреВрд▓ рдХреЙрд▓реНрд╕ рдХреЛ JSON рдореЗрдВ рд▓рд┐рдЦрддрд╛ рд╣реИред + +рджреЛрдиреЛрдВ рдХреЛ 
рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдкрд░ `model` рдФрд░ рдЯреВрд▓реНрд╕ рдХреА рд╕реВрдЪреА `tools` рдЖрд░реНрдЧреБрдореЗрдВрдЯреНрд╕ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИред + +### Agents рдХреА рдХреНрд▓рд╛рд╕реЗрдЬ + +[[autodoc]] MultiStepAgent + +[[autodoc]] CodeAgent + +[[autodoc]] ToolCallingAgent + + +### ManagedAgent + +[[autodoc]] ManagedAgent + +### stream_to_gradio + +[[autodoc]] stream_to_gradio + +### GradioUI + +[[autodoc]] GradioUI + +## рдореЙрдбрд▓реНрд╕ + +рдЖрдк рд╕реНрд╡рддрдВрддреНрд░ рд░реВрдк рд╕реЗ рдЕрдкрдиреЗ рд╕реНрд╡рдпрдВ рдХреЗ рдореЙрдбрд▓ рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВ рдФрд░ рдЙрдирдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред + +рдЖрдк рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдХреЗ рд▓рд┐рдП рдХреЛрдИ рднреА `model` рдХреЙрд▓ рдХрд░рдиреЗ рдпреЛрдЧреНрдп рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдЬрдм рддрдХ рдХрд┐: +1. рдпрд╣ рдЕрдкрдиреЗ рдЗрдирдкреБрдЯ `messages` рдХреЗ рд▓рд┐рдП [messages format](./chat_templating) (`List[Dict[str, str]]`) рдХрд╛ рдкрд╛рд▓рди рдХрд░рддрд╛ рд╣реИ, рдФрд░ рдпрд╣ рдПрдХ `str` рд▓реМрдЯрд╛рддрд╛ рд╣реИред +2. 
рдпрд╣ рдЖрд░реНрдЧреБрдореЗрдВрдЯ `stop_sequences` рдореЗрдВ рдкрд╛рд╕ рдХрд┐рдП рдЧрдП рд╕реАрдХреНрд╡реЗрдВрд╕ рд╕реЗ *рдкрд╣рд▓реЗ* рдЖрдЙрдЯрдкреБрдЯ рдЬрдирд░реЗрдЯ рдХрд░рдирд╛ рдмрдВрдж рдХрд░ рджреЗрддрд╛ рд╣реИред + +рдЕрдкрдиреЗ LLM рдХреЛ рдкрд░рд┐рднрд╛рд╖рд┐рдд рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рдЖрдк рдПрдХ `custom_model` рдореЗрдердб рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВ рдЬреЛ [messages](./chat_templating) рдХреА рдПрдХ рд╕реВрдЪреА рд╕реНрд╡реАрдХрд╛рд░ рдХрд░рддрд╛ рд╣реИ рдФрд░ рдЯреЗрдХреНрд╕реНрдЯ рдпреБрдХреНрдд .content рд╡рд┐рд╢реЗрд╖рддрд╛ рд╡рд╛рд▓рд╛ рдПрдХ рдСрдмреНрдЬреЗрдХреНрдЯ рд▓реМрдЯрд╛рддрд╛ рд╣реИред рдЗрд╕ рдХреЙрд▓реЗрдмрд▓ рдХреЛ рдПрдХ `stop_sequences` рдЖрд░реНрдЧреБрдореЗрдВрдЯ рднреА рд╕реНрд╡реАрдХрд╛рд░ рдХрд░рдиреЗ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИ рдЬреЛ рдмрддрд╛рддрд╛ рд╣реИ рдХрд┐ рдХрдм рдЬрдирд░реЗрдЯ рдХрд░рдирд╛ рдФрд░ рдмрдВрдж рдХрд░рдирд╛ рд╣реИред + +```python +from huggingface_hub import login, InferenceClient + +login("") + +model_id = "meta-llama/Llama-3.3-70B-Instruct" + +client = InferenceClient(model=model_id) + +def custom_model(messages, stop_sequences=["Task"]): + response = client.chat_completion(messages, stop=stop_sequences, max_tokens=1000) + answer = response.choices[0].message + return answer +``` + +рдЗрд╕рдХреЗ рдЕрддрд┐рд░рд┐рдХреНрдд, `custom_model` рдПрдХ `grammar` рдЖрд░реНрдЧреБрдореЗрдВрдЯ рднреА рд▓реЗ рд╕рдХрддрд╛ рд╣реИред рдЬрд┐рд╕ рд╕реНрдерд┐рддрд┐ рдореЗрдВ рдЖрдк рдПрдЬреЗрдВрдЯ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдкрд░ рдПрдХ `grammar` рдирд┐рд░реНрджрд┐рд╖реНрдЯ рдХрд░рддреЗ рд╣реИрдВ, рдпрд╣ рдЖрд░реНрдЧреБрдореЗрдВрдЯ рдореЙрдбрд▓ рдХреЗ рдХреЙрд▓реНрд╕ рдХреЛ рдЖрдкрдХреЗ рджреНрд╡рд╛рд░рд╛ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдкрд░ рдкрд░рд┐рднрд╛рд╖рд┐рдд `grammar` рдХреЗ рд╕рд╛рде рдкрд╛рд╕ рдХрд┐рдпрд╛ рдЬрд╛рдПрдЧрд╛, рддрд╛рдХрд┐ [constrained generation](https://huggingface.co/docs/text-generation-inference/conceptual/guidance) рдХреА 
рдЕрдиреБрдорддрд┐ рдорд┐рд▓ рд╕рдХреЗ рдЬрд┐рд╕рд╕реЗ рдЙрдЪрд┐рдд-рдлреЙрд░реНрдореЗрдЯреЗрдб рдПрдЬреЗрдВрдЯ рдЖрдЙрдЯрдкреБрдЯ рдХреЛ рдлреЛрд░реНрд╕ рдХрд┐рдпрд╛ рдЬрд╛ рд╕рдХреЗред + +### TransformersModel + +рд╕реБрд╡рд┐рдзрд╛ рдХреЗ рд▓рд┐рдП, рд╣рдордиреЗ рдПрдХ `TransformersModel` рдЬреЛрдбрд╝рд╛ рд╣реИ рдЬреЛ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдкрд░ рджрд┐рдП рдЧрдП model_id рдХреЗ рд▓рд┐рдП рдПрдХ рд▓реЛрдХрд▓ `transformers` рдкрд╛рдЗрдкрд▓рд╛рдЗрди рдмрдирд╛рдХрд░ рдКрдкрд░ рдХреЗ рдмрд┐рдВрджреБрдУрдВ рдХреЛ рд▓рд╛рдЧреВ рдХрд░рддрд╛ рд╣реИред + +```python +from smolagents import TransformersModel + +model = TransformersModel(model_id="HuggingFaceTB/SmolLM-135M-Instruct") + +print(model([{"role": "user", "content": "Ok!"}], stop_sequences=["great"])) +``` +```text +>>> What a +``` + +[[autodoc]] TransformersModel + +### HfApiModel + +`HfApiModel` LLM рдХреЗ рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рдХреЗ рд▓рд┐рдП [HF Inference API](https://huggingface.co/docs/api-inference/index) рдХреНрд▓рд╛рдЗрдВрдЯ рдХреЛ рд░реИрдк рдХрд░рддрд╛ рд╣реИред + +```python +from smolagents import HfApiModel + +messages = [ + {"role": "user", "content": "Hello, how are you?"}, + {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, + {"role": "user", "content": "No need to help, take it easy."}, +] + +model = HfApiModel() +print(model(messages)) +``` +```text +>>> Of course! If you change your mind, feel free to reach out. Take care! 
+``` +[[autodoc]] HfApiModel + +### LiteLLMModel + +`LiteLLMModel` рд╡рд┐рднрд┐рдиреНрди рдкреНрд░рджрд╛рддрд╛рдУрдВ рд╕реЗ 100+ LLMs рдХреЛ рд╕рдкреЛрд░реНрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП [LiteLLM](https://www.litellm.ai/) рдХрд╛ рд▓рд╛рдн рдЙрдард╛рддрд╛ рд╣реИред +рдЖрдк рдореЙрдбрд▓ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдкрд░ kwargs рдкрд╛рд╕ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ рдЬреЛ рддрдм рдореЙрдбрд▓ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рддреЗ рд╕рдордп рдкреНрд░рдпреЛрдЧ рдХрд┐рдП рдЬрд╛рдПрдВрдЧреЗ, рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП рдиреАрдЪреЗ рд╣рдо `temperature` рдкрд╛рд╕ рдХрд░рддреЗ рд╣реИрдВред + +```python +from smolagents import LiteLLMModel + +messages = [ + {"role": "user", "content": "Hello, how are you?"}, + {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, + {"role": "user", "content": "No need to help, take it easy."}, +] + +model = LiteLLMModel("anthropic/claude-3-5-sonnet-latest", temperature=0.2, max_tokens=10) +print(model(messages)) +``` + +[[autodoc]] LiteLLMModel + +### OpenAiServerModel + + +рдпрд╣ рдХреНрд▓рд╛рд╕ рдЖрдкрдХреЛ рдХрд┐рд╕реА рднреА OpenAIServer рдХрдореНрдкреИрдЯрд┐рдмрд▓ рдореЙрдбрд▓ рдХреЛ рдХреЙрд▓ рдХрд░рдиреЗ рджреЗрддреА рд╣реИред +рдпрд╣рд╛рдБ рдмрддрд╛рдпрд╛ рдЧрдпрд╛ рд╣реИ рдХрд┐ рдЖрдк рдЗрд╕реЗ рдХреИрд╕реЗ рд╕реЗрдЯ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ (рдЖрдк рджреВрд╕рд░реЗ рд╕рд░реНрд╡рд░ рдХреЛ рдкреЙрдЗрдВрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП `api_base` url рдХреЛ рдХрд╕реНрдЯрдорд╛рдЗрдЬрд╝ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ): +```py +from smolagents import OpenAIServerModel + +model = OpenAIServerModel( + model_id="gpt-4o", + api_base="https://api.openai.com/v1", + api_key=os.environ["OPENAI_API_KEY"], +) +``` \ No newline at end of file diff --git a/docs/source/hi/reference/tools.md b/docs/source/hi/reference/tools.md new file mode 100644 index 000000000..ddb24d1ab --- /dev/null +++ b/docs/source/hi/reference/tools.md @@ -0,0 +1,91 @@ + +# Tools + + + +Smolagents рдПрдХ experimental API 
рд╣реИ рдЬреЛ рдХрд┐рд╕реА рднреА рд╕рдордп рдмрджрд▓ рд╕рдХрддрд╛ рд╣реИред рдПрдЬреЗрдВрдЯреНрд╕ рджреНрд╡рд╛рд░рд╛ рд▓реМрдЯрд╛рдП рдЧрдП рдкрд░рд┐рдгрд╛рдо рднрд┐рдиреНрди рд╣реЛ рд╕рдХрддреЗ рд╣реИрдВ рдХреНрдпреЛрдВрдХрд┐ APIs рдпрд╛ underlying рдореЙрдбрд▓ рдмрджрд▓рдиреЗ рдХреА рд╕рдВрднрд╛рд╡рдирд╛ рд░рдЦрддреЗ рд╣реИрдВред + + + +рдПрдЬреЗрдВрдЯреНрд╕ рдФрд░ рдЯреВрд▓реНрд╕ рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдЕрдзрд┐рдХ рдЬрд╛рдирдиреЗ рдХреЗ рд▓рд┐рдП [introductory guide](../index) рдкрдврд╝рдирд╛ рд╕реБрдирд┐рд╢реНрдЪрд┐рдд рдХрд░реЗрдВред +рдпрд╣ рдкреЗрдЬ underlying рдХреНрд▓рд╛рд╕реЗрдЬ рдХреЗ рд▓рд┐рдП API docs рдХреЛ рд╢рд╛рдорд┐рд▓ рдХрд░рддрд╛ рд╣реИред + +## Tools + +### load_tool + +[[autodoc]] load_tool + +### tool + +[[autodoc]] tool + +### Tool + +[[autodoc]] Tool + +### launch_gradio_demo + +[[autodoc]] launch_gradio_demo + +## Default Tools + +### PythonInterpreterTool + +[[autodoc]] PythonInterpreterTool + +### DuckDuckGoSearchTool + +[[autodoc]] DuckDuckGoSearchTool + +### VisitWebpageTool + +[[autodoc]] VisitWebpageTool + +### UserInputTool + +[[autodoc]] UserInputTool + +## ToolCollection + +[[autodoc]] ToolCollection + +## Agent рдЯрд╛рдЗрдкреНрд╕ + +рдПрдЬреЗрдВрдЯреНрд╕ рдЯреВрд▓реНрд╕ рдХреЗ рдмреАрдЪ рдХрд┐рд╕реА рднреА рдкреНрд░рдХрд╛рд░ рдХреА рдСрдмреНрдЬреЗрдХреНрдЯ рдХреЛ рд╕рдВрднрд╛рд▓ рд╕рдХрддреЗ рд╣реИрдВ; рдЯреВрд▓реНрд╕, рдкреВрд░реА рддрд░рд╣ рд╕реЗ рдорд▓реНрдЯреАрдореЛрдбрд▓ рд╣реЛрдиреЗ рдХреЗ рдХрд╛рд░рдг, рдЯреЗрдХреНрд╕реНрдЯ, рдЗрдореЗрдЬ, рдСрдбрд┐рдпреЛ, рд╡реАрдбрд┐рдпреЛ рд╕рд╣рд┐рдд рдЕрдиреНрдп рдкреНрд░рдХрд╛рд░реЛрдВ рдХреЛ рд╕реНрд╡реАрдХрд╛рд░ рдФрд░ рд░рд┐рдЯрд░реНрди рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред +рдЯреВрд▓реНрд╕ рдХреЗ рдмреАрдЪ рдЕрдиреБрдХреВрд▓рддрд╛ рдмрдврд╝рд╛рдиреЗ рдХреЗ рд╕рд╛рде-рд╕рд╛рде рдЗрди рд░рд┐рдЯрд░реНрдиреНрд╕ рдХреЛ ipython (jupyter, colab, ipython notebooks, ...) 
рдореЗрдВ рд╕рд╣реА рдврдВрдЧ рд╕реЗ рд░реЗрдВрдбрд░ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рд╣рдо рдЗрди рдЯрд╛рдЗрдкреНрд╕ рдХреЗ рдЖрд╕рдкрд╛рд╕ рд░реИрдкрд░ рдХреНрд▓рд╛рд╕реЗрдЬ рдХреЛ рд▓рд╛рдЧреВ рдХрд░рддреЗ рд╣реИрдВред
+
+рд░реИрдк рдХрд┐рдП рдЧрдП рдСрдмреНрдЬреЗрдХреНрдЯреНрд╕ рдХреЛ рдкреНрд░рд╛рд░рдВрдн рдореЗрдВ рдЬреИрд╕рд╛ рд╡реНрдпрд╡рд╣рд╛рд░ рдХрд░рдирд╛ рдЪрд╛рд╣рд┐рдП рд╡реИрд╕рд╛ рд╣реА рдХрд░рдирд╛ рдЬрд╛рд░реА рд░рдЦрдирд╛ рдЪрд╛рд╣рд┐рдП; рдПрдХ рдЯреЗрдХреНрд╕реНрдЯ рдСрдмреНрдЬреЗрдХреНрдЯ рдХреЛ рдЕрднреА рднреА рд╕реНрдЯреНрд░рд┐рдВрдЧ рдХреА рддрд░рд╣ рд╡реНрдпрд╡рд╣рд╛рд░ рдХрд░рдирд╛ рдЪрд╛рд╣рд┐рдПред
+рдПрдХ рдЗрдореЗрдЬ рдСрдмреНрдЬреЗрдХреНрдЯ рдХреЛ рдЕрднреА рднреА `PIL.Image` рдХреА рддрд░рд╣ рд╡реНрдпрд╡рд╣рд╛рд░ рдХрд░рдирд╛ рдЪрд╛рд╣рд┐рдПред
+
+рдЗрди рдЯрд╛рдЗрдкреНрд╕ рдХреЗ рддреАрди рд╡рд┐рд╢рд┐рд╖реНрдЯ рдЙрджреНрджреЗрд╢реНрдп рд╣реИрдВ:
+
+- рдЯрд╛рдЗрдк рдкрд░ `to_raw` рдХреЛ рдХреЙрд▓ рдХрд░рдиреЗ рд╕реЗ рдЕрдВрддрд░реНрдирд┐рд╣рд┐рдд рдСрдмреНрдЬреЗрдХреНрдЯ рд░рд┐рдЯрд░реНрди рд╣реЛрдирд╛ рдЪрд╛рд╣рд┐рдП
+- рдЯрд╛рдЗрдк рдкрд░ `to_string` рдХреЛ рдХреЙрд▓ рдХрд░рдиреЗ рд╕реЗ рдСрдмреНрдЬреЗрдХреНрдЯ рдХреЛ рд╕реНрдЯреНрд░рд┐рдВрдЧ рдХреЗ рд░реВрдк рдореЗрдВ рд░рд┐рдЯрд░реНрди рд╣реЛрдирд╛ рдЪрд╛рд╣рд┐рдП: рд╡рд╣ `AgentText` рдХреЗ рдорд╛рдорд▓реЗ рдореЗрдВ рд╕реНрдЯреНрд░рд┐рдВрдЧ рд╣реЛ рд╕рдХрддреА рд╣реИ рд▓реЗрдХрд┐рди рдЕрдиреНрдп рдЙрджрд╛рд╣рд░рдгреЛрдВ рдореЗрдВ рдСрдмреНрдЬреЗрдХреНрдЯ рдХреЗ рд╕реАрд░рд┐рдпрд▓рд╛рдЗрдЬреНрдб рд╡рд░реНрдЬрди рдХрд╛ рдкрд╛рде рд╣реЛрдЧрд╛
+- рдЗрд╕реЗ рдПрдХ ipython kernel рдореЗрдВ рдкреНрд░рджрд░реНрд╢рд┐рдд рдХрд░рдиреЗ рдкрд░ рдСрдмреНрдЬреЗрдХреНрдЯ рдХреЛ рд╕рд╣реА рдврдВрдЧ рд╕реЗ рдкреНрд░рджрд░реНрд╢рд┐рдд рдХрд░рдирд╛ рдЪрд╛рд╣рд┐рдП
+
+### AgentText
+
+[[autodoc]] smolagents.types.AgentText
+
+### AgentImage
+
+[[autodoc]] smolagents.types.AgentImage
+
+### AgentAudio
+
+[[autodoc]] smolagents.types.AgentAudio
diff --git 
a/docs/source/hi/tutorials/building_good_agents.md b/docs/source/hi/tutorials/building_good_agents.md new file mode 100644 index 000000000..86eee273c --- /dev/null +++ b/docs/source/hi/tutorials/building_good_agents.md @@ -0,0 +1,286 @@ + +# рдЕрдЪреНрдЫреЗ Agents рдХрд╛ рдирд┐рд░реНрдорд╛рдг + +[[open-in-colab]] + +рдПрдХ рдРрд╕рд╛ рдПрдЬреЗрдВрдЯ рдмрдирд╛рдиреЗ рдореЗрдВ рдЬреЛ рдХрд╛рдо рдХрд░рддрд╛ рд╣реИ рдФрд░ рдЬреЛ рдХрд╛рдо рдирд╣реАрдВ рдХрд░рддрд╛ рд╣реИ, рдЗрд╕рдореЗрдВ рдЬрд╝рдореАрди-рдЖрд╕рдорд╛рди рдХрд╛ рдЕрдВрддрд░ рд╣реИред +рд╣рдо рдХреИрд╕реЗ рдРрд╕реЗ рдПрдЬреЗрдВрдЯреНрд╕ рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВ рдЬреЛ рдмрд╛рдж рд╡рд╛рд▓реА рд╢реНрд░реЗрдгреА рдореЗрдВ рдЖрддреЗ рд╣реИрдВ? +рдЗрд╕ рдЧрд╛рдЗрдб рдореЗрдВ, рд╣рдо рдПрдЬреЗрдВрдЯреНрд╕ рдмрдирд╛рдиреЗ рдХреЗ рд▓рд┐рдП рд╕рд░реНрд╡реЛрддреНрддрдо рдкреНрд░рдХреНрд░рд┐рдпрд╛рдПрдБ рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдмрд╛рдд рдХрд░реЗрдВрдЧреЗред + +> [!TIP] +> рдпрджрд┐ рдЖрдк рдПрдЬреЗрдВрдЯреНрд╕ рдмрдирд╛рдиреЗ рдореЗрдВ рдирдП рд╣реИрдВ, рддреЛ рдкрд╣рд▓реЗ [рдПрдЬреЗрдВрдЯреНрд╕ рдХрд╛ рдкрд░рд┐рдЪрдп](../conceptual_guides/intro_agents) рдФрд░ [smolagents рдХреА рдЧрд╛рдЗрдбреЗрдб рдЯреВрд░](../guided_tour) рдкрдврд╝рдирд╛ рд╕реБрдирд┐рд╢реНрдЪрд┐рдд рдХрд░реЗрдВред + +### рд╕рд░реНрд╡рд╢реНрд░реЗрд╖реНрда рдПрдЬреЗрдВрдЯрд┐рдХ рд╕рд┐рд╕реНрдЯрдо рд╕рдмрд╕реЗ рд╕рд░рд▓ рд╣реЛрддреЗ рд╣реИрдВ: рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдХреЛ рдЬрд┐рддрдирд╛ рд╣реЛ рд╕рдХреЗ рдЙрддрдирд╛ рд╕рд░рд▓ рдмрдирд╛рдПрдВ + +рдЕрдкрдиреЗ рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдореЗрдВ рдПрдХ LLM рдХреЛ рдХреБрдЫ рдПрдЬреЗрдВрд╕реА рджреЗрдиреЗ рд╕реЗ рддреНрд░реБрдЯрд┐рдпреЛрдВ рдХрд╛ рдЬреЛрдЦрд┐рдо рд╣реЛрддрд╛ рд╣реИред + +рдЕрдЪреНрдЫреА рддрд░рд╣ рд╕реЗ рдкреНрд░реЛрдЧреНрд░рд╛рдо рдХрд┐рдП рдЧрдП рдПрдЬреЗрдВрдЯрд┐рдХ рд╕рд┐рд╕реНрдЯрдо рдореЗрдВ рд╡реИрд╕реЗ рднреА рдЕрдЪреНрдЫреА рдПрд░рд░ рд▓реЙрдЧрд┐рдВрдЧ рдФрд░ рд░реАрдЯреНрд░рд╛рдИ рдореИрдХреЗрдирд┐рдЬреНрдо рд╣реЛрддреЗ рд╣реИрдВ, рдЬрд┐рд╕рд╕реЗ LLM 
рдЗрдВрдЬрди рдЕрдкрдиреА рдЧрд▓рддрд┐рдпреЛрдВ рдХреЛ рд╕реБрдзрд╛рд░рдиреЗ рдХрд╛ рдореМрдХрд╛ рдорд┐рд▓рддрд╛ рд╣реИред рд▓реЗрдХрд┐рди LLM рддреНрд░реБрдЯрд┐ рдХреЗ рдЬреЛрдЦрд┐рдо рдХреЛ рдЕрдзрд┐рдХрддрдо рдХрдо рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рдЖрдкрдХреЛ рдЕрдкрдирд╛ рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рд╕рд░рд▓ рдмрдирд╛рдирд╛ рдЪрд╛рд╣рд┐рдП! + +рдЖрдЗрдП [рдПрдЬреЗрдВрдЯреНрд╕ рдХрд╛ рдкрд░рд┐рдЪрдп](../conceptual_guides/intro_agents) рд╕реЗ рдЙрджрд╛рд╣рд░рдг рдкрд░ рдлрд┐рд░ рд╕реЗ рд╡рд┐рдЪрд╛рд░ рдХрд░реЗрдВ: рдПрдХ рд╕рд░реНрдл рдЯреНрд░рд┐рдк рдХрдВрдкрдиреА рдХреЗ рд▓рд┐рдП рдЙрдкрдпреЛрдЧрдХрд░реНрддрд╛ рдкреНрд░рд╢реНрдиреЛрдВ рдХрд╛ рдЙрддреНрддрд░ рджреЗрдиреЗ рд╡рд╛рд▓рд╛ рдмреЙрдЯред +рдПрдЬреЗрдВрдЯ рдХреЛ рд╣рд░ рдмрд╛рд░ рдЬрдм рдПрдХ рдирдП рд╕рд░реНрдл рд╕реНрдкреЙрдЯ рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдкреВрдЫрд╛ рдЬрд╛рддрд╛ рд╣реИ рддреЛ "travel distance API" рдФрд░ "weather API" рдХреЗ рд▓рд┐рдП 2 рдЕрд▓рдЧ-рдЕрд▓рдЧ рдХреЙрд▓ рдХрд░рдиреЗ рджреЗрдиреЗ рдХреЗ рдмрдЬрд╛рдп, рдЖрдк рдХреЗрд╡рд▓ рдПрдХ рдПрдХреАрдХреГрдд рдЯреВрд▓ "return_spot_information" рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВ, рдПрдХ рдлрдВрдХреНрд╢рди рдЬреЛ рджреЛрдиреЛрдВ APIs рдХреЛ рдПрдХ рд╕рд╛рде рдХреЙрд▓ рдХрд░рддрд╛ рд╣реИ рдФрд░ рдЙрдирдХреЗ рд╕рдВрдпреЛрдЬрд┐рдд рдЖрдЙрдЯрдкреБрдЯ рдХреЛ рдЙрдкрдпреЛрдЧрдХрд░реНрддрд╛ рдХреЛ рд╡рд╛рдкрд╕ рдХрд░рддрд╛ рд╣реИред + +рдпрд╣ рд▓рд╛рдЧрдд, рджреЗрд░реА рдФрд░ рддреНрд░реБрдЯрд┐ рдЬреЛрдЦрд┐рдо рдХреЛ рдХрдо рдХрд░реЗрдЧрд╛! 
+ +рдореБрдЦреНрдп рджрд┐рд╢рд╛рдирд┐рд░реНрджреЗрд╢ рд╣реИ: LLM рдХреЙрд▓реНрд╕ рдХреА рд╕рдВрдЦреНрдпрд╛ рдХреЛ рдЬрд┐рддрдирд╛ рд╣реЛ рд╕рдХреЗ рдЙрддрдирд╛ рдХрдо рдХрд░реЗрдВред + +рдЗрд╕рд╕реЗ рдХреБрдЫ рдирд┐рд╖реНрдХрд░реНрд╖ рдирд┐рдХрд▓рддреЗ рд╣реИрдВ: +- рдЬрдм рднреА рд╕рдВрднрд╡ рд╣реЛ, рджреЛ APIs рдХреЗ рд╣рдорд╛рд░реЗ рдЙрджрд╛рд╣рд░рдг рдХреА рддрд░рд╣ 2 рдЯреВрд▓реНрд╕ рдХреЛ рдПрдХ рдореЗрдВ рд╕рдореВрд╣рд┐рдд рдХрд░реЗрдВред +- рдЬрдм рднреА рд╕рдВрднрд╡ рд╣реЛ, рд▓реЙрдЬрд┐рдХ рдПрдЬреЗрдВрдЯрд┐рдХ рдирд┐рд░реНрдгрдпреЛрдВ рдХреЗ рдмрдЬрд╛рдп рдбрд┐рдЯрд░рдорд┐рдирд┐рд╕реНрдЯрд┐рдХ рдлрдВрдХреНрд╢рдВрд╕ рдкрд░ рдЖрдзрд╛рд░рд┐рдд рд╣реЛрдиреА рдЪрд╛рд╣рд┐рдПред + +### LLM рдЗрдВрдЬрди рдХреЛ рдЬрд╛рдирдХрд╛рд░реА рдХреЗ рдкреНрд░рд╡рд╛рд╣ рдореЗрдВ рд╕реБрдзрд╛рд░ рдХрд░реЗрдВ + +рдпрд╛рдж рд░рдЦреЗрдВ рдХрд┐ рдЖрдкрдХрд╛ LLM рдЗрдВрдЬрди рдПрдХ *рдмреБрджреНрдзрд┐рдорд╛рди* рд░реЛрдмреЛрдЯ рдХреА рддрд░рд╣ рд╣реИ, рдЬреЛ рдПрдХ рдХрдорд░реЗ рдореЗрдВ рдмрдВрдж рд╣реИ, рдФрд░ рдмрд╛рд╣рд░реА рджреБрдирд┐рдпрд╛ рдХреЗ рд╕рд╛рде рдЗрд╕рдХрд╛ рдПрдХрдорд╛рддреНрд░ рд╕рдВрдЪрд╛рд░ рджрд░рд╡рд╛рдЬреЗ рдХреЗ рдиреАрдЪреЗ рд╕реЗ рдиреЛрдЯреНрд╕ рдкрд╛рд╕ рдХрд░рдирд╛ рд╣реИред + +рдпрд╣ рдХрд┐рд╕реА рднреА рдРрд╕реА рдЪреАрдЬ рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдирд╣реАрдВ рдЬрд╛рдиреЗрдЧрд╛ рдЬрд┐рд╕реЗ рдЖрдк рд╕реНрдкрд╖реНрдЯ рд░реВрдк рд╕реЗ рдЕрдкрдиреЗ рдкреНрд░реЙрдореНрдкреНрдЯ рдореЗрдВ рдирд╣реАрдВ рдбрд╛рд▓рддреЗ рд╣реИрдВред + +рдЗрд╕рд▓рд┐рдП рдкрд╣рд▓реЗ рдЕрдкрдиреЗ рдХрд╛рд░реНрдп рдХреЛ рдмрд╣реБрдд рд╕реНрдкрд╖реНрдЯ рдмрдирд╛рдиреЗ рд╕реЗ рд╢реБрд░реВ рдХрд░реЗрдВ! 
+рдЪреВрдВрдХрд┐ рдПрдХ рдПрдЬреЗрдВрдЯ LLM рджреНрд╡рд╛рд░рд╛ рд╕рдВрдЪрд╛рд▓рд┐рдд рд╣реЛрддрд╛ рд╣реИ, рдЖрдкрдХреЗ рдХрд╛рд░реНрдп рдХреЗ рдирд┐рд░реНрдорд╛рдг рдореЗрдВ рдЫреЛрдЯреЗ рдмрджрд▓рд╛рд╡ рднреА рдкреВрд░реА рддрд░рд╣ рд╕реЗ рдЕрд▓рдЧ рдкрд░рд┐рдгрд╛рдо рджреЗ рд╕рдХрддреЗ рд╣реИрдВред + +рдлрд┐рд░, рдЯреВрд▓ рдХреЗ рдЙрдкрдпреЛрдЧ рдореЗрдВ рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдХреА рдУрд░ рдЬрд╛рдирдХрд╛рд░реА рдХреЗ рдкреНрд░рд╡рд╛рд╣ рдореЗрдВ рд╕реБрдзрд╛рд░ рдХрд░реЗрдВред + +рдкрд╛рд▓рди рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рд╡рд┐рд╢реЗрд╖ рджрд┐рд╢рд╛рдирд┐рд░реНрджреЗрд╢: +- рдкреНрд░рддреНрдпреЗрдХ рдЯреВрд▓ рдХреЛ рд╡рд╣ рд╕рдм рдХреБрдЫ рд▓реЙрдЧ рдХрд░рдирд╛ рдЪрд╛рд╣рд┐рдП (рдЯреВрд▓ рдХреА `forward` рдореЗрдердб рдХреЗ рдЕрдВрджрд░ рдХреЗрд╡рд▓ `print` рд╕реНрдЯреЗрдЯрдореЗрдВрдЯреНрд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ) рдЬреЛ LLM рдЗрдВрдЬрди рдХреЗ рд▓рд┐рдП рдЙрдкрдпреЛрдЧреА рд╣реЛ рд╕рдХрддрд╛ рд╣реИред + - рд╡рд┐рд╢реЗрд╖ рд░реВрдк рд╕реЗ, рдЯреВрд▓ рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рдЧрд▓рддрд┐рдпреЛрдВ рдкрд░ рд╡рд┐рд╕реНрддреГрдд рд▓реЙрдЧрд┐рдВрдЧ рдмрд╣реБрдд рдорджрдж рдХрд░реЗрдЧреА! + +рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП, рдпрд╣рд╛рдБ рдПрдХ рдЯреВрд▓ рд╣реИ рдЬреЛ рд▓реЛрдХреЗрд╢рди рдФрд░ рдбреЗрдЯ-рдЯрд╛рдЗрдо рдХреЗ рдЖрдзрд╛рд░ рдкрд░ рдореМрд╕рдо рдбреЗрдЯрд╛ рдкреНрд░рд╛рдкреНрдд рдХрд░рддрд╛ рд╣реИ: + +рдкрд╣рд▓реЗ, рдпрд╣рд╛рдБ рдПрдХ рдЦрд░рд╛рдм рд░реВрдк рд╣реИ: +```python +import datetime +from smolagents import tool + +def get_weather_report_at_coordinates(coordinates, date_time): + # Dummy function, returns a list of [temperature in ┬░C, risk of rain on a scale 0-1, wave height in m] + return [28.0, 0.35, 0.85] + +def convert_location_to_coordinates(location): + # Returns dummy coordinates + return [3.3, -42.0] + +@tool +def get_weather_api(location: str, date_time: str) -> str: + """ + Returns the weather report. + + Args: + location: the name of the place that you want the weather for. 
+ date_time: the date and time for which you want the report. + """ + lon, lat = convert_location_to_coordinates(location) + date_time = datetime.strptime(date_time) + return str(get_weather_report_at_coordinates((lon, lat), date_time)) +``` + +# рдпрд╣ рдЦрд░рд╛рдм рдХреНрдпреЛрдВ рд╣реИ? +- `date_time` рдХреЗ рд▓рд┐рдП рдЙрдкрдпреЛрдЧ рдХрд┐рдП рдЬрд╛рдиреЗ рд╡рд╛рд▓реЗ рдлреЙрд░реНрдореЗрдЯ рдХреА рд╕рдЯреАрдХрддрд╛ рдХрд╛ рдХреЛрдИ рдЙрд▓реНрд▓реЗрдЦ рдирд╣реАрдВ рд╣реИред +- рдпрд╣ рд╕реНрдкрд╖реНрдЯ рдирд╣реАрдВ рд╣реИ рдХрд┐ рд╕реНрдерд╛рди (location) рдХреЛ рдХрд┐рд╕ рдкреНрд░рдХрд╛рд░ рдирд┐рд░реНрджрд┐рд╖реНрдЯ рдХрд┐рдпрд╛ рдЬрд╛рдирд╛ рдЪрд╛рд╣рд┐рдПред +- рддреНрд░реБрдЯрд┐рдпреЛрдВ рдХреЛ рд╕реНрдкрд╖реНрдЯ рд░реВрдк рд╕реЗ рдЗрдВрдЧрд┐рдд рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдХреЛрдИ рд▓реЙрдЧрд┐рдВрдЧ рдореЗрдХреИрдирд┐рдЬреНрдо рдореМрдЬреВрдж рдирд╣реАрдВ рд╣реИ, рдЬреИрд╕реЗ рдХрд┐ рд╕реНрдерд╛рди рдЧрд▓рдд рдлреЙрд░реНрдореЗрдЯ рдореЗрдВ рд╣реЛрдирд╛ рдпрд╛ `date_time` рдХрд╛ рд╕рд╣реА рдврдВрдЧ рд╕реЗ рдлреЙрд░реНрдореЗрдЯ рди рд╣реЛрдирд╛ред +- рдЖрдЙрдЯрдкреБрдЯ рдлреЙрд░реНрдореЗрдЯ рд╕рдордЭрдиреЗ рдореЗрдВ рдХрдард┐рди рд╣реИред + +рдпрджрд┐ рдЯреВрд▓ рдХреЙрд▓ рд╡рд┐рдлрд▓ рд╣реЛ рдЬрд╛рддреА рд╣реИ, рддреЛ рдореЗрдореЛрд░реА рдореЗрдВ рд▓реЙрдЧ рдХреА рдЧрдИ рдПрд░рд░ рдЯреНрд░реЗрд╕ LLM рдХреЛ рдЯреВрд▓ рдХреА рд╕рдорд╕реНрдпрд╛рдУрдВ рдХреЛ рдареАрдХ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рд░рд┐рд╡рд░реНрд╕ рдЗрдВрдЬреАрдирд┐рдпрд░рд┐рдВрдЧ рдореЗрдВ рдорджрдж рдХрд░ рд╕рдХрддреА рд╣реИред рд▓реЗрдХрд┐рди рдЗрддрдирд╛ рд╕рд╛рд░рд╛ рдХрд╛рдо LLM рдХреЛ рд╣реА рдХреНрдпреЛрдВ рдХрд░рдиреЗ рджреЗрдирд╛? + +рдЗрд╕ рдЯреВрд▓ рдХреЛ рдмреЗрд╣рддрд░ рддрд░реАрдХреЗ рд╕реЗ рдмрдирд╛рдиреЗ рдХрд╛ рдПрдХ рдЙрджрд╛рд╣рд░рдг рдЗрд╕ рдкреНрд░рдХрд╛рд░ рд╣реЛ рд╕рдХрддрд╛ рд╣реИ: + +```python +@tool +def get_weather_api(location: str, date_time: str) -> str: + """ + Returns the weather report. 
+ + Args: + location: the name of the place that you want the weather for. Should be a place name, followed by possibly a city name, then a country, like "Anchor Point, Taghazout, Morocco". + date_time: the date and time for which you want the report, formatted as '%m/%d/%y %H:%M:%S'. + """ + lon, lat = convert_location_to_coordinates(location) + try: + date_time = datetime.strptime(date_time) + except Exception as e: + raise ValueError("Conversion of `date_time` to datetime format failed, make sure to provide a string in format '%m/%d/%y %H:%M:%S'. Full trace:" + str(e)) + temperature_celsius, risk_of_rain, wave_height = get_weather_report_at_coordinates((lon, lat), date_time) + return f"Weather report for {location}, {date_time}: Temperature will be {temperature_celsius}┬░C, risk of rain is {risk_of_rain*100:.0f}%, wave height is {wave_height}m." +``` + +рд╕рд╛рдорд╛рдиреНрдп рддреМрд░ рдкрд░, рдЕрдкрдиреЗ LLM рдХрд╛ рдмреЛрдЭ рдХреЛ рдХрдо рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рдЦреБрдж рд╕реЗ рдпрд╣ рдЕрдЪреНрдЫрд╛ рд╕рд╡рд╛рд▓ рдкреВрдЫреЗрдВ: "рдпрджрд┐ рдореИрдВ рдирдпрд╛ рдФрд░ рдЕрдиреБрднрд╡рд╣реАрди рд╣реВрдВ рдФрд░ рдЗрд╕ рдЯреВрд▓ рдХрд╛ рдкрд╣рд▓реА рдмрд╛рд░ рдЙрдкрдпреЛрдЧ рдХрд░ рд░рд╣рд╛ рд╣реВрдВ, рддреЛ рдЗрд╕ рдЯреВрд▓ рдХреЗ рд╕рд╛рде рдкреНрд░реЛрдЧреНрд░рд╛рдорд┐рдВрдЧ рдХрд░рдирд╛ рдФрд░ рдЕрдкрдиреА рдЧрд▓рддрд┐рдпреЛрдВ рдХреЛ рдареАрдХ рдХрд░рдирд╛ рдореЗрд░реЗ рд▓рд┐рдП рдХрд┐рддрдирд╛ рдЖрд╕рд╛рди рд╣реЛрдЧрд╛?" 
+ +### рдПрдЬреЗрдВрдЯ рдХреЛ рдЕрдзрд┐рдХ рддрд░реНрдХ (arguments) рджреЗрдВ + +рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдХреЛ рдХрд╛рд░реНрдп рдХрд╛ рд╡рд░реНрдгрди рдХрд░рдиреЗ рд╡рд╛рд▓реЗ рд╕рд╛рдзрд╛рд░рдг рд╕реНрдЯреНрд░рд┐рдВрдЧ рд╕реЗ рдЖрдЧреЗ рдмрдврд╝рдХрд░ рдХреБрдЫ рдЕрддрд┐рд░рд┐рдХреНрдд рдСрдмреНрдЬреЗрдХреНрдЯреНрд╕ рджреЗрдиреЗ рдХреЗ рд▓рд┐рдП, рдЖрдк `additional_args` рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред рдпрд╣ рдЖрдкрдХреЛ рдХрд┐рд╕реА рднреА рдкреНрд░рдХрд╛рд░ рдХрд╛ рдСрдмреНрдЬреЗрдХреНрдЯ рдкрд╛рд╕ рдХрд░рдиреЗ рдХреА рд╕реБрд╡рд┐рдзрд╛ рджреЗрддрд╛ рд╣реИ: + + +```py +from smolagents import CodeAgent, HfApiModel + +model_id = "meta-llama/Llama-3.3-70B-Instruct" + +agent = CodeAgent(tools=[], model=HfApiModel(model_id=model_id), add_base_tools=True) + +agent.run( + "Why does Mike not know many people in New York?", + additional_args={"mp3_sound_file_url":'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/recording.mp3'} +) +``` +рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП, рдЖрдк рдЗрд╕ `additional_args` рдЖрд░реНрдЧреНрдпреВрдореЗрдВрдЯ рдХрд╛ рдЙрдкрдпреЛрдЧ рдЙрди рдЗрдореЗрдЬреЗрдЬрд╝ рдпрд╛ рд╕реНрдЯреНрд░рд┐рдВрдЧреНрд╕ рдХреЛ рдкрд╛рд╕ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ рдЬрд┐рдиреНрд╣реЗрдВ рдЖрдк рдЪрд╛рд╣рддреЗ рд╣реИрдВ рдХрд┐ рдЖрдкрдХрд╛ рдПрдЬреЗрдВрдЯ рдЙрдкрдпреЛрдЧ рдХрд░реЗред + + + +## рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдХреЛ рдбрд┐рдмрдЧ рдХреИрд╕реЗ рдХрд░реЗрдВ + +### 1. 
рдПрдХ рдЕрдзрд┐рдХ рд╢рдХреНрддрд┐рд╢рд╛рд▓реА LLM рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░реЗрдВ + +рдПрдЬреЗрдВрдЯрд┐рдХ рд╡рд░реНрдХрдлрд╝реНрд▓реЛ рдореЗрдВ, рдХреБрдЫ рддреНрд░реБрдЯрд┐рдпрд╛рдВ рд╡рд╛рд╕реНрддрд╡рд┐рдХ рд╣реЛрддреА рд╣реИрдВ, рдЬрдмрдХрд┐ рдХреБрдЫ рдЕрдиреНрдп рддреНрд░реБрдЯрд┐рдпрд╛рдВ рдЖрдкрдХреЗ LLM рдЗрдВрдЬрди рдХреЗ рд╕рд╣реА рддрд░реАрдХреЗ рд╕реЗ рддрд░реНрдХ рди рдХрд░ рдкрд╛рдиреЗ рдХреА рд╡рдЬрд╣ рд╕реЗ рд╣реЛрддреА рд╣реИрдВред +рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП, рдЗрд╕ рдЯреНрд░реЗрд╕ рдХреЛ рджреЗрдЦреЗрдВ, рдЬрд╣рд╛рдВ рдореИрдВрдиреЗ рдПрдХ `CodeAgent` рд╕реЗ рдПрдХ рдХрд╛рд░ рдХреА рддрд╕реНрд╡реАрд░ рдмрдирд╛рдиреЗ рдХреЗ рд▓рд┐рдП рдХрд╣рд╛: +``` +==================================================================================================== New task ==================================================================================================== +Make me a cool car picture +тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФА New step тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФА +Agent is executing the code below: 
тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФА +image_generator(prompt="A cool, futuristic sports car with LED headlights, aerodynamic design, and vibrant color, high-res, photorealistic") +тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФА + +Last output from code snippet: тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФА +/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png +Step 1: + +- Time 
taken: 16.35 seconds +- Input tokens: 1,383 +- Output tokens: 77 +тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФА New step тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФА +Agent is executing the code below: тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФА +final_answer("/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png") +тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФА +Print 
outputs: + +Last output from code snippet: тФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФАтФА +/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png +Final answer: +/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png +``` +рдЙрдкрдпреЛрдЧрдХрд░реНрддрд╛ рдХреЛ, рдПрдХ рдЗрдореЗрдЬ рд▓реМрдЯрд╛рдП рдЬрд╛рдиреЗ рдХреЗ рдмрдЬрд╛рдп, рдЙрдиреНрд╣реЗрдВ рдПрдХ рдкрд╛рде рд▓реМрдЯрд╛рдпрд╛ рдЬрд╛рддрд╛ рд╣реИред +рдпрд╣ рд╕рд┐рд╕реНрдЯрдо рд╕реЗ рдПрдХ рдмрдЧ рдХреА рддрд░рд╣ рджрд┐рдЦ рд╕рдХрддрд╛ рд╣реИ, рд▓реЗрдХрд┐рди рд╡рд╛рд╕реНрддрд╡ рдореЗрдВ рдПрдЬреЗрдВрдЯрд┐рдХ рд╕рд┐рд╕реНрдЯрдо рдиреЗ рддреНрд░реБрдЯрд┐ рдирд╣реАрдВ рдХреА: рдпрд╣ рдХреЗрд╡рд▓ рдЗрд╕рд▓рд┐рдП рд╣реИ рдХрд┐ LLM рдмреНрд░реЗрди рдиреЗ рдЗрдореЗрдЬ рдЖрдЙрдЯрдкреБрдЯ рдХреЛ рдПрдХ рд╡реЗрд░рд┐рдПрдмрд▓ рдореЗрдВ рд╕реЗрд╡ рдХрд░рдиреЗ рдХреА рдЧрд▓рддреА рдХреАред +рдЗрд╕ рдкреНрд░рдХрд╛рд░ рдпрд╣ рдЗрдореЗрдЬ рдХреЛ рдлрд┐рд░ рд╕реЗ рдПрдХреНрд╕реЗрд╕ рдирд╣реАрдВ рдХрд░ рд╕рдХрддрд╛ рд╣реИ рд╕рд┐рд╡рд╛рдп рдЗрдореЗрдЬ рдХреЛ рд╕реЗрд╡ рдХрд░рддреЗ рд╕рдордп рд▓реЙрдЧ рдХрд┐рдП рдЧрдП рдкрд╛рде рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ, рдЗрд╕рд▓рд┐рдП рдпрд╣ рдЗрдореЗрдЬ рдХреЗ рдмрдЬрд╛рдп рдкрд╛рде рд▓реМрдЯрд╛рддрд╛ рд╣реИред + +рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдХреЛ рдбреАрдмрдЧ рдХрд░рдиреЗ рдХрд╛ рдкрд╣рд▓рд╛ рдХрджрдо рдЗрд╕ рдкреНрд░рдХрд╛рд░ рд╣реИ "рдПрдХ рдЕрдзрд┐рдХ рд╢рдХреНрддрд┐рд╢рд╛рд▓реА LLM рдХрд╛ 
рдЙрдкрдпреЛрдЧ рдХрд░реЗрдВ"ред `Qwen2.5-72B-Instruct` рдЬреИрд╕реЗ рд╡рд┐рдХрд▓реНрдк рд╡рд╣ рдЧрд▓рддреА рдирд╣реАрдВ рдХрд░рддреЗред
+
+### 2. рдЕрдзрд┐рдХ рдорд╛рд░реНрдЧрджрд░реНрд╢рди / рдЕрдзрд┐рдХ рдЬрд╛рдирдХрд╛рд░реА рдкреНрд░рджрд╛рди рдХрд░реЗрдВ
+
+рдЖрдк рдХрдо рд╢рдХреНрддрд┐рд╢рд╛рд▓реА рдореЙрдбрд▓реНрд╕ рдХрд╛ рднреА рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдмрд╢рд░реНрддреЗ рдЖрдк рдЙрдиреНрд╣реЗрдВ рдЕрдзрд┐рдХ рдкреНрд░рднрд╛рд╡реА рдврдВрдЧ рд╕реЗ рдорд╛рд░реНрдЧрджрд░реНрд╢рди рдХрд░реЗрдВред
+
+рдЕрдкрдиреЗ рдЖрдк рдХреЛ рдЕрдкрдиреЗ рдореЙрдбрд▓ рдХреА рдЬрдЧрд╣ рд░рдЦреЗрдВ: рдпрджрд┐ рдЖрдк рдХрд╛рд░реНрдп рдХреЛ рд╣рд▓ рдХрд░рдиреЗ рд╡рд╛рд▓рд╛ рдореЙрдбрд▓ рд╣реЛрддреЗ, рддреЛ рдХреНрдпрд╛ рдЖрдк рдЙрдкрд▓рдмреНрдз рдЬрд╛рдирдХрд╛рд░реА (рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ + рдХрд╛рд░реНрдп рдирд┐рд░реНрдорд╛рдг + рдЯреВрд▓ рд╡рд┐рд╡рд░рдг рд╕реЗ) рдХреЗ рд╕рд╛рде рд╕рдВрдШрд░реНрд╖ рдХрд░рддреЗ?
+
+рдХреНрдпрд╛ рдЖрдкрдХреЛ рдХреБрдЫ рдЕрддрд┐рд░рд┐рдХреНрдд рд╕реНрдкрд╖реНрдЯреАрдХрд░рдг рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА? 
+
+рдЕрддрд┐рд░рд┐рдХреНрдд рдЬрд╛рдирдХрд╛рд░реА рдкреНрд░рджрд╛рди рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рд╣рдо рддреБрд░рдВрдд рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдХреЛ рдмрджрд▓рдиреЗ рдХреА рд╕рд▓рд╛рд╣ рдирд╣реАрдВ рджреЗрддреЗ рд╣реИрдВ: рдбрд┐рдлрд╝реЙрд▓реНрдЯ рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдореЗрдВ рдХрдИ рд╕рдорд╛рдпреЛрдЬрди рд╣реИрдВ рдЬрд┐рдиреНрд╣реЗрдВ рдЖрдк рддрдм рддрдХ рдирд╣реАрдВ рдмрд┐рдЧрд╛рдбрд╝рдирд╛ рдЪрд╛рд╣рддреЗ рдЬрдм рддрдХ рдЖрдк рдкреНрд░реЙрдореНрдкреНрдЯ рдХреЛ рдмрд╣реБрдд рдЕрдЪреНрдЫреА рддрд░рд╣ рд╕реЗ рдирд╣реАрдВ рд╕рдордЭрддреЗред
+рдЕрдкрдиреЗ LLM рдЗрдВрдЬрди рдХреЛ рдорд╛рд░реНрдЧрджрд░реНрд╢рди рдХрд░рдиреЗ рдХреЗ рдмреЗрд╣рддрд░ рддрд░реАрдХреЗ рд╣реИрдВ:
+- рдпрджрд┐ рдпрд╣ рдХрд╛рд░реНрдп рдХреЛ рд╣рд▓ рдХрд░рдиреЗ рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рд╣реИ: рдЗрди рд╕рднреА рд╡рд┐рд╡рд░рдгреЛрдВ рдХреЛ рдХрд╛рд░реНрдп рдореЗрдВ рдЬреЛрдбрд╝реЗрдВред рдпрд╣ рдХрд╛рд░реНрдп 100 рдкреЗрдЬ рд▓рдВрдмрд╛ рд╣реЛ рд╕рдХрддрд╛ рд╣реИред
+- рдпрджрд┐ рдпрд╣ рдЯреВрд▓реНрд╕ рдХреЗ рдЙрдкрдпреЛрдЧ рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рд╣реИ: рдЗрди рд╡рд┐рд╡рд░рдгреЛрдВ рдХреЛ рдЕрдкрдиреЗ рдЯреВрд▓реНрд╕ рдХреА `description` рд╡рд┐рд╢реЗрд╖рддрд╛ рдореЗрдВ рдЬреЛрдбрд╝реЗрдВред
+
+### 3. 
рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдмрджрд▓реЗрдВ (рдЖрдорддреМрд░ рдкрд░ рдпрд╣ рд╕рд▓рд╛рд╣ рдирд╣реАрдВ рджреА рдЬрд╛рддреА)
+
+рдпрджрд┐ рдЙрдкрд░реЛрдХреНрдд рд╕реНрдкрд╖реНрдЯреАрдХрд░рдг рдкрд░реНрдпрд╛рдкреНрдд рдирд╣реАрдВ рд╣реИрдВ, рддреЛ рдЖрдк рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдмрджрд▓ рд╕рдХрддреЗ рд╣реИрдВред
+
+рдЖрдЗрдП рджреЗрдЦреЗрдВ рдХрд┐ рдпрд╣ рдХреИрд╕реЗ рдХрд╛рдо рдХрд░рддрд╛ рд╣реИред рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП, рдЖрдЗрдП [`CodeAgent`] рдХреЗ рд▓рд┐рдП рдбрд┐рдлрд╝реЙрд▓реНрдЯ рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдХреА рдЬрд╛рдБрдЪ рдХрд░реЗрдВ (рдиреАрдЪреЗ рджрд┐рдпрд╛ рдЧрдпрд╛ рд╡рд░реНрдЬрди рдЬреАрд░реЛ-рд╢реЙрдЯ рдЙрджрд╛рд╣рд░рдгреЛрдВ рдХреЛ рдЫреЛрдбрд╝рдХрд░ рдЫреЛрдЯрд╛ рдХрд┐рдпрд╛ рдЧрдпрд╛ рд╣реИ)ред
+
+```python
+print(agent.system_prompt_template)
+```
+рдЖрдкрдХреЛ рдпрд╣ рдкреНрд░рд╛рдкреНрдд рд╣реЛрдЧрд╛:
+```text
+You are an expert assistant who can solve any task using code blobs. You will be given a task to solve as best you can.
+To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code.
+To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences.
+
+At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use.
+Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '<end_code>' sequence.
+During each intermediate step, you can use 'print()' to save whatever important information you will then need.
+These print outputs will then appear in the 'Observation:' field, which will be available as input for the next step.
+In the end you have to return a final answer using the `final_answer` tool.
+
+Here are a few examples using notional tools:
+---
+{examples}
+
+Above example were using notional tools that might not exist for you. 
On top of performing computations in the Python code snippets that you create, you only have access to these tools: + +{{tool_descriptions}} + +{{managed_agents_descriptions}} + +Here are the rules you should always follow to solve your task: +1. Always provide a 'Thought:' sequence, and a 'Code:\n```py' sequence ending with '```' sequence, else you will fail. +2. Use only variables that you have defined! +3. Always use the right arguments for the tools. DO NOT pass the arguments as a dict as in 'answer = wiki({'query': "What is the place where James Bond lives?"})', but use the arguments directly as in 'answer = wiki(query="What is the place where James Bond lives?")'. +4. Take care to not chain too many sequential tool calls in the same code block, especially when the output format is unpredictable. For instance, a call to search has an unpredictable return format, so do not have another tool call that depends on its output in the same block: rather output results with print() to use them in the next block. +5. Call a tool only when needed, and never re-do a tool call that you previously did with the exact same parameters. +6. Don't name any new variable with the same name as a tool: for instance don't name a variable 'final_answer'. +7. Never create any notional variables in our code, as having these in your logs might derail you from the true variables. +8. You can use imports in your code, but only from the following list of modules: {{authorized_imports}} +9. The state persists between code executions: so if in one step you've created variables or imported modules, these will all persist. +10. Don't give up! You're in charge of solving the task, not providing directions to solve it. + +Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000. 
+
+```
+
+рдЬреИрд╕рд╛ рдХрд┐ рдЖрдк рджреЗрдЦ рд╕рдХрддреЗ рд╣реИрдВ, `"{{tool_descriptions}}"` рдЬреИрд╕реЗ рдкреНрд▓реЗрд╕рд╣реЛрд▓реНрдбрд░реНрд╕ рд╣реИрдВ: рдЗрдирдХрд╛ рдЙрдкрдпреЛрдЧ рдПрдЬреЗрдВрдЯ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдХреЗ рд╕рдордп рдЯреВрд▓реНрд╕ рдпрд╛ рдореИрдиреЗрдЬреНрдб рдПрдЬреЗрдВрдЯреНрд╕ рдХреЗ рдХреБрдЫ рд╕реНрд╡рдЪрд╛рд▓рд┐рдд рд░реВрдк рд╕реЗ рдЬрдирд░реЗрдЯ рдХрд┐рдП рдЧрдП рд╡рд┐рд╡рд░рдгреЛрдВ рдХреЛ рдбрд╛рд▓рдиреЗ рдХреЗ рд▓рд┐рдП рдХрд┐рдпрд╛ рдЬрд╛рдПрдЧрд╛ред
+
+рдЗрд╕рд▓рд┐рдП рдЬрдмрдХрд┐ рдЖрдк `system_prompt` рдкреИрд░рд╛рдореАрдЯрд░ рдореЗрдВ рдЕрдкрдиреЗ рдХрд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдХреЛ рдЖрд░реНрдЧреБрдореЗрдВрдЯ рдХреЗ рд░реВрдк рдореЗрдВ рдкрд╛рд╕ рдХрд░рдХреЗ рдЗрд╕ рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдЯреЗрдореНрдкрд▓реЗрдЯ рдХреЛ рдУрд╡рд░рд░рд╛рдЗрдЯ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдЖрдкрдХреЗ рдирдП рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдореЗрдВ рдирд┐рдореНрдирд▓рд┐рдЦрд┐рдд рдкреНрд▓реЗрд╕рд╣реЛрд▓реНрдбрд░реНрд╕ рд╣реЛрдиреЗ рдЪрд╛рд╣рд┐рдП:
+- рдЯреВрд▓ рд╡рд┐рд╡рд░рдг рдбрд╛рд▓рдиреЗ рдХреЗ рд▓рд┐рдП `"{{tool_descriptions}}"`ред
+- рдпрджрд┐ рдХреЛрдИ рдореИрдиреЗрдЬреНрдб рдПрдЬреЗрдВрдЯреНрд╕ рд╣реИрдВ рддреЛ рдЙрдирдХреЗ рд▓рд┐рдП рд╡рд┐рд╡рд░рдг рдбрд╛рд▓рдиреЗ рдХреЗ рд▓рд┐рдП `"{{managed_agents_descriptions}}"`ред
+- рдХреЗрд╡рд▓ `CodeAgent` рдХреЗ рд▓рд┐рдП: рдЕрдзрд┐рдХреГрдд рдЗрдореНрдкреЛрд░реНрдЯреНрд╕ рдХреА рд╕реВрдЪреА рдбрд╛рд▓рдиреЗ рдХреЗ рд▓рд┐рдП `"{{authorized_imports}}"`ред
+
+рдлрд┐рд░ рдЖрдк рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдХреЛ рдирд┐рдореНрдирд╛рдиреБрд╕рд╛рд░ рдмрджрд▓ рд╕рдХрддреЗ рд╣реИрдВ:
+
+```py
+from smolagents.prompts import CODE_SYSTEM_PROMPT
+
+modified_system_prompt = CODE_SYSTEM_PROMPT + "\nHere you go!" # Change the system prompt here
+
+agent = CodeAgent(
+    tools=[],
+    model=HfApiModel(),
+    system_prompt=modified_system_prompt
+)
+```
+
+рдпрд╣ [`ToolCallingAgent`] рдХреЗ рд╕рд╛рде рднреА рдХрд╛рдо рдХрд░рддрд╛ рд╣реИред
+
+
+### 4. 
рдЕрддрд┐рд░рд┐рдХреНрдд рдпреЛрдЬрдирд╛ + +рд╣рдо рдкреВрд░рдХ рдпреЛрдЬрдирд╛ рдЪрд░рдг рдХреЗ рд▓рд┐рдП рдПрдХ рдореЙрдбрд▓ рдкреНрд░рджрд╛рди рдХрд░рддреЗ рд╣реИрдВ, рдЬрд┐рд╕реЗ рдПрдЬреЗрдВрдЯ рд╕рд╛рдорд╛рдиреНрдп рдХреНрд░рд┐рдпрд╛рдУрдВ рдХреЗ рдЪрд░рдгреЛрдВ рдХреЗ рдмреАрдЪ рдирд┐рдпрдорд┐рдд рд░реВрдк рд╕реЗ рдЪрд▓рд╛ рд╕рдХрддрд╛ рд╣реИред рдЗрд╕ рдЪрд░рдг рдореЗрдВ рдХреЛрдИ рдЯреВрд▓ рдХреЙрд▓ рдирд╣реАрдВ рд╣реЛрддреА рд╣реИ, LLM рд╕реЗ рдХреЗрд╡рд▓ рдЙрди рддрдереНрдпреЛрдВ рдХреА рд╕реВрдЪреА рдХреЛ рдЕрдкрдбреЗрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдХрд╣рд╛ рдЬрд╛рддрд╛ рд╣реИ рдЬреЛ рдЙрд╕реЗ рдЬреНрдЮрд╛рдд рд╣реИрдВ рдФрд░ рдЗрди рддрдереНрдпреЛрдВ рдХреЗ рдЖрдзрд╛рд░ рдкрд░ рдЙрд╕реЗ рдЕрдЧрд▓реЗ рдХрджрдореЛрдВ рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рд╡рд┐рдЪрд╛рд░ рдХрд░рдирд╛ рд╣реЛрддрд╛ рд╣реИред + +```py +from smolagents import load_tool, CodeAgent, HfApiModel, DuckDuckGoSearchTool +from dotenv import load_dotenv + +load_dotenv() + +# Import tool from Hub +image_generation_tool = load_tool("m-ric/text-to-image", trust_remote_code=True) + +search_tool = DuckDuckGoSearchTool() + +agent = CodeAgent( + tools=[search_tool], + model=HfApiModel("Qwen/Qwen2.5-72B-Instruct"), + planning_interval=3 # This is where you activate planning! +) + +# Run it! 
+result = agent.run( + "How long would a cheetah at full speed take to run the length of Pont Alexandre III?", +) +``` diff --git a/docs/source/hi/tutorials/inspect_runs.md b/docs/source/hi/tutorials/inspect_runs.md new file mode 100644 index 000000000..db85fc755 --- /dev/null +++ b/docs/source/hi/tutorials/inspect_runs.md @@ -0,0 +1,104 @@ + +# OpenTelemetry рдХреЗ рд╕рд╛рде runs рдХрд╛ рдирд┐рд░реАрдХреНрд╖рдг + +[[open-in-colab]] + +> [!TIP] +> рдпрджрд┐ рдЖрдк рдПрдЬреЗрдВрдЯреНрд╕ рдмрдирд╛рдиреЗ рдореЗрдВ рдирдП рд╣реИрдВ, рддреЛ рдкрд╣рд▓реЗ [рдПрдЬреЗрдВрдЯреНрд╕ рдХрд╛ рдкрд░рд┐рдЪрдп](../conceptual_guides/intro_agents) рдФрд░ [smolagents рдХреА рдЧрд╛рдЗрдбреЗрдб рдЯреВрд░](../guided_tour) рдкрдврд╝рдирд╛ рд╕реБрдирд┐рд╢реНрдЪрд┐рдд рдХрд░реЗрдВред + +### Agents runs рдХреЛ рд▓реЙрдЧ рдХреНрдпреЛрдВ рдХрд░реЗрдВ? + +Agent runs рдХреЛ рдбреАрдмрдЧ рдХрд░рдирд╛ рдЬрдЯрд┐рд▓ рд╣реЛрддрд╛ рд╣реИред + +рдпрд╣ рд╕рддреНрдпрд╛рдкрд┐рдд рдХрд░рдирд╛ рдХрдард┐рди рд╣реИ рдХрд┐ рдПрдХ рд░рди рдареАрдХ рд╕реЗ рдЪрд▓рд╛ рдпрд╛ рдирд╣реАрдВ, рдХреНрдпреЛрдВрдХрд┐ рдПрдЬреЗрдВрдЯ рд╡рд░реНрдХрдлрд╝реНрд▓реЛ [рдбрд┐рдЬрд╝рд╛рдЗрди рдХреЗ рдЕрдиреБрд╕рд╛рд░ рдЕрдкреНрд░рддреНрдпрд╛рд╢рд┐рдд](../conceptual_guides/intro_agents) рд╣реЛрддреЗ рд╣реИрдВ (рдпрджрд┐ рд╡реЗ рдкреНрд░рддреНрдпрд╛рд╢рд┐рдд рд╣реЛрддреЗ, рддреЛ рдЖрдк рдкреБрд░рд╛рдиреЗ рдЕрдЪреНрдЫреЗ рдХреЛрдб рдХрд╛ рд╣реА рдЙрдкрдпреЛрдЧ рдХрд░ рд░рд╣реЗ рд╣реЛрддреЗ)ред + +рдФрд░ рд░рди рдХрд╛ рдирд┐рд░реАрдХреНрд╖рдг рдХрд░рдирд╛ рднреА рдХрдард┐рди рд╣реИ: рдорд▓реНрдЯреА-рд╕реНрдЯреЗрдк рдПрдЬреЗрдВрдЯреНрд╕ рдЬрд▓реНрджреА рд╣реА рдХрдВрд╕реЛрд▓ рдХреЛ рд▓реЙрдЧ рд╕реЗ рднрд░ рджреЗрддреЗ рд╣реИрдВ, рдФрд░ рдЕрдзрд┐рдХрд╛рдВрд╢ рддреНрд░реБрдЯрд┐рдпрд╛рдВ рдХреЗрд╡рд▓ "LLM dumb" рдкреНрд░рдХрд╛рд░ рдХреА рддреНрд░реБрдЯрд┐рдпрд╛рдВ рд╣реЛрддреА рд╣реИрдВ, рдЬрд┐рдирд╕реЗ LLM рдЕрдЧрд▓реЗ рдЪрд░рдг рдореЗрдВ рдмреЗрд╣рддрд░ рдХреЛрдб рдпрд╛ рдЯреВрд▓ рдХреЙрд▓ рд▓рд┐рдЦрдХрд░ рд╕реНрд╡рдпрдВ рдХреЛ 
рд╕реБрдзрд╛рд░ рд▓реЗрддрд╛ рд╣реИред + +рдЗрд╕рд▓рд┐рдП рдмрд╛рдж рдХреЗ рдирд┐рд░реАрдХреНрд╖рдг рдФрд░ рдореЙрдирд┐рдЯрд░рд┐рдВрдЧ рдХреЗ рд▓рд┐рдП рдкреНрд░реЛрдбрдХреНрд╢рди рдореЗрдВ agent runs рдХреЛ рд░рд┐рдХреЙрд░реНрдб рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдЗрдВрд╕реНрдЯреНрд░реБрдореЗрдВрдЯреЗрд╢рди рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдирд╛ рдЖрд╡рд╢реНрдпрдХ рд╣реИ! + +рд╣рдордиреЗ agent runs рдХреЛ рдЗрдВрд╕реНрдЯреНрд░реБрдореЗрдВрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП [OpenTelemetry](https://opentelemetry.io/) рдорд╛рдирдХ рдХреЛ рдЕрдкрдирд╛рдпрд╛ рд╣реИред + +рдЗрд╕рдХрд╛ рдорддрд▓рдм рд╣реИ рдХрд┐ рдЖрдк рдмрд╕ рдХреБрдЫ рдЗрдВрд╕реНрдЯреНрд░реБрдореЗрдВрдЯреЗрд╢рди рдХреЛрдб рдЪрд▓рд╛ рд╕рдХрддреЗ рд╣реИрдВ, рдлрд┐рд░ рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯреНрд╕ рдХреЛ рд╕рд╛рдорд╛рдиреНрдп рд░реВрдк рд╕реЗ рдЪрд▓рд╛ рд╕рдХрддреЗ рд╣реИрдВ, рдФрд░ рд╕рдм рдХреБрдЫ рдЖрдкрдХреЗ рдкреНрд▓реЗрдЯрдлреЙрд░реНрдо рдореЗрдВ рд▓реЙрдЧ рд╣реЛ рдЬрд╛рддрд╛ рд╣реИред + +рдпрд╣ рдЗрд╕ рдкреНрд░рдХрд╛рд░ рд╣реЛрддрд╛ рд╣реИ: +рдкрд╣рд▓реЗ рдЖрд╡рд╢реНрдпрдХ рдкреИрдХреЗрдЬ рдЗрдВрд╕реНрдЯреЙрд▓ рдХрд░реЗрдВред рдпрд╣рд╛рдВ рд╣рдо [Phoenix by Arize AI](https://github.com/Arize-ai/phoenix) рдЗрдВрд╕реНрдЯреЙрд▓ рдХрд░рддреЗ рд╣реИрдВ рдХреНрдпреЛрдВрдХрд┐ рдпрд╣ рд▓реЙрдЧреНрд╕ рдХреЛ рдПрдХрддреНрд░ рдФрд░ рдирд┐рд░реАрдХреНрд╖рдг рдХрд░рдиреЗ рдХрд╛ рдПрдХ рдЕрдЪреНрдЫрд╛ рд╕рдорд╛рдзрд╛рди рд╣реИ, рд▓реЗрдХрд┐рди рдЗрд╕ рд╕рдВрдЧреНрд░рд╣ рдФрд░ рдирд┐рд░реАрдХреНрд╖рдг рднрд╛рдЧ рдХреЗ рд▓рд┐рдП рдЖрдк рдЕрдиреНрдп OpenTelemetry-рдХрдореНрдкреИрдЯрд┐рдмрд▓ рдкреНрд▓реЗрдЯрдлреЙрд░реНрдореНрд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред + +```shell +pip install smolagents +pip install arize-phoenix opentelemetry-sdk opentelemetry-exporter-otlp openinference-instrumentation-smolagents +``` + +рдлрд┐рд░ рдХрд▓реЗрдХреНрдЯрд░ рдХреЛ рдмреИрдХрдЧреНрд░рд╛рдЙрдВрдб рдореЗрдВ рдЪрд▓рд╛рдПрдВред + +```shell +python -m phoenix.server.main serve +``` + +рдЕрдВрдд рдореЗрдВ, 
рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯреНрд╕ рдХреЛ рдЯреНрд░реЗрд╕ рдХрд░рдиреЗ рдФрд░ рдЯреНрд░реЗрд╕ рдХреЛ рдиреАрдЪреЗ рдкрд░рд┐рднрд╛рд╖рд┐рдд рдПрдВрдбрдкреЙрдЗрдВрдЯ рдкрд░ Phoenix рдХреЛ рднреЗрдЬрдиреЗ рдХреЗ рд▓рд┐рдП `SmolagentsInstrumentor` рдХреЛ рд╕реЗрдЯ рдХрд░реЗрдВред + +```python +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor + +from openinference.instrumentation.smolagents import SmolagentsInstrumentor +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor + +endpoint = "http://0.0.0.0:6006/v1/traces" +trace_provider = TracerProvider() +trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint))) + +SmolagentsInstrumentor().instrument(tracer_provider=trace_provider) +``` +рддрдм рдЖрдк рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдЪрд▓рд╛ рд╕рдХрддреЗ рд╣реИрдВ! + +```py +from smolagents import ( + CodeAgent, + ToolCallingAgent, + ManagedAgent, + DuckDuckGoSearchTool, + VisitWebpageTool, + HfApiModel, +) + +model = HfApiModel() + +agent = ToolCallingAgent( + tools=[DuckDuckGoSearchTool(), VisitWebpageTool()], + model=model, +) +managed_agent = ManagedAgent( + agent=agent, + name="managed_agent", + description="This is an agent that can do web search.", +) +manager_agent = CodeAgent( + tools=[], + model=model, + managed_agents=[managed_agent], +) +manager_agent.run( + "If the US keeps its 2024 growth rate, how many years will it take for the GDP to double?" +) +``` +рдФрд░ рдлрд┐рд░ рдЖрдк рдЕрдкрдиреЗ рд░рди рдХрд╛ рдирд┐рд░реАрдХреНрд╖рдг рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП `http://0.0.0.0:6006/projects/` рдкрд░ рдЬрд╛ рд╕рдХрддреЗ рд╣реИрдВ! 
+ + + +рдЖрдк рджреЗрдЦ рд╕рдХрддреЗ рд╣реИрдВ рдХрд┐ CodeAgent рдиреЗ рдЕрдкрдиреЗ рдореИрдиреЗрдЬреНрдб ToolCallingAgent рдХреЛ (рд╡реИрд╕реЗ, рдореИрдиреЗрдЬреНрдб рдПрдЬреЗрдВрдЯ рдПрдХ CodeAgent рднреА рд╣реЛ рд╕рдХрддрд╛ рдерд╛) U.S. 2024 рдЧреНрд░реЛрде рд░реЗрдЯ рдХреЗ рд▓рд┐рдП рд╡реЗрдм рд╕рд░реНрдЪ рдЪрд▓рд╛рдиреЗ рдХреЗ рд▓рд┐рдП рдХреЙрд▓ рдХрд┐рдпрд╛ред рдлрд┐рд░ рдореИрдиреЗрдЬреНрдб рдПрдЬреЗрдВрдЯ рдиреЗ рдЕрдкрдиреА рд░рд┐рдкреЛрд░реНрдЯ рд▓реМрдЯрд╛рдИ рдФрд░ рдореИрдиреЗрдЬрд░ рдПрдЬреЗрдВрдЯ рдиреЗ рдЕрд░реНрдерд╡реНрдпрд╡рд╕реНрдерд╛ рдХреЗ рджреЛрдЧреБрдирд╛ рд╣реЛрдиреЗ рдХрд╛ рд╕рдордп рдЧрдгрдирд╛ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдЙрд╕ рдкрд░ рдХрд╛рд░реНрдп рдХрд┐рдпрд╛! рдЕрдЪреНрдЫрд╛ рд╣реИ, рд╣реИ рдирд╛? \ No newline at end of file diff --git a/docs/source/hi/tutorials/secure_code_execution.md b/docs/source/hi/tutorials/secure_code_execution.md new file mode 100644 index 000000000..ad2cd8c34 --- /dev/null +++ b/docs/source/hi/tutorials/secure_code_execution.md @@ -0,0 +1,82 @@ + +# рд╕реБрд░рдХреНрд╖рд┐рдд рдХреЛрдб рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди + +[[open-in-colab]] + +> [!TIP] +> рдпрджрд┐ рдЖрдк рдПрдЬреЗрдВрдЯреНрд╕ рдмрдирд╛рдиреЗ рдореЗрдВ рдирдП рд╣реИрдВ, рддреЛ рд╕рдмрд╕реЗ рдкрд╣рд▓реЗ [рдПрдЬреЗрдВрдЯреНрд╕ рдХрд╛ рдкрд░рд┐рдЪрдп](../conceptual_guides/intro_agents) рдФрд░ [smolagents рдХреА рдЧрд╛рдЗрдбреЗрдб рдЯреВрд░](../guided_tour) рдкрдврд╝рдирд╛ рд╕реБрдирд┐рд╢реНрдЪрд┐рдд рдХрд░реЗрдВред + +### рдХреЛрдб Agents + +[рдХрдИ](https://huggingface.co/papers/2402.01030) [рд╢реЛрдз](https://huggingface.co/papers/2411.01747) [рдкрддреНрд░реЛрдВ](https://huggingface.co/papers/2401.00812) рдиреЗ рджрд┐рдЦрд╛рдпрд╛ рд╣реИ рдХрд┐ LLM рджреНрд╡рд╛рд░рд╛ рдЕрдкрдиреА рдХреНрд░рд┐рдпрд╛рдУрдВ (рдЯреВрд▓ рдХреЙрд▓реНрд╕) рдХреЛ рдХреЛрдб рдореЗрдВ рд▓рд┐рдЦрдирд╛, рдЯреВрд▓ рдХреЙрд▓рд┐рдВрдЧ рдХреЗ рд╡рд░реНрддрдорд╛рди рдорд╛рдирдХ рдкреНрд░рд╛рд░реВрдк рд╕реЗ рдмрд╣реБрдд рдмреЗрд╣рддрд░ рд╣реИ, рдЬреЛ industry рдореЗрдВ 
"рдЯреВрд▓реНрд╕ рдиреЗрдореНрд╕ рдФрд░ рдЖрд░реНрдЧреНрдпреВрдореЗрдВрдЯреНрд╕ рдХреЛ JSON рдХреЗ рд░реВрдк рдореЗрдВ рд▓рд┐рдЦрдиреЗ" рдХреЗ рд╡рд┐рднрд┐рдиреНрди рд░реВрдк рд╣реИрдВред + +рдХреЛрдб рдмреЗрд╣рддрд░ рдХреНрдпреЛрдВ рд╣реИ? рдХреНрдпреЛрдВрдХрд┐ рд╣рдордиреЗ рдЕрдкрдиреА рдХреЛрдб рднрд╛рд╖рд╛рдУрдВ рдХреЛ рд╡рд┐рд╢реЗрд╖ рд░реВрдк рд╕реЗ рдХрдВрдкреНрдпреВрдЯрд░ рджреНрд╡рд╛рд░рд╛ рдХреА рдЬрд╛рдиреЗ рд╡рд╛рд▓реА рдХреНрд░рд┐рдпрд╛рдУрдВ рдХреЛ рд╡реНрдпрдХреНрдд рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рддреИрдпрд╛рд░ рдХрд┐рдпрд╛ рд╣реИред рдпрджрд┐ JSON рд╕реНрдирд┐рдкреЗрдЯреНрд╕ рдПрдХ рдмреЗрд╣рддрд░ рддрд░реАрдХрд╛ рд╣реЛрддрд╛, рддреЛ рдпрд╣ рдкреИрдХреЗрдЬ JSON рд╕реНрдирд┐рдкреЗрдЯреНрд╕ рдореЗрдВ рд▓рд┐рдЦрд╛ рдЧрдпрд╛ рд╣реЛрддрд╛ рдФрд░ рд╢реИрддрд╛рди рд╣рдо рдкрд░ рд╣рдВрд╕ рд░рд╣рд╛ рд╣реЛрддрд╛ред + +рдХреЛрдб рдХрдВрдкреНрдпреВрдЯрд░ рдкрд░ рдХреНрд░рд┐рдпрд╛рдПрдБ рд╡реНрдпрдХреНрдд рдХрд░рдиреЗ рдХрд╛ рдмреЗрд╣рддрд░ рддрд░реАрдХрд╛ рд╣реИред рдЗрд╕рдореЗрдВ рдмреЗрд╣рддрд░ рд╣реИ: +- **рдХрдВрдкреЛрдЬрд╝реЗрдмрд┐рд▓рд┐рдЯреА:** рдХреНрдпрд╛ рдЖрдк JSON рдХреНрд░рд┐рдпрд╛рдУрдВ рдХреЛ рдПрдХ-рджреВрд╕рд░реЗ рдХреЗ рднреАрддрд░ рдиреЗрд╕реНрдЯ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдпрд╛ рдмрд╛рдж рдореЗрдВ рдкреБрди: рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП JSON рдХреНрд░рд┐рдпрд╛рдУрдВ рдХрд╛ рдПрдХ рд╕реЗрдЯ рдкрд░рд┐рднрд╛рд╖рд┐рдд рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдЬреИрд╕реЗ рдЖрдк рдмрд╕ рдПрдХ рдкрд╛рдпрдерди рдлрд╝рдВрдХреНрд╢рди рдкрд░рд┐рднрд╛рд╖рд┐рдд рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ? +- **рдСрдмреНрдЬреЗрдХреНрдЯ рдкреНрд░рдмрдВрдзрди:** JSON рдореЗрдВ `generate_image` рдЬреИрд╕реА рдХреНрд░рд┐рдпрд╛ рдХрд╛ рдЖрдЙрдЯрдкреБрдЯ рдХреИрд╕реЗ рд╕реНрдЯреЛрд░ рдХрд░реЗрдВ? 
+- **рд╕рд╛рдорд╛рдиреНрдпрддрд╛:** рдХреЛрдб рдХрд┐рд╕реА рднреА рдХрдВрдкреНрдпреВрдЯрд░ рдХрд╛рд░реНрдп рдХреЛ рд╡реНрдпрдХреНрдд рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдмрдирд╛рдпрд╛ рдЧрдпрд╛ рд╣реИред +- **LLM рдкреНрд░рд╢рд┐рдХреНрд╖рдг рдХреЙрд░реНрдкрд╕ рдореЗрдВ рдкреНрд░рддрд┐рдирд┐рдзрд┐рддреНрд╡:** рдХреНрдпреЛрдВ рди рдЗрд╕ рдЖрд╢реАрд░реНрд╡рд╛рдж рдХрд╛ рд▓рд╛рдн рдЙрдард╛рдПрдВ рдХрд┐ рдЙрдЪреНрдЪ рдЧреБрдгрд╡рддреНрддрд╛ рд╡рд╛рд▓реЗ рдХреЛрдб рдЙрджрд╛рд╣рд░рдг рдкрд╣рд▓реЗ рд╕реЗ рд╣реА LLM рдкреНрд░рд╢рд┐рдХреНрд╖рдг рдбреЗрдЯрд╛ рдореЗрдВ рд╢рд╛рдорд┐рд▓ рд╣реИрдВ? + +рдпрд╣ рдиреАрдЪреЗ рджреА рдЧрдИ рдЫрд╡рд┐ рдореЗрдВ рджрд░реНрд╢рд╛рдпрд╛ рдЧрдпрд╛ рд╣реИ, рдЬреЛ [Executable Code Actions Elicit Better LLM Agents](https://huggingface.co/papers/2402.01030) рд╕реЗ рд▓реА рдЧрдИ рд╣реИред + + + +рдпрд╣реА рдХрд╛рд░рдг рд╣реИ рдХрд┐ рд╣рдордиреЗ рдХреЛрдб рдПрдЬреЗрдВрдЯреНрд╕, рдЗрд╕ рдорд╛рдорд▓реЗ рдореЗрдВ рдкрд╛рдпрдерди рдПрдЬреЗрдВрдЯреНрд╕ рдкрд░ рдЬреЛрд░ рджрд┐рдпрд╛, рдЬрд┐рд╕рдХрд╛ рдорддрд▓рдм рд╕реБрд░рдХреНрд╖рд┐рдд рдкрд╛рдпрдерди рдЗрдВрдЯрд░рдкреНрд░реЗрдЯрд░ рдмрдирд╛рдиреЗ рдкрд░ рдЕрдзрд┐рдХ рдкреНрд░рдпрд╛рд╕ рдХрд░рдирд╛ рдерд╛ред + +### рд▓реЛрдХрд▓ рдкрд╛рдпрдерди рдЗрдВрдЯрд░рдкреНрд░реЗрдЯрд░ + +рдбрд┐рдлрд╝реЙрд▓реНрдЯ рд░реВрдк рд╕реЗ, `CodeAgent` LLM-рдЬрдирд░реЗрдЯреЗрдб рдХреЛрдб рдХреЛ рдЖрдкрдХреЗ рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рдореЗрдВ рдЪрд▓рд╛рддрд╛ рд╣реИред +рдпрд╣ рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рд╡реИрдирд┐рд▓рд╛ рдкрд╛рдпрдерди рдЗрдВрдЯрд░рдкреНрд░реЗрдЯрд░ рджреНрд╡рд╛рд░рд╛ рдирд╣реАрдВ рдХрд┐рдпрд╛ рдЬрд╛рддрд╛: рд╣рдордиреЗ рдПрдХ рдЕрдзрд┐рдХ рд╕реБрд░рдХреНрд╖рд┐рдд `LocalPythonInterpreter` рдХреЛ рд╢реБрд░реВ рд╕реЗ рдлрд┐рд░ рд╕реЗ рдмрдирд╛рдпрд╛ рд╣реИред +рдпрд╣ рдЗрдВрдЯрд░рдкреНрд░реЗрдЯрд░ рд╕реБрд░рдХреНрд╖рд╛ рдХреЗ рд▓рд┐рдП рдбрд┐рдЬрд╝рд╛рдЗрди рдХрд┐рдпрд╛ рдЧрдпрд╛ рд╣реИ: + - рдЗрдореНрдкреЛрд░реНрдЯреНрд╕ рдХреЛ рдЙрдкрдпреЛрдЧрдХрд░реНрддрд╛ рджреНрд╡рд╛рд░рд╛ рд╕реНрдкрд╖реНрдЯ 
рд░реВрдк рд╕реЗ рдкрд╛рд╕ рдХреА рдЧрдИ рд╕реВрдЪреА рддрдХ рд╕реАрдорд┐рдд рдХрд░рдирд╛ + - рдЗрдирдлрд┐рдирд┐рдЯ рд▓реВрдкреНрд╕ рдФрд░ рд░рд┐рд╕реЛрд░реНрд╕ рдмреНрд▓реЛрдЯрд┐рдВрдЧ рдХреЛ рд░реЛрдХрдиреЗ рдХреЗ рд▓рд┐рдП рдСрдкрд░реЗрд╢рдВрд╕ рдХреА рд╕рдВрдЦреНрдпрд╛ рдХреЛ рдХреИрдк рдХрд░рдирд╛ + - рдХреЛрдИ рднреА рдРрд╕рд╛ рдСрдкрд░реЗрд╢рди рдирд╣реАрдВ рдХрд░реЗрдЧрд╛ рдЬреЛ рдкреВрд░реНрд╡-рдкрд░рд┐рднрд╛рд╖рд┐рдд рдирд╣реАрдВ рд╣реИ + +рд╣рдордиреЗ рдЗрд╕реЗ рдХрдИ рдЙрдкрдпреЛрдЧ рдорд╛рдорд▓реЛрдВ рдореЗрдВ рдЗрд╕реНрддреЗрдорд╛рд▓ рдХрд┐рдпрд╛ рд╣реИ, рдФрд░ рдХрднреА рднреА рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рдХреЛ рдХреЛрдИ рдиреБрдХрд╕рд╛рди рдирд╣реАрдВ рджреЗрдЦрд╛ред + +рд╣рд╛рд▓рд╛рдВрдХрд┐ рдпрд╣ рд╕рдорд╛рдзрд╛рди рдкреВрд░реА рддрд░рд╣ рд╕реЗ рд╕реБрд░рдХреНрд╖рд┐рдд рдирд╣реАрдВ рд╣реИ: рдХреЛрдИ рдРрд╕реЗ рдЕрд╡рд╕рд░реЛрдВ рдХреА рдХрд▓реНрдкрдирд╛ рдХрд░ рд╕рдХрддрд╛ рд╣реИ рдЬрд╣рд╛рдВ рджреБрд░реНрднрд╛рд╡рдирд╛рдкреВрд░реНрдг рдХрд╛рд░реНрдпреЛрдВ рдХреЗ рд▓рд┐рдП рдлрд╛рдЗрди-рдЯреНрдпреВрди рдХрд┐рдП рдЧрдП LLM рдЕрднреА рднреА рдЖрдкрдХреЗ рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рдХреЛ рдиреБрдХрд╕рд╛рди рдкрд╣реБрдВрдЪрд╛ рд╕рдХрддреЗ рд╣реИрдВред рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП рдпрджрд┐ рдЖрдкрдиреЗ рдЫрд╡рд┐рдпреЛрдВ рдХреЛ рдкреНрд░реЛрд╕реЗрд╕ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП `Pillow` рдЬреИрд╕реЗ рдорд╛рд╕реВрдо рдкреИрдХреЗрдЬ рдХреА рдЕрдиреБрдорддрд┐ рджреА рд╣реИ, рддреЛ LLM рдЖрдкрдХреА рд╣рд╛рд░реНрдб рдбреНрд░рд╛рдЗрд╡ рдХреЛ рдмреНрд▓реЛрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рд╣рдЬрд╛рд░реЛрдВ рдЫрд╡рд┐рдпреЛрдВ рдХреЛ рд╕реЗрд╡ рдХрд░ рд╕рдХрддрд╛ рд╣реИред +рдпрджрд┐ рдЖрдкрдиреЗ рдЦреБрдж LLM рдЗрдВрдЬрди рдЪреБрдирд╛ рд╣реИ рддреЛ рдпрд╣ рдирд┐рд╢реНрдЪрд┐рдд рд░реВрдк рд╕реЗ рд╕рдВрднрд╛рд╡рд┐рдд рдирд╣реАрдВ рд╣реИ, рд▓реЗрдХрд┐рди рдпрд╣ рд╣реЛ рд╕рдХрддрд╛ рд╣реИред + +рддреЛ рдпрджрд┐ рдЖрдк рдЕрддрд┐рд░рд┐рдХреНрдд рд╕рд╛рд╡рдзрд╛рдиреА рдмрд░рддрдирд╛ рдЪрд╛рд╣рддреЗ рд╣реИрдВ, рддреЛ рдЖрдк рдиреАрдЪреЗ 
рд╡рд░реНрдгрд┐рдд рд░рд┐рдореЛрдЯ рдХреЛрдб рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рд╡рд┐рдХрд▓реНрдк рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред + +### E2B рдХреЛрдб рдПрдХреНрдЬреАрдХреНрдпреВрдЯрд░ + +рдЕрдзрд┐рдХрддрдо рд╕реБрд░рдХреНрд╖рд╛ рдХреЗ рд▓рд┐рдП, рдЖрдк рдХреЛрдб рдХреЛ рд╕реИрдВрдбрдмреЙрдХреНрд╕реНрдб рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рдореЗрдВ рдЪрд▓рд╛рдиреЗ рдХреЗ рд▓рд┐рдП E2B рдХреЗ рд╕рд╛рде рд╣рдорд╛рд░реЗ рдПрдХреАрдХрд░рдг рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред рдпрд╣ рдПрдХ рд░рд┐рдореЛрдЯ рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рд╕реЗрд╡рд╛ рд╣реИ рдЬреЛ рдЖрдкрдХреЗ рдХреЛрдб рдХреЛ рдПрдХ рдЖрдЗрд╕реЛрд▓реЗрдЯреЗрдб рдХрдВрдЯреЗрдирд░ рдореЗрдВ рдЪрд▓рд╛рддреА рд╣реИ, рдЬрд┐рд╕рд╕реЗ рдХреЛрдб рдХрд╛ рдЖрдкрдХреЗ рд╕реНрдерд╛рдиреАрдп рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рдХреЛ рдкреНрд░рднрд╛рд╡рд┐рдд рдХрд░рдирд╛ рдЕрд╕рдВрднрд╡ рд╣реЛ рдЬрд╛рддрд╛ рд╣реИред + +рдЗрд╕рдХреЗ рд▓рд┐рдП, рдЖрдкрдХреЛ рдЕрдкрдирд╛ E2B рдЕрдХрд╛рдЙрдВрдЯ рд╕реЗрдЯрдЕрдк рдХрд░рдиреЗ рдФрд░ рдЕрдкрдиреЗ рдПрдирд╡рд╛рдпрд░рдирдореЗрдВрдЯ рд╡реЗрд░рд┐рдПрдмрд▓реНрд╕ рдореЗрдВ рдЕрдкрдирд╛ `E2B_API_KEY` рд╕реЗрдЯ рдХрд░рдиреЗ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрдЧреАред рдЕрдзрд┐рдХ рдЬрд╛рдирдХрд╛рд░реА рдХреЗ рд▓рд┐рдП [E2B рдХреА рдХреНрд╡рд┐рдХрд╕реНрдЯрд╛рд░реНрдЯ рдбреЙрдХреНрдпреВрдореЗрдВрдЯреЗрд╢рди](https://e2b.dev/docs/quickstart) рдкрд░ рдЬрд╛рдПрдВред + +рдлрд┐рд░ рдЖрдк рдЗрд╕реЗ `pip install e2b-code-interpreter python-dotenv` рдХреЗ рд╕рд╛рде рдЗрдВрд╕реНрдЯреЙрд▓ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред + +рдЕрдм рдЖрдк рддреИрдпрд╛рд░ рд╣реИрдВ! 
+ +рдХреЛрдб рдПрдХреНрдЬреАрдХреНрдпреВрдЯрд░ рдХреЛ E2B рдкрд░ рд╕реЗрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рдмрд╕ рдЕрдкрдиреЗ `CodeAgent` рдХреЛ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬрд╝ рдХрд░рддреЗ рд╕рдордп `use_e2b_executor=True` рдлреНрд▓реИрдЧ рдкрд╛рд╕ рдХрд░реЗрдВред +рдзреНрдпрд╛рди рджреЗрдВ рдХрд┐ рдЖрдкрдХреЛ `additional_authorized_imports` рдореЗрдВ рд╕рднреА рдЯреВрд▓ рдХреА рдбрд┐рдкреЗрдВрдбреЗрдВрд╕реАрдЬрд╝ рдЬреЛрдбрд╝рдиреА рдЪрд╛рд╣рд┐рдП, рддрд╛рдХрд┐ рдПрдХреНрдЬреАрдХреНрдпреВрдЯрд░ рдЙрдиреНрд╣реЗрдВ рдЗрдВрд╕реНрдЯреЙрд▓ рдХрд░реЗред + +```py +from smolagents import CodeAgent, VisitWebpageTool, HfApiModel +agent = CodeAgent( + tools = [VisitWebpageTool()], + model=HfApiModel(), + additional_authorized_imports=["requests", "markdownify"], + use_e2b_executor=True +) + +agent.run("What was Abraham Lincoln's preferred pet?") +``` + +E2B рдХреЛрдб рдПрдХреНрдЬреАрдХреНрдпреВрд╢рди рд╡рд░реНрддрдорд╛рди рдореЗрдВ рдорд▓реНрдЯреА-рдПрдЬреЗрдВрдЯреНрд╕ рдХреЗ рд╕рд╛рде рдХрд╛рдо рдирд╣реАрдВ рдХрд░рддрд╛ рд╣реИ - рдХреНрдпреЛрдВрдХрд┐ рдХреЛрдб рдмреНрд▓реЙрдм рдореЗрдВ рдПрдХ рдПрдЬреЗрдВрдЯ рдХреЙрд▓ рдХрд░рдирд╛ рдЬреЛ рд░рд┐рдореЛрдЯрд▓реА рдПрдХреНрдЬреАрдХреНрдпреВрдЯ рдХрд┐рдпрд╛ рдЬрд╛рдирд╛ рдЪрд╛рд╣рд┐рдП, рдпрд╣ рдПрдХ рдЧрдбрд╝рдмрдбрд╝ рд╣реИред рд▓реЗрдХрд┐рди рд╣рдо рдЗрд╕реЗ рдЬреЛрдбрд╝рдиреЗ рдкрд░ рдХрд╛рдо рдХрд░ рд░рд╣реЗ рд╣реИрдВ! 
diff --git a/docs/source/hi/tutorials/tools.md b/docs/source/hi/tutorials/tools.md new file mode 100644 index 000000000..bb56d7bfc --- /dev/null +++ b/docs/source/hi/tutorials/tools.md @@ -0,0 +1,247 @@ + +# Tools + +[[open-in-colab]] + +рдпрд╣рд╛рдБ, рд╣рдо рдПрдбрд╡рд╛рдВрд╕реНрдб tools рдЙрдкрдпреЛрдЧ рджреЗрдЦреЗрдВрдЧреЗред + +> [!TIP] +> рдпрджрд┐ рдЖрдк рдПрдЬреЗрдВрдЯреНрд╕ рдмрдирд╛рдиреЗ рдореЗрдВ рдирдП рд╣реИрдВ, рддреЛ рд╕рдмрд╕реЗ рдкрд╣рд▓реЗ [рдПрдЬреЗрдВрдЯреНрд╕ рдХрд╛ рдкрд░рд┐рдЪрдп](../conceptual_guides/intro_agents) рдФрд░ [smolagents рдХреА рдЧрд╛рдЗрдбреЗрдб рдЯреВрд░](../guided_tour) рдкрдврд╝рдирд╛ рд╕реБрдирд┐рд╢реНрдЪрд┐рдд рдХрд░реЗрдВред + +- [Tools](#tools) + - [рдЯреВрд▓ рдХреНрдпрд╛ рд╣реИ, рдФрд░ рдЗрд╕реЗ рдХреИрд╕реЗ рдмрдирд╛рдПрдВ?](#рдЯреВрд▓-рдХреНрдпрд╛-рд╣реИ-рдФрд░-рдЗрд╕реЗ-рдХреИрд╕реЗ-рдмрдирд╛рдПрдВ) + - [рдЕрдкрдирд╛ рдЯреВрд▓ рд╣рдм рдкрд░ рд╢реЗрдпрд░ рдХрд░реЗрдВ](#рдЕрдкрдирд╛-рдЯреВрд▓-рд╣рдм-рдкрд░-рд╢реЗрдпрд░-рдХрд░реЗрдВ) + - [рд╕реНрдкреЗрд╕ рдХреЛ рдЯреВрд▓ рдХреЗ рд░реВрдк рдореЗрдВ рдЗрдореНрдкреЛрд░реНрдЯ рдХрд░реЗрдВ](#рд╕реНрдкреЗрд╕-рдХреЛ-рдЯреВрд▓-рдХреЗ-рд░реВрдк-рдореЗрдВ-рдЗрдореНрдкреЛрд░реНрдЯ-рдХрд░реЗрдВ) + - [LangChain рдЯреВрд▓реНрд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░реЗрдВ](#LangChain-рдЯреВрд▓реНрд╕-рдХрд╛-рдЙрдкрдпреЛрдЧ-рдХрд░реЗрдВ) + - [рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдХреЗ рдЯреВрд▓рдмреЙрдХреНрд╕ рдХреЛ рдореИрдиреЗрдЬ рдХрд░реЗрдВ](#рдЕрдкрдиреЗ-рдПрдЬреЗрдВрдЯ-рдХреЗ-рдЯреВрд▓рдмреЙрдХреНрд╕-рдХреЛ-рдореИрдиреЗрдЬ-рдХрд░реЗрдВ) + - [рдЯреВрд▓реНрд╕ рдХрд╛ рдХрд▓реЗрдХреНрд╢рди рдЙрдкрдпреЛрдЧ рдХрд░реЗрдВ](#рдЯреВрд▓реНрд╕-рдХрд╛-рдХрд▓реЗрдХреНрд╢рди-рдЙрдкрдпреЛрдЧ-рдХрд░реЗрдВ) + +### рдЯреВрд▓ рдХреНрдпрд╛ рд╣реИ рдФрд░ рдЗрд╕реЗ рдХреИрд╕реЗ рдмрдирд╛рдПрдВ + +рдЯреВрд▓ рдореБрдЦреНрдп рд░реВрдк рд╕реЗ рдПрдХ рдлрд╝рдВрдХреНрд╢рди рд╣реИ рдЬрд┐рд╕реЗ рдПрдХ LLM рдПрдЬреЗрдВрдЯрд┐рдХ рд╕рд┐рд╕реНрдЯрдо рдореЗрдВ рдЙрдкрдпреЛрдЧ рдХрд░ рд╕рдХрддрд╛ рд╣реИред + +рд▓реЗрдХрд┐рди 
рдЗрд╕рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, LLM рдХреЛ рдПрдХ API рджреА рдЬрд╛рдПрдЧреА: рдирд╛рдо, рдЯреВрд▓ рд╡рд┐рд╡рд░рдг, рдЗрдирдкреБрдЯ рдкреНрд░рдХрд╛рд░ рдФрд░ рд╡рд┐рд╡рд░рдг, рдЖрдЙрдЯрдкреБрдЯ рдкреНрд░рдХрд╛рд░ред + +рдЗрд╕рд▓рд┐рдП рдпрд╣ рдХреЗрд╡рд▓ рдПрдХ рдлрд╝рдВрдХреНрд╢рди рдирд╣реАрдВ рд╣реЛ рд╕рдХрддрд╛ред рдпрд╣ рдПрдХ рдХреНрд▓рд╛рд╕ рд╣реЛрдиреА рдЪрд╛рд╣рд┐рдПред + +рддреЛ рдореВрд▓ рд░реВрдк рд╕реЗ, рдЯреВрд▓ рдПрдХ рдХреНрд▓рд╛рд╕ рд╣реИ рдЬреЛ рдПрдХ рдлрд╝рдВрдХреНрд╢рди рдХреЛ рдореЗрдЯрд╛рдбреЗрдЯрд╛ рдХреЗ рд╕рд╛рде рд░реИрдк рдХрд░рддреА рд╣реИ рдЬреЛ LLM рдХреЛ рд╕рдордЭрдиреЗ рдореЗрдВ рдорджрдж рдХрд░рддреА рд╣реИ рдХрд┐ рдЗрд╕рдХрд╛ рдЙрдкрдпреЛрдЧ рдХреИрд╕реЗ рдХрд░реЗрдВред + +рдпрд╣ рдХреИрд╕рд╛ рджрд┐рдЦрддрд╛ рд╣реИ: + +```python +from smolagents import Tool + +class HFModelDownloadsTool(Tool): + name = "model_download_counter" + description = """ + This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. 
+ It returns the name of the checkpoint.""" + inputs = { + "task": { + "type": "string", + "description": "the task category (such as text-classification, depth-estimation, etc)", + } + } + output_type = "string" + + def forward(self, task: str): + from huggingface_hub import list_models + + model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) + return model.id + +model_downloads_tool = HFModelDownloadsTool() +``` + +рдХрд╕реНрдЯрдо рдЯреВрд▓ `Tool` рдХреЛ рд╕рдмрдХреНрд▓рд╛рд╕ рдХрд░рддрд╛ рд╣реИ рдЙрдкрдпреЛрдЧреА рдореЗрдердбреНрд╕ рдХреЛ рдЗрдирд╣реЗрд░рд┐рдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдПред рдЪрд╛рдЗрд▓реНрдб рдХреНрд▓рд╛рд╕ рднреА рдкрд░рд┐рднрд╛рд╖рд┐рдд рдХрд░рддреА рд╣реИ: +- рдПрдХ `name` рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯ, рдЬреЛ рдЯреВрд▓ рдХреЗ рдирд╛рдо рд╕реЗ рд╕рдВрдмрдВрдзрд┐рдд рд╣реИред рдирд╛рдо рдЖрдорддреМрд░ рдкрд░ рдмрддрд╛рддрд╛ рд╣реИ рдХрд┐ рдЯреВрд▓ рдХреНрдпрд╛ рдХрд░рддрд╛ рд╣реИред рдЪреВрдВрдХрд┐ рдХреЛрдб рдПрдХ рдЯрд╛рд╕реНрдХ рдХреЗ рд▓рд┐рдП рд╕рдмрд╕реЗ рдЕрдзрд┐рдХ рдбрд╛рдЙрдирд▓реЛрдб рд╡рд╛рд▓реЗ рдореЙрдбрд▓ рдХреЛ рд░рд┐рдЯрд░реНрди рдХрд░рддрд╛ рд╣реИ, рдЗрд╕рд▓рд┐рдП рдЗрд╕реЗ `model_download_counter` рдирд╛рдо рджреЗрдВред +- рдПрдХ `description` рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯ рдПрдЬреЗрдВрдЯ рдХреЗ рд╕рд┐рд╕реНрдЯрдо рдкреНрд░реЙрдореНрдкреНрдЯ рдХреЛ рдкреЙрдкреБрд▓реЗрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдЙрдкрдпреЛрдЧ рдХрд┐рдпрд╛ рдЬрд╛рддрд╛ рд╣реИред +- рдПрдХ `inputs` рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯ, рдЬреЛ `"type"` рдФрд░ `"description"` keys рд╡рд╛рд▓рд╛ рдбрд┐рдХреНрд╢рдирд░реА рд╣реИред рдЗрд╕рдореЗрдВ рдЬрд╛рдирдХрд╛рд░реА рд╣реЛрддреА рд╣реИ рдЬреЛ рдкрд╛рдпрдерди рдЗрдВрдЯрд░рдкреНрд░реЗрдЯрд░ рдХреЛ рдЗрдирдкреБрдЯ рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рд╢рд┐рдХреНрд╖рд┐рдд рд╡рд┐рдХрд▓реНрдк рдЪреБрдирдиреЗ рдореЗрдВ рдорджрдж рдХрд░рддреА рд╣реИред +- рдПрдХ `output_type` рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯ, рдЬреЛ рдЖрдЙрдЯрдкреБрдЯ рдЯрд╛рдЗрдк рдХреЛ рдирд┐рд░реНрджрд┐рд╖реНрдЯ рдХрд░рддрд╛ 
рд╣реИред `inputs` рдФрд░ `output_type` рджреЛрдиреЛрдВ рдХреЗ рд▓рд┐рдП рдЯрд╛рдЗрдк [Pydantic formats](https://docs.pydantic.dev/latest/concepts/json_schema/#generating-json-schema) рд╣реЛрдиреЗ рдЪрд╛рд╣рд┐рдП, рд╡реЗ рдЗрдирдореЗрдВ рд╕реЗ рдХреЛрдИ рднреА рд╣реЛ рд╕рдХрддреЗ рд╣реИрдВ: [`~AUTHORIZED_TYPES`]ред +- рдПрдХ `forward` рдореЗрдердб рдЬрд┐рд╕рдореЗрдВ рдПрдХреНрдЬреАрдХреНрдпреВрдЯ рдХрд┐рдпрд╛ рдЬрд╛рдиреЗ рд╡рд╛рд▓рд╛ рдЗрдиреНрдлрд░реЗрдВрд╕ рдХреЛрдб рд╣реЛрддрд╛ рд╣реИред + +рдПрдЬреЗрдВрдЯ рдореЗрдВ рдЙрдкрдпреЛрдЧ рдХрд┐рдП рдЬрд╛рдиреЗ рдХреЗ рд▓рд┐рдП рдЗрддрдирд╛ рд╣реА рдЪрд╛рд╣рд┐рдП! + +рдЯреВрд▓ рдмрдирд╛рдиреЗ рдХрд╛ рдПрдХ рдФрд░ рддрд░реАрдХрд╛ рд╣реИред [guided_tour](../guided_tour) рдореЗрдВ, рд╣рдордиреЗ `@tool` рдбреЗрдХреЛрд░реЗрдЯрд░ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рдПрдХ рдЯреВрд▓ рдХреЛ рд▓рд╛рдЧреВ рдХрд┐рдпрд╛ред [`tool`] рдбреЗрдХреЛрд░реЗрдЯрд░ рд╕рд░рд▓ рдЯреВрд▓реНрд╕ рдХреЛ рдкрд░рд┐рднрд╛рд╖рд┐рдд рдХрд░рдиреЗ рдХрд╛ рдЕрдиреБрд╢рдВрд╕рд┐рдд рддрд░реАрдХрд╛ рд╣реИ, рд▓реЗрдХрд┐рди рдХрднреА-рдХрднреА рдЖрдкрдХреЛ рдЗрд╕рд╕реЗ рдЕрдзрд┐рдХ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрддреА рд╣реИ: рдЕрдзрд┐рдХ рд╕реНрдкрд╖реНрдЯрддрд╛ рдХреЗ рд▓рд┐рдП рдПрдХ рдХреНрд▓рд╛рд╕ рдореЗрдВ рдХрдИ рдореЗрдердбреНрд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдирд╛, рдпрд╛ рдЕрддрд┐рд░рд┐рдХреНрдд рдХреНрд▓рд╛рд╕ рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯреНрд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдирд╛ред + +рдЗрд╕ рд╕реНрдерд┐рддрд┐ рдореЗрдВ, рдЖрдк рдКрдкрд░ рдмрддрд╛рдП рдЕрдиреБрд╕рд╛рд░ [`Tool`] рдХреЛ рд╕рдмрдХреНрд▓рд╛рд╕ рдХрд░рдХреЗ рдЕрдкрдирд╛ рдЯреВрд▓ рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВред + +### рдЕрдкрдирд╛ рдЯреВрд▓ рд╣рдм рдкрд░ рд╢реЗрдпрд░ рдХрд░реЗрдВ + +рдЖрдк рдЯреВрд▓ рдкрд░ [`~Tool.push_to_hub`] рдХреЛ рдХреЙрд▓ рдХрд░рдХреЗ рдЕрдкрдирд╛ рдХрд╕реНрдЯрдо рдЯреВрд▓ рд╣рдм рдкрд░ рд╢реЗрдпрд░ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред рд╕реБрдирд┐рд╢реНрдЪрд┐рдд рдХрд░реЗрдВ рдХрд┐ рдЖрдкрдиреЗ рд╣рдм рдкрд░ рдЗрд╕рдХреЗ рд▓рд┐рдП рдПрдХ 
рд░рд┐рдкреЙрдЬрд┐рдЯрд░реА рдмрдирд╛рдИ рд╣реИ рдФрд░ рдЖрдк рд░рд╛рдЗрдЯ (write) рдПрдХреНрд╕реЗрд╕ рд╡рд╛рд▓рд╛ рдЯреЛрдХрди рдЙрдкрдпреЛрдЧ рдХрд░ рд░рд╣реЗ рд╣реИрдВред
+
+```python
+model_downloads_tool.push_to_hub("{your_username}/hf-model-downloads", token="")
+```
+
+рд╣рдм рдкрд░ рдкреБрд╢ рдХрд╛рдо рдХрд░реЗ, рдЗрд╕рдХреЗ рд▓рд┐рдП рдЖрдкрдХреЗ рдЯреВрд▓ рдХреЛ рдХреБрдЫ рдирд┐рдпрдореЛрдВ рдХрд╛ рдкрд╛рд▓рди рдХрд░рдирд╛ рд╣реЛрдЧрд╛:
+- рд╕рднреА рдореЗрдердбреНрд╕ рд╕реЗрд▓реНрдл-рдХрдВрдЯреЗрдиреНрдб рд╣реИрдВ, рдпрд╛рдиреА рдЙрдирдХреЗ рдЖрд░реНрдЧреНрд╕ рд╕реЗ рдЖрдиреЗ рд╡рд╛рд▓реЗ рд╡реЗрд░рд┐рдПрдмрд▓реНрд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░реЗрдВред
+- рдЙрдкрд░реЛрдХреНрдд рдмрд┐рдВрджреБ рдХреЗ рдЕрдиреБрд╕рд╛рд░, **рд╕рднреА рдЗрдореНрдкреЛрд░реНрдЯреНрд╕ рдХреЛ рд╕реАрдзреЗ рдЯреВрд▓ рдХреЗ рдлрд╝рдВрдХреНрд╢рдВрд╕ рдХреЗ рднреАрддрд░ рдкрд░рд┐рднрд╛рд╖рд┐рдд рдХрд┐рдпрд╛ рдЬрд╛рдирд╛ рдЪрд╛рд╣рд┐рдП**, рдЕрдиреНрдпрдерд╛ рдЖрдкрдХреЛ рдЕрдкрдиреЗ рдХрд╕реНрдЯрдо рдЯреВрд▓ рдХреЗ рд╕рд╛рде [`~Tool.save`] рдпрд╛ [`~Tool.push_to_hub`] рдХреЛ рдХреЙрд▓ рдХрд░рдиреЗ рдХрд╛ рдкреНрд░рдпрд╛рд╕ рдХрд░рддреЗ рд╕рдордп рдПрд░рд░ рдорд┐рд▓реЗрдЧрд╛ред
+- рдпрджрд┐ рдЖрдк `__init__` рд╡рд┐рдзрд┐ рдХреЛ рд╕рдмрдХреНрд▓рд╛рд╕ рдХрд░рддреЗ рд╣реИрдВ, рддреЛ рдЖрдк рдЗрд╕реЗ `self` рдХреЗ рдЕрд▓рд╛рд╡рд╛ рдХреЛрдИ рдЕрдиреНрдп рдЖрд░реНрдЧреНрдпреВрдореЗрдВрдЯ рдирд╣реАрдВ рджреЗ рд╕рдХрддреЗред рдРрд╕рд╛ рдЗрд╕рд▓рд┐рдП рд╣реИ рдХреНрдпреЛрдВрдХрд┐ рдХрд┐рд╕реА рд╡рд┐рд╢рд┐рд╖реНрдЯ рдЯреВрд▓ рдЗрдВрд╕реНрдЯреЗрдВрд╕ рдХреЗ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬреЗрд╢рди рдХреЗ рджреМрд░рд╛рди рд╕реЗрдЯ рдХрд┐рдП рдЧрдП рдЖрд░реНрдЧреНрдпреВрдореЗрдВрдЯреНрд╕ рдХреЛ рд╕реАрд░рд┐рдпрд▓рд╛рдЗрдЬрд╝ рдХрд░рдирд╛ рдХрдард┐рди рд╣реЛрддрд╛ рд╣реИ, рдЬреЛ рдЙрдиреНрд╣реЗрдВ рд╣рдм рдкрд░ рдареАрдХ рд╕реЗ рд╕рд╛рдЭрд╛ рдХрд░рдиреЗ рд╕реЗ рд░реЛрдХрддрд╛ рд╣реИред рдФрд░ рд╡реИрд╕реЗ рднреА, рдПрдХ рд╡рд┐рд╢рд┐рд╖реНрдЯ рдХреНрд▓рд╛рд╕ рдмрдирд╛рдиреЗ рдХрд╛ 
рд╡рд┐рдЪрд╛рд░ рдпрд╣ рд╣реИ рдХрд┐ рдЬреЛ рднреА рдЪреАрдЬрд╝реЗрдВ рдЖрдкрдХреЛ рд╣рд╛рд░реНрдб-рдХреЛрдб рдХрд░рдиреА рд╣реЛрдВ, рдЙрдирдХреЗ рд▓рд┐рдП рдЖрдк рдХреНрд▓рд╛рд╕ рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯреНрд╕ рдкрд╣рд▓реЗ рд╕реЗ рд╣реА рд╕реЗрдЯ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ (рдмрд╕ `your_variable=(...)` рдХреЛ рд╕реАрдзреЗ `class YourTool(Tool):` рдкрдВрдХреНрддрд┐ рдХреЗ рдЕрдВрддрд░реНрдЧрдд рд╕реЗрдЯ рдХрд░реЗрдВ)ред рдФрд░ рдирд┐рд╢реНрдЪрд┐рдд рд░реВрдк рд╕реЗ рдЖрдк рдЕрднреА рднреА `self.your_variable` рдХреЛ рдЕрд╕рд╛рдЗрди рдХрд░рдХреЗ рдЕрдкрдиреЗ рдХреЛрдб рдореЗрдВ рдХрд╣реАрдВ рднреА рдПрдХ рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯ рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВред
+
+
+рдПрдХ рдмрд╛рд░ рдЬрдм рдЖрдкрдХрд╛ рдЯреВрд▓ рд╣рдм рдкрд░ рдкреБрд╢ рд╣реЛ рдЬрд╛рддрд╛ рд╣реИ, рддреЛ рдЖрдк рдЗрд╕реЗ рд╡рд┐рдЬрд╝реБрдЕрд▓рд╛рдЗрдЬрд╝ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред [рдпрд╣рд╛рдБ](https://huggingface.co/spaces/m-ric/hf-model-downloads) `model_downloads_tool` рд╣реИ рдЬрд┐рд╕реЗ рдореИрдВрдиреЗ рдкреБрд╢ рдХрд┐рдпрд╛ рд╣реИред рдЗрд╕рдореЗрдВ рдПрдХ рдЕрдЪреНрдЫрд╛ рдЧреНрд░реЗрдбрд┐рдпреЛ рдЗрдВрдЯрд░рдлрд╝реЗрд╕ рд╣реИред
+
+рдЯреВрд▓ рдлрд╝рд╛рдЗрд▓реЛрдВ рдореЗрдВ рдЧрд╣рд░рд╛рдИ рд╕реЗ рдЬрд╛рдиреЗ рдкрд░, рдЖрдк рдкрд╛ рд╕рдХрддреЗ рд╣реИрдВ рдХрд┐ рд╕рд╛рд░реА рдЯреВрд▓ рд▓реЙрдЬрд┐рдХ [tool.py](https://huggingface.co/spaces/m-ric/hf-model-downloads/blob/main/tool.py) рдХреЗ рдЕрдВрддрд░реНрдЧрдд рд╣реИред рдпрд╣реАрдВ рдЖрдк рдХрд┐рд╕реА рдФрд░ рджреНрд╡рд╛рд░рд╛ рд╢реЗрдпрд░ рдХрд┐рдП рдЧрдП рдЯреВрд▓ рдХрд╛ рдирд┐рд░реАрдХреНрд╖рдг рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред
+
+рдлрд┐рд░ рдЖрдк рдЯреВрд▓ рдХреЛ [`load_tool`] рдХреЗ рд╕рд╛рде рд▓реЛрдб рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ рдпрд╛ [`~Tool.from_hub`] рдХреЗ рд╕рд╛рде рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВ рдФрд░ рдЗрд╕реЗ рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдореЗрдВ `tools` рдкреИрд░рд╛рдореАрдЯрд░ рдореЗрдВ рдкрд╛рд╕ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред
+рдЪреВрдВрдХрд┐ рдЯреВрд▓реНрд╕ рдХреЛ рдЪрд▓рд╛рдиреЗ рдХрд╛ рдорддрд▓рдм 
рдХрд╕реНрдЯрдо рдХреЛрдб рдЪрд▓рд╛рдирд╛ рд╣реИ, рдЖрдкрдХреЛ рдпрд╣ рд╕реБрдирд┐рд╢реНрдЪрд┐рдд рдХрд░рдирд╛ рд╣реЛрдЧрд╛ рдХрд┐ рдЖрдк рд░рд┐рдкреЙрдЬрд┐рдЯрд░реА рдкрд░ рднрд░реЛрд╕рд╛ рдХрд░рддреЗ рд╣реИрдВ, рдЗрд╕рд▓рд┐рдП рд╣рдо рд╣рдм рд╕реЗ рдЯреВрд▓ рд▓реЛрдб рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП `trust_remote_code=True` рдкрд╛рд╕ рдХрд░рдиреЗ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд░рдЦрддреЗ рд╣реИрдВред + +```python +from smolagents import load_tool, CodeAgent + +model_download_tool = load_tool( + "{your_username}/hf-model-downloads", + trust_remote_code=True +) +``` + +### рд╕реНрдкреЗрд╕ рдХреЛ рдЯреВрд▓ рдХреЗ рд░реВрдк рдореЗрдВ рдЗрдореНрдкреЛрд░реНрдЯ рдХрд░реЗрдВ + +рдЖрдк [`Tool.from_space`] рдореЗрдердб рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рд╣рдм рд╕реЗ рдПрдХ рд╕реНрдкреЗрд╕ рдХреЛ рд╕реАрдзреЗ рдЯреВрд▓ рдХреЗ рд░реВрдк рдореЗрдВ рдЗрдореНрдкреЛрд░реНрдЯ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ! + +рдЖрдкрдХреЛ рдХреЗрд╡рд▓ рд╣рдм рдкрд░ рд╕реНрдкреЗрд╕ рдХреА ID, рдЗрд╕рдХрд╛ рдирд╛рдо, рдФрд░ рдПрдХ рд╡рд┐рд╡рд░рдг рдкреНрд░рджрд╛рди рдХрд░рдиреЗ рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реИ рдЬреЛ рдЖрдкрдХреЗ рдПрдЬреЗрдВрдЯ рдХреЛ рд╕рдордЭрдиреЗ рдореЗрдВ рдорджрдж рдХрд░реЗрдЧрд╛ рдХрд┐ рдЯреВрд▓ рдХреНрдпрд╛ рдХрд░рддрд╛ рд╣реИред рдЕрдВрджрд░ рд╕реЗ, рдпрд╣ рд╕реНрдкреЗрд╕ рдХреЛ рдХреЙрд▓ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП [`gradio-client`](https://pypi.org/project/gradio-client/) рд▓рд╛рдЗрдмреНрд░реЗрд░реА рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░реЗрдЧрд╛ред + +рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП, рдЪрд▓рд┐рдП рд╣рдм рд╕реЗ [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) рд╕реНрдкреЗрд╕ рдХреЛ рдЗрдореНрдкреЛрд░реНрдЯ рдХрд░реЗрдВ рдФрд░ рдЗрд╕рдХрд╛ рдЙрдкрдпреЛрдЧ рдПрдХ рдЗрдореЗрдЬ рдЬрдирд░реЗрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдХрд░реЗрдВред + +```python +image_generation_tool = Tool.from_space( + "black-forest-labs/FLUX.1-schnell", + name="image_generator", + description="Generate an image from a prompt" +) + +image_generation_tool("A sunny beach") +``` 
+рдФрд░ рджреЗрдЦреЛ, рдпрд╣ рддреБрдореНрд╣рд╛рд░реА рдЫрд╡рд┐ рд╣реИ! ЁЯПЦя╕П + + + +рдлрд┐рд░ рдЖрдк рдЗрд╕ рдЯреВрд▓ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд┐рд╕реА рдЕрдиреНрдп рдЯреВрд▓ рдХреА рддрд░рд╣ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП, рдЪрд▓рд┐рдП рдкреНрд░реЙрдореНрдкреНрдЯ `a rabbit wearing a space suit` рдХреЛ рд╕реБрдзрд╛рд░реЗрдВ рдФрд░ рдЗрд╕рдХреА рдПрдХ рдЗрдореЗрдЬ рдЬрдирд░реЗрдЯ рдХрд░реЗрдВред рдпрд╣ рдЙрджрд╛рд╣рд░рдг рдпрд╣ рднреА рджрд┐рдЦрд╛рддрд╛ рд╣реИ рдХрд┐ рдЖрдк рдПрдЬреЗрдВрдЯ рдХреЛ рдЕрддрд┐рд░рд┐рдХреНрдд рдЖрд░реНрдЧреНрдпреВрдореЗрдВрдЯреНрд╕ рдХреИрд╕реЗ рдкрд╛рд╕ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВред + +```python +from smolagents import CodeAgent, HfApiModel + +model = HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct") +agent = CodeAgent(tools=[image_generation_tool], model=model) + +agent.run( + "Improve this prompt, then generate an image of it.", additional_args={'user_prompt': 'A rabbit wearing a space suit'} +) +``` + +```text +=== Agent thoughts: +improved_prompt could be "A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background" + +Now that I have improved the prompt, I can use the image generator tool to generate an image based on this prompt. +>>> Agent is executing the code below: +image = image_generator(prompt="A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background") +final_answer(image) +``` + + + +рдпрд╣ рдХрд┐рддрдирд╛ рдХреВрд▓ рд╣реИ? 
ЁЯдй + +### LangChain рдЯреВрд▓реНрд╕ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░реЗрдВ + +рд╣рдо LangChain рдХреЛ рдкрд╕рдВрдж рдХрд░рддреЗ рд╣реИрдВ рдФрд░ рдорд╛рдирддреЗ рд╣реИрдВ рдХрд┐ рдЗрд╕рдХреЗ рдкрд╛рд╕ рдЯреВрд▓реНрд╕ рдХрд╛ рдПрдХ рдмрд╣реБрдд рдЖрдХрд░реНрд╖рдХ рд╕рдВрдЧреНрд░рд╣ рд╣реИред +LangChain рд╕реЗ рдПрдХ рдЯреВрд▓ рдЗрдореНрдкреЛрд░реНрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, `from_langchain()` рдореЗрдердб рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░реЗрдВред + +рдпрд╣рд╛рдБ рдмрддрд╛рдпрд╛ рдЧрдпрд╛ рд╣реИ рдХрд┐ рдЖрдк LangChain рд╡реЗрдм рд╕рд░реНрдЪ рдЯреВрд▓ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рдкрд░рд┐рдЪрдп рдХреЗ рд╕рд░реНрдЪ рд░рд┐рдЬрд▓реНрдЯ рдХреЛ рдХреИрд╕реЗ рдлрд┐рд░ рд╕реЗ рдмрдирд╛ рд╕рдХрддреЗ рд╣реИрдВред +рдЗрд╕ рдЯреВрд▓ рдХреЛ рдХрд╛рдо рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП `pip install langchain google-search-results -q` рдХреА рдЖрд╡рд╢реНрдпрдХрддрд╛ рд╣реЛрдЧреАред +```python +from langchain.agents import load_tools + +search_tool = Tool.from_langchain(load_tools(["serpapi"])[0]) + +agent = CodeAgent(tools=[search_tool], model=model) + +agent.run("How many more blocks (also denoted as layers) are in BERT base encoder compared to the encoder from the architecture proposed in Attention is All You Need?") +``` + +### рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдХреЗ рдЯреВрд▓рдмреЙрдХреНрд╕ рдХреЛ рдореИрдиреЗрдЬ рдХрд░реЗрдВ + +рдЖрдк рдПрдЬреЗрдВрдЯ рдХреЗ рдЯреВрд▓рдмреЙрдХреНрд╕ рдХреЛ `agent.tools` рдПрдЯреНрд░рд┐рдмреНрдпреВрдЯ рдореЗрдВ рдПрдХ рдЯреВрд▓ рдЬреЛрдбрд╝рдХрд░ рдпрд╛ рдмрджрд▓рдХрд░ рдореИрдиреЗрдЬ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдХреНрдпреЛрдВрдХрд┐ рдпрд╣ рдПрдХ рд╕реНрдЯреИрдВрдбрд░реНрдб рдбрд┐рдХреНрд╢рдирд░реА рд╣реИред + +рдЪрд▓рд┐рдП рдХреЗрд╡рд▓ рдбрд┐рдлрд╝реЙрд▓реНрдЯ рдЯреВрд▓рдмреЙрдХреНрд╕ рдХреЗ рд╕рд╛рде рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬрд╝ рдХрд┐рдП рдЧрдП рдореМрдЬреВрджрд╛ рдПрдЬреЗрдВрдЯ рдореЗрдВ `model_download_tool` рдЬреЛрдбрд╝реЗрдВред + +```python +from smolagents import HfApiModel + +model = 
HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct") + +agent = CodeAgent(tools=[], model=model, add_base_tools=True) +agent.tools[model_download_tool.name] = model_download_tool +``` +рдЕрдм рд╣рдо рдирдП рдЯреВрд▓ рдХрд╛ рд▓рд╛рдн рдЙрдард╛ рд╕рдХрддреЗ рд╣реИрдВред + +```python +agent.run( + "Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub but reverse the letters?" +) +``` + + +> [!TIP] +> рдПрдЬреЗрдВрдЯ рдореЗрдВ рдмрд╣реБрдд рдЕрдзрд┐рдХ рдЯреВрд▓реНрд╕ рди рдЬреЛрдбрд╝рдиреЗ рд╕реЗ рд╕рд╛рд╡рдзрд╛рди рд░рд╣реЗрдВ: рдпрд╣ рдХрдордЬреЛрд░ LLM рдЗрдВрдЬрди рдХреЛ рдУрд╡рд░рд╡реНрд╣реЗрд▓реНрдо рдХрд░ рд╕рдХрддрд╛ рд╣реИред + + +### рдЯреВрд▓реНрд╕ рдХрд╛ рдХрд▓реЗрдХреНрд╢рди рдЙрдкрдпреЛрдЧ рдХрд░реЗрдВ + +рдЖрдк `ToolCollection` рдСрдмреНрдЬреЗрдХреНрдЯ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рдЯреВрд▓ рдХрд▓реЗрдХреНрд╢рдВрд╕ рдХрд╛ рд▓рд╛рдн рдЙрдард╛ рд╕рдХрддреЗ рд╣реИрдВред рдпрд╣ рдпрд╛ рддреЛ рд╣рдм рд╕реЗ рдПрдХ рдХрд▓реЗрдХреНрд╢рди рдпрд╛ MCP рд╕рд░реНрд╡рд░ рдЯреВрд▓реНрд╕ рдХреЛ рд▓реЛрдб рдХрд░рдиреЗ рдХрд╛ рд╕рдорд░реНрдерди рдХрд░рддрд╛ рд╣реИред + +#### рд╣рдм рдореЗрдВ рдХрд▓реЗрдХреНрд╢рди рд╕реЗ рдЯреВрд▓ рдХрд▓реЗрдХреНрд╢рди + +рдЖрдк рдЙрд╕ рдХрд▓реЗрдХреНрд╢рди рдХреЗ рд╕реНрд▓рдЧ рдХреЗ рд╕рд╛рде рдЗрд╕рдХрд╛ рд▓рд╛рдн рдЙрдард╛ рд╕рдХрддреЗ рд╣реИрдВ рдЬрд┐рд╕рдХрд╛ рдЖрдк рдЙрдкрдпреЛрдЧ рдХрд░рдирд╛ рдЪрд╛рд╣рддреЗ рд╣реИрдВред +рдлрд┐рд░ рдЙрдиреНрд╣реЗрдВ рдЕрдкрдиреЗ рдПрдЬреЗрдВрдЯ рдХреЛ рдЗрдирд┐рд╢рд┐рдпрд▓рд╛рдЗрдЬрд╝ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдПрдХ рд▓рд┐рд╕реНрдЯ рдХреЗ рд░реВрдк рдореЗрдВ рдкрд╛рд╕ рдХрд░реЗрдВ, рдФрд░ рдЙрдирдХрд╛ рдЙрдкрдпреЛрдЧ рд╢реБрд░реВ рдХрд░реЗрдВ! 
+ +```py +from smolagents import ToolCollection, CodeAgent + +image_tool_collection = ToolCollection.from_hub( + collection_slug="huggingface-tools/diffusion-tools-6630bb19a942c2306a2cdb6f", + token="" +) +agent = CodeAgent(tools=[*image_tool_collection.tools], model=model, add_base_tools=True) + +agent.run("Please draw me a picture of rivers and lakes.") +``` + +рд╕реНрдЯрд╛рд░реНрдЯ рдХреЛ рддреЗрдЬ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП, рдЯреВрд▓реНрд╕ рдХреЗрд╡рд▓ рддрднреА рд▓реЛрдб рд╣реЛрддреЗ рд╣реИрдВ рдЬрдм рдПрдЬреЗрдВрдЯ рджреНрд╡рд╛рд░рд╛ рдХреЙрд▓ рдХрд┐рдП рдЬрд╛рддреЗ рд╣реИрдВред + +#### рдХрд┐рд╕реА рднреА MCP рд╕рд░реНрд╡рд░ рд╕реЗ рдЯреВрд▓ рдХрд▓реЗрдХреНрд╢рди + +[glama.ai](https://glama.ai/mcp/servers) рдпрд╛ [smithery.ai](https://smithery.ai/) рдкрд░ рдЙрдкрд▓рдмреНрдз рд╕реИрдХрдбрд╝реЛрдВ MCP рд╕рд░реНрд╡рд░реНрд╕ рд╕реЗ рдЯреВрд▓реНрд╕ рдХрд╛ рд▓рд╛рдн рдЙрдард╛рдПрдВред + +MCP рд╕рд░реНрд╡рд░реНрд╕ рдЯреВрд▓реНрд╕ рдХреЛ рдирд┐рдореНрдирд╛рдиреБрд╕рд╛рд░ `ToolCollection` рдСрдмреНрдЬреЗрдХреНрдЯ рдореЗрдВ рд▓реЛрдб рдХрд┐рдпрд╛ рдЬрд╛ рд╕рдХрддрд╛ рд╣реИ: + +```py +from smolagents import ToolCollection, CodeAgent +from mcp import StdioServerParameters + +server_parameters = StdioServerParameters( + command="uv", + args=["--quiet", "pubmedmcp@0.1.3"], + env={"UV_PYTHON": "3.12", **os.environ}, +) + +with ToolCollection.from_mcp(server_parameters) as tool_collection: + agent = CodeAgent(tools=[*tool_collection.tools], add_base_tools=True) + agent.run("Please find a remedy for hangover.") +``` \ No newline at end of file diff --git a/docs/source/zh/guided_tour.md b/docs/source/zh/guided_tour.md index 9816a4fa3..537e5948e 100644 --- a/docs/source/zh/guided_tour.md +++ b/docs/source/zh/guided_tour.md @@ -61,6 +61,7 @@ agent.run( ```python +# !pip install smolagents[transformers] from smolagents import CodeAgent, TransformersModel model_id = "meta-llama/Llama-3.2-3B-Instruct" @@ -78,6 +79,7 @@ agent.run( шжБф╜┐чФи 
`LiteLLMModel`я╝МцВищЬАшжБшо╛ч╜очОпхвГхПШщЗП `ANTHROPIC_API_KEY` цИЦ `OPENAI_API_KEY`я╝МцИЦшАЕхЬихИЭхзЛхМЦцЧ╢ф╝ащАТ `api_key` хПШщЗПуАВ ```python +# !pip install smolagents[litellm] from smolagents import CodeAgent, LiteLLMModel model = LiteLLMModel(model_id="anthropic/claude-3-5-sonnet-latest", api_key="YOUR_ANTHROPIC_API_KEY") # ф╣ЯхПпф╗еф╜┐чФи 'gpt-4o' @@ -91,12 +93,14 @@ agent.run( ```python +# !pip install smolagents[litellm] from smolagents import CodeAgent, LiteLLMModel model = LiteLLMModel( model_id="ollama_chat/llama3.2", # ш┐Щф╕кцибхЮЛхп╣ф║О agent шбМф╕║цЭешп┤цЬЙчВ╣х╝▒ api_base="http://localhost:11434", # хжВцЮЬщЬАшжБхПпф╗ецЫ┐цНвф╕║ш┐ЬчиЛ open-ai хЕ╝хо╣цЬНхКбхЩи api_key="YOUR_API_KEY" # хжВцЮЬщЬАшжБхПпф╗ецЫ┐цНвф╕║ API key + num_ctx=8192 # https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator ) agent = CodeAgent(tools=[], model=model, add_base_tools=True) diff --git a/docs/source/zh/reference/agents.md b/docs/source/zh/reference/agents.md index dc011d37e..3b05a6d28 100644 --- a/docs/source/zh/reference/agents.md +++ b/docs/source/zh/reference/agents.md @@ -55,6 +55,9 @@ Both require arguments `model` and list of tools `tools` at initialization. ### GradioUI +> [!TIP] +> You must have `gradio` installed to use the UI. Please run `pip install smolagents[gradio]` if it's not the case. + [[autodoc]] GradioUI ## Models @@ -99,6 +102,9 @@ print(model([{"role": "user", "content": "Ok!"}], stop_sequences=["great"])) >>> What a ``` +> [!TIP] +> You must have `transformers` and `torch` installed on your machine. Please run `pip install smolagents[transformers]` if it's not the case. 
+ [[autodoc]] TransformersModel ### HfApiModel diff --git a/docs/source/zh/tutorials/tools.md b/docs/source/zh/tutorials/tools.md index a5d15eb36..e62f6b660 100644 --- a/docs/source/zh/tutorials/tools.md +++ b/docs/source/zh/tutorials/tools.md @@ -139,7 +139,7 @@ model = HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct") agent = CodeAgent(tools=[image_generation_tool], model=model) agent.run( - "Improve this prompt, then generate an image of it.", prompt='A rabbit wearing a space suit' + "Improve this prompt, then generate an image of it.", additional_args={'user_prompt': 'A rabbit wearing a space suit'} ) ``` diff --git a/examples/agent_from_any_llm.py b/examples/agent_from_any_llm.py new file mode 100644 index 000000000..eff667f33 --- /dev/null +++ b/examples/agent_from_any_llm.py @@ -0,0 +1,51 @@ +from typing import Optional + +from smolagents import HfApiModel, LiteLLMModel, TransformersModel, tool +from smolagents.agents import CodeAgent, ToolCallingAgent + + +# Choose which inference type to use! + +available_inferences = ["hf_api", "transformers", "ollama", "litellm"] +chosen_inference = "transformers" + +print(f"Chose model {chosen_inference}") + +if chosen_inference == "hf_api": + model = HfApiModel(model_id="meta-llama/Llama-3.3-70B-Instruct") + +elif chosen_inference == "transformers": + model = TransformersModel(model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct", device_map="auto", max_new_tokens=1000) + +elif chosen_inference == "ollama": + model = LiteLLMModel( + model_id="ollama_chat/llama3.2", + api_base="http://localhost:11434", # replace with remote open-ai compatible server if necessary + api_key="your-api-key", # replace with API key if necessary + ) + +elif chosen_inference == "litellm": + # For anthropic: change model_id below to 'anthropic/claude-3-5-sonnet-latest' + model = LiteLLMModel(model_id="gpt-4o") + + +@tool +def get_weather(location: str, celsius: Optional[bool] = False) -> str: + """ + Get weather in the next days at given location. 
+ Secretly this tool does not care about the location, it hates the weather everywhere. + + Args: + location: the location + celsius: the temperature + """ + return "The weather is UNGODLY with torrential rains and temperatures below -10┬░C" + + +agent = ToolCallingAgent(tools=[get_weather], model=model) + +print("ToolCallingAgent:", agent.run("What's the weather like in Paris?")) + +agent = CodeAgent(tools=[get_weather], model=model) + +print("ToolCallingAgent:", agent.run("What's the weather like in Paris?")) diff --git a/examples/benchmark.ipynb b/examples/benchmark.ipynb index e0b59d591..065adcecd 100644 --- a/examples/benchmark.ipynb +++ b/examples/benchmark.ipynb @@ -181,6 +181,7 @@ "import datasets\n", "import pandas as pd\n", "\n", + "\n", "eval_ds = datasets.load_dataset(\"m-ric/smol_agents_benchmark\")[\"test\"]\n", "pd.DataFrame(eval_ds)" ] @@ -199,26 +200,28 @@ "metadata": {}, "outputs": [], "source": [ - "import time\n", "import json\n", "import os\n", "import re\n", "import string\n", + "import time\n", "import warnings\n", - "from tqdm import tqdm\n", "from typing import List\n", "\n", + "from dotenv import load_dotenv\n", + "from tqdm import tqdm\n", + "\n", "from smolagents import (\n", - " GoogleSearchTool,\n", + " AgentError,\n", " CodeAgent,\n", - " ToolCallingAgent,\n", + " GoogleSearchTool,\n", " HfApiModel,\n", - " AgentError,\n", - " VisitWebpageTool,\n", " PythonInterpreterTool,\n", + " ToolCallingAgent,\n", + " VisitWebpageTool,\n", ")\n", "from smolagents.agents import ActionStep\n", - "from dotenv import load_dotenv\n", + "\n", "\n", "load_dotenv()\n", "os.makedirs(\"output\", exist_ok=True)\n", @@ -231,9 +234,7 @@ " return str(obj)\n", "\n", "\n", - "def answer_questions(\n", - " eval_ds, file_name, agent, model_id, action_type, is_vanilla_llm=False\n", - "):\n", + "def answer_questions(eval_ds, file_name, agent, model_id, action_type, is_vanilla_llm=False):\n", " answered_questions = []\n", " if os.path.exists(file_name):\n", " with 
open(file_name, \"r\") as f:\n", @@ -365,23 +366,18 @@ " ma_elems = split_string(model_answer)\n", "\n", " if len(gt_elems) != len(ma_elems): # check length is the same\n", - " warnings.warn(\n", - " \"Answer lists have different lengths, returning False.\", UserWarning\n", - " )\n", + " warnings.warn(\"Answer lists have different lengths, returning False.\", UserWarning)\n", " return False\n", "\n", " comparisons = []\n", - " for ma_elem, gt_elem in zip(\n", - " ma_elems, gt_elems\n", - " ): # compare each element as float or str\n", + " for ma_elem, gt_elem in zip(ma_elems, gt_elems): # compare each element as float or str\n", " if is_float(gt_elem):\n", " normalized_ma_elem = normalize_number_str(ma_elem)\n", " comparisons.append(normalized_ma_elem == float(gt_elem))\n", " else:\n", " # we do not remove punct since comparisons can include punct\n", " comparisons.append(\n", - " normalize_str(ma_elem, remove_punct=False)\n", - " == normalize_str(gt_elem, remove_punct=False)\n", + " normalize_str(ma_elem, remove_punct=False) == normalize_str(gt_elem, remove_punct=False)\n", " )\n", " return all(comparisons)\n", "\n", @@ -441,9 +437,7 @@ " action_type = \"vanilla\"\n", " llm = HfApiModel(model_id)\n", " file_name = f\"output/{model_id.replace('/', '_')}-{action_type}-26-dec-2024.jsonl\"\n", - " answer_questions(\n", - " eval_ds, file_name, llm, model_id, action_type, is_vanilla_llm=True\n", - " )" + " answer_questions(eval_ds, file_name, llm, model_id, action_type, is_vanilla_llm=True)" ] }, { @@ -461,6 +455,7 @@ "source": [ "from smolagents import LiteLLMModel\n", "\n", + "\n", "litellm_model_ids = [\"gpt-4o\", \"anthropic/claude-3-5-sonnet-latest\"]\n", "\n", "for model_id in litellm_model_ids:\n", @@ -492,9 +487,7 @@ " action_type = \"vanilla\"\n", " llm = LiteLLMModel(model_id)\n", " file_name = f\"output/{model_id.replace('/', '_')}-{action_type}-26-dec-2024.jsonl\"\n", - " answer_questions(\n", - " eval_ds, file_name, llm, model_id, action_type, 
is_vanilla_llm=True\n", - " )" + " answer_questions(eval_ds, file_name, llm, model_id, action_type, is_vanilla_llm=True)" ] }, { @@ -556,9 +549,11 @@ } ], "source": [ - "import pandas as pd\n", "import glob\n", "\n", + "import pandas as pd\n", + "\n", + "\n", "res = []\n", "for file_path in glob.glob(\"output/*.jsonl\"):\n", " data = []\n", @@ -595,11 +590,7 @@ "\n", "result_df[\"correct\"] = result_df.apply(get_correct, axis=1)\n", "\n", - "result_df = (\n", - " (result_df.groupby([\"model_id\", \"source\", \"action_type\"])[[\"correct\"]].mean() * 100)\n", - " .round(1)\n", - " .reset_index()\n", - ")" + "result_df = (result_df.groupby([\"model_id\", \"source\", \"action_type\"])[[\"correct\"]].mean() * 100).round(1).reset_index()" ] }, { @@ -895,6 +886,7 @@ "import pandas as pd\n", "from matplotlib.legend_handler import HandlerTuple # Added import\n", "\n", + "\n", "# Assuming pivot_df is your original dataframe\n", "models = pivot_df[\"model_id\"].unique()\n", "sources = pivot_df[\"source\"].unique()\n", @@ -961,14 +953,10 @@ "handles, labels = ax.get_legend_handles_labels()\n", "unique_sources = sources\n", "legend_elements = [\n", - " (handles[i * 2], handles[i * 2 + 1], labels[i * 2].replace(\" (Agent)\", \"\"))\n", - " for i in range(len(unique_sources))\n", + " (handles[i * 2], handles[i * 2 + 1], labels[i * 2].replace(\" (Agent)\", \"\")) for i in range(len(unique_sources))\n", "]\n", "custom_legend = ax.legend(\n", - " [\n", - " (agent_handle, vanilla_handle)\n", - " for agent_handle, vanilla_handle, _ in legend_elements\n", - " ],\n", + " [(agent_handle, vanilla_handle) for agent_handle, vanilla_handle, _ in legend_elements],\n", " [label for _, _, label in legend_elements],\n", " handler_map={tuple: HandlerTuple(ndivide=None)},\n", " bbox_to_anchor=(1.05, 1),\n", @@ -1006,9 +994,7 @@ " # Start the matrix environment with 4 columns\n", " # l for left-aligned model and task, c for centered numbers\n", " mathjax_table = \"\\\\begin{array}{llcc}\\n\"\n", - 
" mathjax_table += (\n", - " \"\\\\text{Model} & \\\\text{Task} & \\\\text{Agent} & \\\\text{Vanilla} \\\\\\\\\\n\"\n", - " )\n", + " mathjax_table += \"\\\\text{Model} & \\\\text{Task} & \\\\text{Agent} & \\\\text{Vanilla} \\\\\\\\\\n\"\n", " mathjax_table += \"\\\\hline\\n\"\n", "\n", " # Sort the DataFrame by model_id and source\n", @@ -1033,9 +1019,7 @@ " model_display = \"\\\\;\"\n", "\n", " # Add the data row\n", - " mathjax_table += (\n", - " f\"{model_display} & {source} & {row['agent']} & {row['vanilla']} \\\\\\\\\\n\"\n", - " )\n", + " mathjax_table += f\"{model_display} & {source} & {row['agent']} & {row['vanilla']} \\\\\\\\\\n\"\n", "\n", " current_model = model\n", "\n", diff --git a/examples/e2b_example.py b/examples/e2b_example.py index 843e14406..a58c7b169 100644 --- a/examples/e2b_example.py +++ b/examples/e2b_example.py @@ -1,7 +1,9 @@ -from smolagents import Tool, CodeAgent, HfApiModel -from smolagents.default_tools import VisitWebpageTool from dotenv import load_dotenv +from smolagents import CodeAgent, HfApiModel, Tool +from smolagents.default_tools import VisitWebpageTool + + load_dotenv() @@ -16,10 +18,11 @@ def __init__(self): self.url = "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png" def forward(self): - from PIL import Image - import requests from io import BytesIO + import requests + from PIL import Image + response = requests.get(self.url) return Image.open(BytesIO(response.content)) @@ -46,4 +49,5 @@ def forward(self): # Try the agent in a Gradio UI from smolagents import GradioUI + GradioUI(agent).launch() diff --git a/examples/gradio_upload.py b/examples/gradio_upload.py index 061d22692..746013627 100644 --- a/examples/gradio_upload.py +++ b/examples/gradio_upload.py @@ -1,4 +1,5 @@ -from smolagents import CodeAgent, HfApiModel, GradioUI +from smolagents import CodeAgent, GradioUI, HfApiModel + agent = CodeAgent(tools=[], model=HfApiModel(), max_steps=4, verbosity_level=1) diff --git a/examples/inspect_runs.py 
b/examples/inspect_runs.py index 3e24efaca..9322f0bac 100644 --- a/examples/inspect_runs.py +++ b/examples/inspect_runs.py @@ -1,24 +1,22 @@ +from openinference.instrumentation.smolagents import SmolagentsInstrumentor from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor -from openinference.instrumentation.smolagents import SmolagentsInstrumentor - from smolagents import ( CodeAgent, DuckDuckGoSearchTool, - VisitWebpageTool, + HfApiModel, ManagedAgent, ToolCallingAgent, - HfApiModel, + VisitWebpageTool, ) + # Let's setup the instrumentation first trace_provider = TracerProvider() -trace_provider.add_span_processor( - SimpleSpanProcessor(OTLPSpanExporter("http://0.0.0.0:6006/v1/traces")) -) +trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter("http://0.0.0.0:6006/v1/traces"))) SmolagentsInstrumentor().instrument(tracer_provider=trace_provider, skip_dep_check=True) @@ -39,6 +37,4 @@ model=model, managed_agents=[managed_agent], ) -manager_agent.run( - "If the US keeps it 2024 growth rate, how many years would it take for the GDP to double?" -) +manager_agent.run("If the US keeps it 2024 growth rate, how many years would it take for the GDP to double?") diff --git a/examples/multiple_tools.py b/examples/multiple_tools.py new file mode 100644 index 000000000..39ed90767 --- /dev/null +++ b/examples/multiple_tools.py @@ -0,0 +1,257 @@ +from typing import Optional + +import requests + +# from smolagents.agents import ToolCallingAgent +from smolagents import CodeAgent, HfApiModel, tool + + +# Choose which LLM engine to use! 
+model = HfApiModel() +# model = TransformersModel(model_id="meta-llama/Llama-3.2-2B-Instruct") + +# For anthropic: change model_id below to 'anthropic/claude-3-5-sonnet-20240620' +# model = LiteLLMModel(model_id="gpt-4o") + + +@tool +def get_weather(location: str, celsius: Optional[bool] = False) -> str: + """ + Get the current weather at the given location using the WeatherStack API. + + Args: + location: The location (city name). + celsius: Whether to return the temperature in Celsius (default is False, which returns Fahrenheit). + + Returns: + A string describing the current weather at the location. + """ + api_key = "your_api_key" # Replace with your API key from https://weatherstack.com/ + units = "m" if celsius else "f" # 'm' for Celsius, 'f' for Fahrenheit + + url = f"http://api.weatherstack.com/current?access_key={api_key}&query={location}&units={units}" + + try: + response = requests.get(url) + response.raise_for_status() # Raise an exception for HTTP errors + + data = response.json() + + if data.get("error"): # Check if there's an error in the response + return f"Error: {data['error'].get('info', 'Unable to fetch weather data.')}" + + weather = data["current"]["weather_descriptions"][0] + temp = data["current"]["temperature"] + temp_unit = "┬░C" if celsius else "┬░F" + + return f"The current weather in {location} is {weather} with a temperature of {temp} {temp_unit}." + + except requests.exceptions.RequestException as e: + return f"Error fetching weather data: {str(e)}" + + +@tool +def convert_currency(amount: float, from_currency: str, to_currency: str) -> str: + """ + Converts a specified amount from one currency to another using the ExchangeRate-API. + + Args: + amount: The amount of money to convert. + from_currency: The currency code of the currency to convert from (e.g., 'USD'). + to_currency: The currency code of the currency to convert to (e.g., 'EUR'). 
+ + Returns: + str: A string describing the converted amount in the target currency, or an error message if the conversion fails. + + Raises: + requests.exceptions.RequestException: If there is an issue with the HTTP request to the ExchangeRate-API. + """ + api_key = "your_api_key" # Replace with your actual API key from https://www.exchangerate-api.com/ + url = f"https://v6.exchangerate-api.com/v6/{api_key}/latest/{from_currency}" + + try: + response = requests.get(url) + response.raise_for_status() + + data = response.json() + exchange_rate = data["conversion_rates"].get(to_currency) + + if not exchange_rate: + return f"Error: Unable to find exchange rate for {from_currency} to {to_currency}." + + converted_amount = amount * exchange_rate + return f"{amount} {from_currency} is equal to {converted_amount} {to_currency}." + + except requests.exceptions.RequestException as e: + return f"Error fetching conversion data: {str(e)}" + + +@tool +def get_news_headlines() -> str: + """ + Fetches the top news headlines from the News API for the United States. + This function makes a GET request to the News API to retrieve the top news headlines + for the United States. It returns the titles and sources of the top 5 articles as a + formatted string. If no articles are available, it returns a message indicating that + no news is available. In case of a request error, it returns an error message. + Returns: + str: A string containing the top 5 news headlines and their sources, or an error message. + """ + api_key = "your_api_key" # Replace with your actual API key from https://newsapi.org/ + url = f"https://newsapi.org/v2/top-headlines?country=us&apiKey={api_key}" + + try: + response = requests.get(url) + response.raise_for_status() + + data = response.json() + articles = data["articles"] + + if not articles: + return "No news available at the moment." 
+ + headlines = [f"{article['title']} - {article['source']['name']}" for article in articles[:5]] + return "\n".join(headlines) + + except requests.exceptions.RequestException as e: + return f"Error fetching news data: {str(e)}" + + +@tool +def get_joke() -> str: + """ + Fetches a random joke from the JokeAPI. + This function sends a GET request to the JokeAPI to retrieve a random joke. + It handles both single jokes and two-part jokes (setup and delivery). + If the request fails or the response does not contain a joke, an error message is returned. + Returns: + str: The joke as a string, or an error message if the joke could not be fetched. + """ + url = "https://v2.jokeapi.dev/joke/Any?type=single" + + try: + response = requests.get(url) + response.raise_for_status() + + data = response.json() + + if "joke" in data: + return data["joke"] + elif "setup" in data and "delivery" in data: + return f"{data['setup']} - {data['delivery']}" + else: + return "Error: Unable to fetch joke." + + except requests.exceptions.RequestException as e: + return f"Error fetching joke: {str(e)}" + + +@tool +def get_time_in_timezone(location: str) -> str: + """ + Fetches the current time for a given location using the World Time API. + Args: + location: The location for which to fetch the current time, formatted as 'Region/City'. + Returns: + str: A string indicating the current time in the specified location, or an error message if the request fails. + Raises: + requests.exceptions.RequestException: If there is an issue with the HTTP request. + """ + url = f"http://worldtimeapi.org/api/timezone/{location}.json" + + try: + response = requests.get(url) + response.raise_for_status() + + data = response.json() + current_time = data["datetime"] + + return f"The current time in {location} is {current_time}." 
+ + except requests.exceptions.RequestException as e: + return f"Error fetching time data: {str(e)}" + + +@tool +def get_random_fact() -> str: + """ + Fetches a random fact from the "uselessfacts.jsph.pl" API. + Returns: + str: A string containing the random fact or an error message if the request fails. + """ + url = "https://uselessfacts.jsph.pl/random.json?language=en" + + try: + response = requests.get(url) + response.raise_for_status() + + data = response.json() + + return f"Random Fact: {data['text']}" + + except requests.exceptions.RequestException as e: + return f"Error fetching random fact: {str(e)}" + + +@tool +def search_wikipedia(query: str) -> str: + """ + Fetches a summary of a Wikipedia page for a given query. + Args: + query: The search term to look up on Wikipedia. + Returns: + str: A summary of the Wikipedia page if successful, or an error message if the request fails. + Raises: + requests.exceptions.RequestException: If there is an issue with the HTTP request. + """ + url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{query}" + + try: + response = requests.get(url) + response.raise_for_status() + + data = response.json() + title = data["title"] + extract = data["extract"] + + return f"Summary for {title}: {extract}" + + except requests.exceptions.RequestException as e: + return f"Error fetching Wikipedia data: {str(e)}" + + +# If you want to use the ToolCallingAgent instead, uncomment the following lines as they both will work + +# agent = ToolCallingAgent( +# tools=[ +# convert_currency, +# get_weather, +# get_news_headlines, +# get_joke, +# get_random_fact, +# search_wikipedia, +# ], +# model=model, +# ) + + +agent = CodeAgent( + tools=[ + convert_currency, + get_weather, + get_news_headlines, + get_joke, + get_random_fact, + search_wikipedia, + ], + model=model, +) + +# Uncomment the line below to run the agent with a specific query + +agent.run("5000 dollars to Euros") +# agent.run("What is the weather in New York?") +# 
agent.run("Give me the top news headlines") +# agent.run("Tell me a joke") +# agent.run("Tell me a Random Fact") +# agent.run("who is Elon Musk?") diff --git a/examples/rag.py b/examples/rag.py index 83a201d7e..f5a2e2cb1 100644 --- a/examples/rag.py +++ b/examples/rag.py @@ -8,13 +8,10 @@ knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train") -knowledge_base = knowledge_base.filter( - lambda row: row["source"].startswith("huggingface/transformers") -) +knowledge_base = knowledge_base.filter(lambda row: row["source"].startswith("huggingface/transformers")) source_docs = [ - Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]}) - for doc in knowledge_base + Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]}) for doc in knowledge_base ] text_splitter = RecursiveCharacterTextSplitter( @@ -51,14 +48,12 @@ def forward(self, query: str) -> str: query, ) return "\nRetrieved documents:\n" + "".join( - [ - f"\n\n===== Document {str(i)} =====\n" + doc.page_content - for i, doc in enumerate(docs) - ] + [f"\n\n===== Document {str(i)} =====\n" + doc.page_content for i, doc in enumerate(docs)] ) -from smolagents import HfApiModel, CodeAgent +from smolagents import CodeAgent, HfApiModel + retriever_tool = RetrieverTool(docs_processed) agent = CodeAgent( @@ -68,9 +63,7 @@ def forward(self, query: str) -> str: verbosity_level=2, ) -agent_output = agent.run( - "For a transformers model training, which is slower, the forward or the backward pass?" 
-) +agent_output = agent.run("For a transformers model training, which is slower, the forward or the backward pass?") print("Final output:") print(agent_output) diff --git a/examples/rag_using_chromadb.py b/examples/rag_using_chromadb.py new file mode 100644 index 000000000..864bfc848 --- /dev/null +++ b/examples/rag_using_chromadb.py @@ -0,0 +1,130 @@ +import os + +import datasets +from langchain.docstore.document import Document +from langchain.text_splitter import RecursiveCharacterTextSplitter +from langchain_chroma import Chroma + +# from langchain_community.document_loaders import PyPDFLoader +from langchain_huggingface import HuggingFaceEmbeddings +from tqdm import tqdm +from transformers import AutoTokenizer + +# from langchain_openai import OpenAIEmbeddings +from smolagents import LiteLLMModel, Tool +from smolagents.agents import CodeAgent + + +# from smolagents.agents import ToolCallingAgent + + +knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train") + +source_docs = [ + Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]}) for doc in knowledge_base +] + +## For your own PDFs, you can use the following code to load them into source_docs +# pdf_directory = "pdfs" +# pdf_files = [ +# os.path.join(pdf_directory, f) +# for f in os.listdir(pdf_directory) +# if f.endswith(".pdf") +# ] +# source_docs = [] + +# for file_path in pdf_files: +# loader = PyPDFLoader(file_path) +# docs.extend(loader.load()) + +text_splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer( + AutoTokenizer.from_pretrained("thenlper/gte-small"), + chunk_size=200, + chunk_overlap=20, + add_start_index=True, + strip_whitespace=True, + separators=["\n\n", "\n", ".", " ", ""], +) + +# Split docs and keep only unique ones +print("Splitting documents...") +docs_processed = [] +unique_texts = {} +for doc in tqdm(source_docs): + new_docs = text_splitter.split_documents([doc]) + for new_doc in new_docs: + if new_doc.page_content 
not in unique_texts: + unique_texts[new_doc.page_content] = True + docs_processed.append(new_doc) + + +print("Embedding documents... This should take a few minutes (5 minutes on MacBook with M1 Pro)") +# Initialize embeddings and ChromaDB vector store +embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2") + + +# embeddings = OpenAIEmbeddings(model="text-embedding-3-small") + +vector_store = Chroma.from_documents(docs_processed, embeddings, persist_directory="./chroma_db") + + +class RetrieverTool(Tool): + name = "retriever" + description = ( + "Uses semantic search to retrieve the parts of documentation that could be most relevant to answer your query." + ) + inputs = { + "query": { + "type": "string", + "description": "The query to perform. This should be semantically close to your target documents. Use the affirmative form rather than a question.", + } + } + output_type = "string" + + def __init__(self, vector_store, **kwargs): + super().__init__(**kwargs) + self.vector_store = vector_store + + def forward(self, query: str) -> str: + assert isinstance(query, str), "Your search query must be a string" + docs = self.vector_store.similarity_search(query, k=3) + return "\nRetrieved documents:\n" + "".join( + [f"\n\n===== Document {str(i)} =====\n" + doc.page_content for i, doc in enumerate(docs)] + ) + + +retriever_tool = RetrieverTool(vector_store) + +# Choose which LLM engine to use! 
+ +# from smolagents import HfApiModel +# model = HfApiModel(model_id="meta-llama/Llama-3.3-70B-Instruct") + +# from smolagents import TransformersModel +# model = TransformersModel(model_id="meta-llama/Llama-3.2-2B-Instruct") + +# For anthropic: change model_id below to 'anthropic/claude-3-5-sonnet-20240620' and also change 'os.environ.get("ANTHROPIC_API_KEY")' +model = LiteLLMModel( + model_id="groq/llama-3.3-70b-versatile", + api_key=os.environ.get("GROQ_API_KEY"), +) + +# # You can also use the ToolCallingAgent class +# agent = ToolCallingAgent( +# tools=[retriever_tool], +# model=model, +# verbose=True, +# ) + +agent = CodeAgent( + tools=[retriever_tool], + model=model, + max_steps=4, + verbosity_level=2, +) + +agent_output = agent.run("How can I push a model to the Hub?") + + +print("Final output:") +print(agent_output) diff --git a/examples/text_to_sql.py b/examples/text_to_sql.py index 60b84f651..c25f0caa0 100644 --- a/examples/text_to_sql.py +++ b/examples/text_to_sql.py @@ -1,16 +1,17 @@ from sqlalchemy import ( - create_engine, - MetaData, - Table, Column, - String, - Integer, Float, + Integer, + MetaData, + String, + Table, + create_engine, insert, inspect, text, ) + engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() @@ -40,9 +41,7 @@ inspector = inspect(engine) columns_info = [(col["name"], col["type"]) for col in inspector.get_columns("receipts")] -table_description = "Columns:\n" + "\n".join( - [f" - {name}: {col_type}" for name, col_type in columns_info] -) +table_description = "Columns:\n" + "\n".join([f" - {name}: {col_type}" for name, col_type in columns_info]) print(table_description) from smolagents import tool @@ -72,6 +71,7 @@ def sql_engine(query: str) -> str: from smolagents import CodeAgent, HfApiModel + agent = CodeAgent( tools=[sql_engine], model=HfApiModel("meta-llama/Meta-Llama-3.1-8B-Instruct"), diff --git a/examples/tool_calling_agent_from_any_llm.py b/examples/tool_calling_agent_from_any_llm.py deleted file mode 
100644 index 05daaa50e..000000000 --- a/examples/tool_calling_agent_from_any_llm.py +++ /dev/null @@ -1,28 +0,0 @@ -from smolagents.agents import ToolCallingAgent -from smolagents import tool, LiteLLMModel -from typing import Optional - -# Choose which LLM engine to use! -# model = HfApiModel(model_id="meta-llama/Llama-3.3-70B-Instruct") -# model = TransformersModel(model_id="meta-llama/Llama-3.2-2B-Instruct") - -# For anthropic: change model_id below to 'anthropic/claude-3-5-sonnet-20240620' -model = LiteLLMModel(model_id="gpt-4o") - - -@tool -def get_weather(location: str, celsius: Optional[bool] = False) -> str: - """ - Get weather in the next days at given location. - Secretly this tool does not care about the location, it hates the weather everywhere. - - Args: - location: the location - celsius: the temperature - """ - return "The weather is UNGODLY with torrential rains and temperatures below -10┬░C" - - -agent = ToolCallingAgent(tools=[get_weather], model=model) - -print(agent.run("What's the weather like in Paris?")) diff --git a/examples/tool_calling_agent_mcp.py b/examples/tool_calling_agent_mcp.py deleted file mode 100644 index c0e613a1e..000000000 --- a/examples/tool_calling_agent_mcp.py +++ /dev/null @@ -1,27 +0,0 @@ -"""An example of loading a ToolCollection directly from an MCP server. - -Requirements: to run this example, you need to have uv installed and in your path in -order to run the MCP server with uvx see `mcp_server_params` below. - -Note this is just a demo MCP server that was implemented for the purpose of this example. -It only provide a single tool to search amongst pubmed papers abstracts. 
- -Usage: ->>> uv run examples/tool_calling_agent_mcp.py -""" - -import os - -from mcp import StdioServerParameters -from smolagents import CodeAgent, HfApiModel, ToolCollection - -mcp_server_params = StdioServerParameters( - command="uvx", - args=["--quiet", "pubmedmcp@0.1.3"], - env={"UV_PYTHON": "3.12", **os.environ}, -) - -with ToolCollection.from_mcp(mcp_server_params) as tool_collection: - # print(tool_collection.tools[0](request={"term": "efficient treatment hangover"})) - agent = CodeAgent(tools=tool_collection.tools, model=HfApiModel(), max_steps=4) - agent.run("Find me one risk associated with drinking alcohol regularly on low doses for humans.") diff --git a/examples/tool_calling_agent_ollama.py b/examples/tool_calling_agent_ollama.py deleted file mode 100644 index c7198d68d..000000000 --- a/examples/tool_calling_agent_ollama.py +++ /dev/null @@ -1,27 +0,0 @@ -from smolagents.agents import ToolCallingAgent -from smolagents import tool, LiteLLMModel -from typing import Optional - -model = LiteLLMModel( - model_id="ollama_chat/llama3.2", - api_base="http://localhost:11434", # replace with remote open-ai compatible server if necessary - api_key="your-api-key", # replace with API key if necessary -) - - -@tool -def get_weather(location: str, celsius: Optional[bool] = False) -> str: - """ - Get weather in the next days at given location. - Secretly this tool does not care about the location, it hates the weather everywhere. 
- - Args: - location: the location - celsius: the temperature - """ - return "The weather is UNGODLY with torrential rains and temperatures below -10┬░C" - - -agent = ToolCallingAgent(tools=[get_weather], model=model) - -print(agent.run("What's the weather like in Paris?")) diff --git a/examples/vlm_web_browser.py b/examples/vlm_web_browser.py new file mode 100644 index 000000000..01d50a517 --- /dev/null +++ b/examples/vlm_web_browser.py @@ -0,0 +1,222 @@ +from io import BytesIO +from time import sleep + +import helium +from dotenv import load_dotenv +from PIL import Image +from selenium import webdriver +from selenium.common.exceptions import ElementNotInteractableException, TimeoutException +from selenium.webdriver.common.by import By +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.support.ui import WebDriverWait + +from smolagents import CodeAgent, LiteLLMModel, OpenAIServerModel, TransformersModel, tool # noqa: F401 +from smolagents.agents import ActionStep + + +load_dotenv() +import os + + +# Let's use Qwen-2VL-72B via an inference provider like Fireworks AI + +model = OpenAIServerModel( + api_key=os.getenv("FIREWORKS_API_KEY"), + api_base="https://api.fireworks.ai/inference/v1", + model_id="accounts/fireworks/models/qwen2-vl-72b-instruct", +) + +# You can also use a close model + +# model = LiteLLMModel( +# model_id="gpt-4o", +# api_key=os.getenv("OPENAI_API_KEY"), +# ) + +# locally a good candidate is Qwen2-VL-7B-Instruct +# model = TransformersModel( +# model_id="Qwen/Qwen2-VL-7B-Instruct", +# device_map = "auto", +# flatten_messages_as_text=False +# ) + + +# Prepare callback +def save_screenshot(step_log: ActionStep, agent: CodeAgent) -> None: + sleep(1.0) # Let JavaScript animations happen before taking the screenshot + driver = helium.get_driver() + current_step = step_log.step_number + if driver is not None: + for step_logs in agent.logs: # Remove previous screenshots from logs for lean processing + if 
isinstance(step_log, ActionStep) and step_log.step_number <= current_step - 2: + step_logs.observations_images = None + png_bytes = driver.get_screenshot_as_png() + image = Image.open(BytesIO(png_bytes)) + print(f"Captured a browser screenshot: {image.size} pixels") + step_log.observations_images = [image.copy()] # Create a copy to ensure it persists, important! + + # Update observations with current URL + url_info = f"Current url: {driver.current_url}" + step_log.observations = url_info if step_logs.observations is None else step_log.observations + "\n" + url_info + return + + +# Initialize driver and agent +chrome_options = webdriver.ChromeOptions() +chrome_options.add_argument("--force-device-scale-factor=1") +chrome_options.add_argument("--window-size=1000,1300") +chrome_options.add_argument("--disable-pdf-viewer") + +driver = helium.start_chrome(headless=False, options=chrome_options) + +# Initialize tools + + +@tool +def search_item_ctrl_f(text: str, nth_result: int = 1) -> str: + """ + Searches for text on the current page via Ctrl + F and jumps to the nth occurrence. + Args: + text: The text to search for + nth_result: Which occurrence to jump to (default: 1) + """ + elements = driver.find_elements(By.XPATH, f"//*[contains(text(), '{text}')]") + if nth_result > len(elements): + raise Exception(f"Match n┬░{nth_result} not found (only {len(elements)} matches found)") + result = f"Found {len(elements)} matches for '{text}'." + elem = elements[nth_result - 1] + driver.execute_script("arguments[0].scrollIntoView(true);", elem) + result += f"Focused on element {nth_result} of {len(elements)}" + return result + + +@tool +def go_back() -> None: + """Goes back to previous page.""" + driver.back() + + +@tool +def close_popups() -> str: + """ + Closes any visible modal or pop-up on the page. Use this to dismiss pop-up windows! This does not work on cookie consent banners. 
+ """ + # Common selectors for modal close buttons and overlay elements + modal_selectors = [ + "button[class*='close']", + "[class*='modal']", + "[class*='modal'] button", + "[class*='CloseButton']", + "[aria-label*='close']", + ".modal-close", + ".close-modal", + ".modal .close", + ".modal-backdrop", + ".modal-overlay", + "[class*='overlay']", + ] + + wait = WebDriverWait(driver, timeout=0.5) + + for selector in modal_selectors: + try: + elements = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, selector))) + + for element in elements: + if element.is_displayed(): + try: + # Try clicking with JavaScript as it's more reliable + driver.execute_script("arguments[0].click();", element) + except ElementNotInteractableException: + # If JavaScript click fails, try regular click + element.click() + + except TimeoutException: + continue + except Exception as e: + print(f"Error handling selector {selector}: {str(e)}") + continue + return "Modals closed" + + +agent = CodeAgent( + tools=[go_back, close_popups, search_item_ctrl_f], + model=model, + additional_authorized_imports=["helium"], + step_callbacks=[save_screenshot], + max_steps=20, + verbosity_level=2, +) + +helium_instructions = """ +You can use helium to access websites. Don't bother about the helium driver, it's already managed. +First you need to import everything from helium, then you can do other actions! +Code: +```py +from helium import * +go_to('github.com/trending') +``` + +You can directly click clickable elements by inputting the text that appears on them. +Code: +```py +click("Top products") +``` + +If it's a link: +Code: +```py +click(Link("Top products")) +``` + +If you try to interact with an element and it's not found, you'll get a LookupError. +In general stop your action after each button click to see what happens on your screenshot. +Never try to login in a page. + +To scroll up or down, use scroll_down or scroll_up with as an argument the number of pixels to scroll from. 
+Code: +```py +scroll_down(num_pixels=1200) # This will scroll one viewport down +``` + +When you have pop-ups with a cross icon to close, don't try to click the close icon by finding its element or targeting an 'X' element (this most often fails). +Just use your built-in tool `close_popups` to close them: +Code: +```py +close_popups() +``` + +You can use .exists() to check for the existence of an element. For example: +Code: +```py +if Text('Accept cookies?').exists(): + click('I accept') +``` + +Proceed in several steps rather than trying to solve the task in one shot. +And at the end, only when you have your answer, return your final answer. +Code: +```py +final_answer("YOUR_ANSWER_HERE") +``` + +If pages seem stuck on loading, you might have to wait, for instance `import time` and run `time.sleep(5.0)`. But don't overuse this! +To list elements on page, DO NOT try code-based element searches like 'contributors = find_all(S("ol > li"))': just look at the latest screenshot you have and read it visually, or use your tool search_item_ctrl_f. +Of course, you can act on buttons like a user would do when navigating. +After each code blob you write, you will be automatically provided with an updated screenshot of the browser and the current browser url. +But beware that the screenshot will only be taken at the end of the whole action, it won't see intermediate states. +Don't kill the browser. +""" + +# Run the agent! + +github_request = """ +I'm trying to find how hard I have to work to get a repo in github.com/trending. +Can you navigate to the profile for the top author of the top trending repo, and give me their total number of commits over the last year? +""" # The agent is able to achieve this request only when powered by GPT-4o or Claude-3.5-sonnet. + +search_request = """ +Please navigate to https://en.wikipedia.org/wiki/Chicago and give me a sentence containing the word "1992" that mentions a construction accident. 
+""" + +agent.run(search_request + helium_instructions) diff --git a/package-lock.json b/package-lock.json deleted file mode 100644 index 9684c54b5..000000000 --- a/package-lock.json +++ /dev/null @@ -1,2708 +0,0 @@ -{ - "name": "smolagents", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "dependencies": { - "@e2b/cli": "^1.0.9" - } - }, - "node_modules/@bufbuild/protobuf": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/@bufbuild/protobuf/-/protobuf-2.2.3.tgz", - "integrity": "sha512-tFQoXHJdkEOSwj5tRIZSPNUuXK3RaR7T1nUrPgbYX1pUbvqqaaZAsfo+NXBPsz5rZMSKVFrgK1WL8Q/MSLvprg==", - "license": "(Apache-2.0 AND BSD-3-Clause)" - }, - "node_modules/@connectrpc/connect": { - "version": "2.0.0-rc.3", - "resolved": "https://registry.npmjs.org/@connectrpc/connect/-/connect-2.0.0-rc.3.tgz", - "integrity": "sha512-ARBt64yEyKbanyRETTjcjJuHr2YXorzQo0etyS5+P6oSeW8xEuzajA9g+zDnMcj1hlX2dQE93foIWQGfpru7gQ==", - "license": "Apache-2.0", - "peerDependencies": { - "@bufbuild/protobuf": "^2.2.0" - } - }, - "node_modules/@connectrpc/connect-web": { - "version": "2.0.0-rc.3", - "resolved": "https://registry.npmjs.org/@connectrpc/connect-web/-/connect-web-2.0.0-rc.3.tgz", - "integrity": "sha512-w88P8Lsn5CCsA7MFRl2e6oLY4J/5toiNtJns/YJrlyQaWOy3RO8pDgkz+iIkG98RPMhj2thuBvsd3Cn4DKKCkw==", - "license": "Apache-2.0", - "peerDependencies": { - "@bufbuild/protobuf": "^2.2.0", - "@connectrpc/connect": "2.0.0-rc.3" - } - }, - "node_modules/@e2b/cli": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/@e2b/cli/-/cli-1.0.9.tgz", - "integrity": "sha512-TNxW0O/y8GqfDfPGS2tC4jtHUcP6IHAKUxT5fc2y0KWhiJ3Za8Xrn4ZehfkDrvGd5ImjsXkvABHs1RJeioEGjQ==", - "license": "MIT", - "dependencies": { - "@iarna/toml": "^2.2.5", - "async-listen": "^3.0.1", - "boxen": "^7.1.1", - "chalk": "^5.3.0", - "cli-highlight": "^2.1.11", - "command-exists": "^1.2.9", - "commander": "^11.1.0", - "console-table-printer": "^2.11.2", - "e2b": "^1.0.1", - "inquirer": "^9.2.12", - "open": 
"^9.1.0", - "strip-ansi": "^7.1.0", - "update-notifier": "5.1.0", - "yup": "^1.3.2" - }, - "bin": { - "e2b": "dist/index.js" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@iarna/toml": { - "version": "2.2.5", - "resolved": "https://registry.npmjs.org/@iarna/toml/-/toml-2.2.5.tgz", - "integrity": "sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==", - "license": "ISC" - }, - "node_modules/@inquirer/figures": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.8.tgz", - "integrity": "sha512-tKd+jsmhq21AP1LhexC0pPwsCxEhGgAkg28byjJAd+xhmIs8LUX8JbUc3vBf3PhLxWiB5EvyBE5X7JSPAqMAqg==", - "license": "MIT", - "engines": { - "node": ">=18" - } - }, - "node_modules/@sindresorhus/is": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", - "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/@szmarczak/http-timer": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", - "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", - "license": "MIT", - "dependencies": { - "defer-to-connect": "^1.0.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/ansi-align": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", - "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", - "license": "ISC", - "dependencies": { - "string-width": "^4.1.0" - } - }, - "node_modules/ansi-align/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": 
"sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-align/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/ansi-align/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-align/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-escapes": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", - "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", - "license": "MIT", - "dependencies": { - "type-fest": "^0.21.3" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ansi-escapes/node_modules/type-fest": { - "version": "0.21.3", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", - "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", - 
"license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ansi-regex": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", - "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/any-promise": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", - "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", - "license": "MIT" - }, - "node_modules/async-listen": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/async-listen/-/async-listen-3.0.1.tgz", - "integrity": "sha512-cWMaNwUJnf37C/S5TfCkk/15MwbPRwVYALA2jtjkbHjCmAPiDXyNJy2q3p1KAZzDLHAWyarUWSujUoHR4pEgrA==", - "license": "MIT", - "engines": { - "node": ">= 14" - } - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - 
}, - "node_modules/big-integer": { - "version": "1.6.52", - "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.52.tgz", - "integrity": "sha512-QxD8cf2eVqJOOz63z6JIN9BzvVs/dlySa5HGSBH5xtR8dPteIRQnBxxKqkNTiT6jbDTF6jAfrd4oMcND9RGbQg==", - "license": "Unlicense", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/bl": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", - "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", - "license": "MIT", - "dependencies": { - "buffer": "^5.5.0", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - } - }, - "node_modules/boxen": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.1.1.tgz", - "integrity": "sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==", - "license": "MIT", - "dependencies": { - "ansi-align": "^3.0.1", - "camelcase": "^7.0.1", - "chalk": "^5.2.0", - "cli-boxes": "^3.0.0", - "string-width": "^5.1.2", - "type-fest": "^2.13.0", - "widest-line": "^4.0.1", - "wrap-ansi": "^8.1.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/bplist-parser": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/bplist-parser/-/bplist-parser-0.2.0.tgz", - "integrity": "sha512-z0M+byMThzQmD9NILRniCUXYsYpjwnlO8N5uCFaCqIOpqRsJCrQL9NK3JsD67CN5a08nF5oIL2bD6loTdHOuKw==", - "license": "MIT", - "dependencies": { - "big-integer": "^1.6.44" - }, - "engines": { - "node": ">= 5.10.0" - } - }, - "node_modules/buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": 
"https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, - "node_modules/bundle-name": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-3.0.0.tgz", - "integrity": "sha512-PKA4BeSvBpQKQ8iPOGCSiell+N8P+Tf1DlwqmYhpe2gAhKPHn8EYOxVT+ShuGmhg8lN8XiSlS80yiExKXrURlw==", - "license": "MIT", - "dependencies": { - "run-applescript": "^5.0.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cacheable-request": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", - "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", - "license": "MIT", - "dependencies": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^3.0.0", - "lowercase-keys": "^2.0.0", - "normalize-url": "^4.1.0", - "responselike": "^1.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "license": "MIT", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cacheable-request/node_modules/lowercase-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - 
"node_modules/camelcase": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", - "integrity": "sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", - "license": "MIT", - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/chalk": { - "version": "5.4.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.4.0.tgz", - "integrity": "sha512-ZkD35Mx92acjB2yNJgziGqT9oKHEOxjTBTDRpOsRWtdecL/0jM3z5kM/CTzHWvHIen1GvkM85p6TuFfDGfc8/Q==", - "license": "MIT", - "engines": { - "node": "^12.17.0 || ^14.13 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/chardet": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", - "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", - "license": "MIT" - }, - "node_modules/ci-info": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", - "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==", - "license": "MIT" - }, - "node_modules/cli-boxes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", - "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cli-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", - "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", - "license": "MIT", - "dependencies": { - "restore-cursor": "^3.1.0" - }, - "engines": { - 
"node": ">=8" - } - }, - "node_modules/cli-highlight": { - "version": "2.1.11", - "resolved": "https://registry.npmjs.org/cli-highlight/-/cli-highlight-2.1.11.tgz", - "integrity": "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg==", - "license": "ISC", - "dependencies": { - "chalk": "^4.0.0", - "highlight.js": "^10.7.1", - "mz": "^2.4.0", - "parse5": "^5.1.1", - "parse5-htmlparser2-tree-adapter": "^6.0.0", - "yargs": "^16.0.0" - }, - "bin": { - "highlight": "bin/highlight" - }, - "engines": { - "node": ">=8.0.0", - "npm": ">=5.0.0" - } - }, - "node_modules/cli-highlight/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/cli-highlight/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/cli-spinners": { - "version": "2.9.2", - "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", - "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", - "license": "MIT", - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cli-width": { - "version": "4.1.0", - "resolved": 
"https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz", - "integrity": "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==", - "license": "ISC", - "engines": { - "node": ">= 12" - } - }, - "node_modules/cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "license": "ISC", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, - "node_modules/cliui/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/cliui/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/cliui/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": 
"^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/clone": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", - "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", - "license": "MIT", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/clone-response": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.3.tgz", - "integrity": "sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA==", - "license": "MIT", - "dependencies": { - "mimic-response": "^1.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "license": "MIT", - "dependencies": { - 
"color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "license": "MIT" - }, - "node_modules/command-exists": { - "version": "1.2.9", - "resolved": "https://registry.npmjs.org/command-exists/-/command-exists-1.2.9.tgz", - "integrity": "sha512-LTQ/SGc+s0Xc0Fu5WaKnR0YiygZkm9eKFvyS+fRsU7/ZWFF8ykFM6Pc9aCVf1+xasOOZpO3BAVgVrKvsqKHV7w==", - "license": "MIT" - }, - "node_modules/commander": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-11.1.0.tgz", - "integrity": "sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==", - "license": "MIT", - "engines": { - "node": ">=16" - } - }, - "node_modules/compare-versions": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/compare-versions/-/compare-versions-6.1.1.tgz", - "integrity": "sha512-4hm4VPpIecmlg59CHXnRDnqGplJFrbLG4aFEl5vl6cK1u76ws3LLvX7ikFnTDl5vo39sjWD6AaDPYodJp/NNHg==", - "license": "MIT" - }, - "node_modules/configstore": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz", - "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==", - "license": "BSD-2-Clause", - "dependencies": { - "dot-prop": "^5.2.0", - "graceful-fs": "^4.1.2", - "make-dir": "^3.0.0", - "unique-string": "^2.0.0", - "write-file-atomic": "^3.0.0", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/console-table-printer": { - "version": "2.12.1", - "resolved": "https://registry.npmjs.org/console-table-printer/-/console-table-printer-2.12.1.tgz", - "integrity": "sha512-wKGOQRRvdnd89pCeH96e2Fn4wkbenSP6LMHfjfyNLMbGuHEFbMqQNuxXqd0oXG9caIOQ1FTvc5Uijp9/4jujnQ==", 
- "license": "MIT", - "dependencies": { - "simple-wcswidth": "^1.0.1" - } - }, - "node_modules/cross-spawn": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", - "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", - "license": "MIT", - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/crypto-random-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz", - "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/decompress-response": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": "sha512-BzRPQuY1ip+qDonAOz42gRm/pg9F768C+npV/4JOsxRC2sq+Rlk+Q4ZCAsOhnIaMrgarILY+RMUIvMmmX1qAEA==", - "license": "MIT", - "dependencies": { - "mimic-response": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "license": "MIT", - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/default-browser": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-4.0.0.tgz", - "integrity": "sha512-wX5pXO1+BrhMkSbROFsyxUm0i/cJEScyNhA4PPxc41ICuv05ZZB/MX28s8aZx6xjmatvebIapF6hLEKEcpneUA==", - "license": "MIT", - "dependencies": { - "bundle-name": "^3.0.0", - "default-browser-id": "^3.0.0", - "execa": "^7.1.1", - "titleize": "^3.0.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/default-browser-id": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-3.0.0.tgz", - "integrity": "sha512-OZ1y3y0SqSICtE8DE4S8YOE9UZOJ8wO16fKWVP5J1Qz42kV9jcnMVFrEE/noXb/ss3Q4pZIH79kxofzyNNtUNA==", - "license": "MIT", - "dependencies": { - "bplist-parser": "^0.2.0", - "untildify": "^4.0.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/defaults": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", - "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", - "license": "MIT", - "dependencies": { - "clone": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/defer-to-connect": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", - "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==", - "license": "MIT" - }, - "node_modules/define-lazy-prop": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz", - "integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/dot-prop": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", - "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", - "license": "MIT", - "dependencies": { - "is-obj": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/duplexer3": { - "version": "0.1.5", - 
"resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.5.tgz", - "integrity": "sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA==", - "license": "BSD-3-Clause" - }, - "node_modules/e2b": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/e2b/-/e2b-1.0.5.tgz", - "integrity": "sha512-0c2xqNQfVcVBmETsd1bXWCYaN3iVl7m81dJVcjB7O2/c15A7t0s/FkydcZGzVvfZchj40/1f09AdjGX6nk1eNQ==", - "license": "MIT", - "dependencies": { - "@bufbuild/protobuf": "^2.2.2", - "@connectrpc/connect": "2.0.0-rc.3", - "@connectrpc/connect-web": "2.0.0-rc.3", - "compare-versions": "^6.1.0", - "openapi-fetch": "^0.9.7", - "platform": "^1.3.6" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", - "license": "MIT" - }, - "node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "license": "MIT" - }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "license": "MIT", - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/escalade": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", - "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-goat": { - "version": "2.1.1", - "resolved": 
"https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", - "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/execa": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-7.2.0.tgz", - "integrity": "sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA==", - "license": "MIT", - "dependencies": { - "cross-spawn": "^7.0.3", - "get-stream": "^6.0.1", - "human-signals": "^4.3.0", - "is-stream": "^3.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^5.1.0", - "onetime": "^6.0.0", - "signal-exit": "^3.0.7", - "strip-final-newline": "^3.0.0" - }, - "engines": { - "node": "^14.18.0 || ^16.14.0 || >=18.0.0" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/external-editor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", - "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", - "license": "MIT", - "dependencies": { - "chardet": "^0.7.0", - "iconv-lite": "^0.4.24", - "tmp": "^0.0.33" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "license": "ISC", - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/get-stream": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", - "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/global-dirs": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", - "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", - "license": "MIT", - "dependencies": { - "ini": "2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/got": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", - "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", - "license": "MIT", - "dependencies": { - "@sindresorhus/is": "^0.14.0", - "@szmarczak/http-timer": "^1.1.2", - "cacheable-request": "^6.0.0", - "decompress-response": "^3.3.0", - "duplexer3": "^0.1.4", - "get-stream": "^4.1.0", - "lowercase-keys": "^1.0.1", - "mimic-response": "^1.0.1", - "p-cancelable": "^1.0.0", - "to-readable-stream": "^1.0.0", - "url-parse-lax": "^3.0.0" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/got/node_modules/get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "license": "MIT", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "license": "ISC" - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": 
"sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/has-yarn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz", - "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/highlight.js": { - "version": "10.7.3", - "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz", - "integrity": "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==", - "license": "BSD-3-Clause", - "engines": { - "node": "*" - } - }, - "node_modules/http-cache-semantics": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", - "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==", - "license": "BSD-2-Clause" - }, - "node_modules/human-signals": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-4.3.1.tgz", - "integrity": "sha512-nZXjEF2nbo7lIw3mgYjItAfgQXog3OjJogSbKa2CQIIvSGWcKgeJnQlNXip6NglNzYH45nSRiEVimMvYL8DDqQ==", - "license": "Apache-2.0", - "engines": { - "node": ">=14.18.0" - } - }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": 
"sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "BSD-3-Clause" - }, - "node_modules/import-lazy": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz", - "integrity": "sha512-m7ZEHgtw69qOGw+jwxXkHlrlIPdTGkyh66zXZ1ajZbxkDBNjSY/LGbmjc7h0s2ELsUDTAhFr55TrPSSqJGPG0A==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "license": "MIT", - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "license": "ISC" - }, - "node_modules/ini": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", - "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "node_modules/inquirer": { - "version": "9.3.7", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-9.3.7.tgz", - "integrity": "sha512-LJKFHCSeIRq9hanN14IlOtPSTe3lNES7TYDTE2xxdAy1LS5rYphajK1qtwvj3YmQXvvk0U2Vbmcni8P9EIQW9w==", - "license": "MIT", - "dependencies": { - "@inquirer/figures": "^1.0.3", - "ansi-escapes": "^4.3.2", - "cli-width": "^4.1.0", - "external-editor": "^3.1.0", - "mute-stream": "1.0.0", - "ora": "^5.4.1", - "run-async": 
"^3.0.0", - "rxjs": "^7.8.1", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^6.2.0", - "yoctocolors-cjs": "^2.1.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/inquirer/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/inquirer/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/inquirer/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/inquirer/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/inquirer/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": 
"MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/inquirer/node_modules/wrap-ansi": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", - "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-ci": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", - "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", - "license": "MIT", - "dependencies": { - "ci-info": "^2.0.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/is-docker": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz", - "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==", - "license": "MIT", - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-inside-container": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz", - "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==", - "license": "MIT", - "dependencies": { - "is-docker": "^3.0.0" - }, - "bin": 
{ - "is-inside-container": "cli.js" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-installed-globally": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", - "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", - "license": "MIT", - "dependencies": { - "global-dirs": "^3.0.0", - "is-path-inside": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-interactive": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", - "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-npm": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-5.0.0.tgz", - "integrity": "sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-stream": { - "version": "3.0.0", - 
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", - "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", - "license": "MIT", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==", - "license": "MIT" - }, - "node_modules/is-unicode-supported": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", - "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "license": "MIT", - "dependencies": { - "is-docker": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-wsl/node_modules/is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - "license": "MIT", - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-yarn-global": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz", - "integrity": 
"sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw==", - "license": "MIT" - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "license": "ISC" - }, - "node_modules/json-buffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ==", - "license": "MIT" - }, - "node_modules/keyv": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", - "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", - "license": "MIT", - "dependencies": { - "json-buffer": "3.0.0" - } - }, - "node_modules/latest-version": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", - "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==", - "license": "MIT", - "dependencies": { - "package-json": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/log-symbols": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", - "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", - "license": "MIT", - "dependencies": { - "chalk": "^4.1.0", - "is-unicode-supported": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/log-symbols/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": 
"sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/log-symbols/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - "license": "MIT", - "dependencies": { - "semver": "^6.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/make-dir/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/merge-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": 
"sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", - "license": "MIT" - }, - "node_modules/mimic-fn": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", - "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/minimist": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/mute-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-1.0.0.tgz", - "integrity": "sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA==", - "license": "ISC", - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, - "node_modules/mz": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", - "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", - "license": "MIT", - "dependencies": { - "any-promise": "^1.0.0", - "object-assign": "^4.0.1", - "thenify-all": "^1.0.0" - } - }, - "node_modules/normalize-url": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz", - "integrity": 
"sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/npm-run-path": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz", - "integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==", - "license": "MIT", - "dependencies": { - "path-key": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/npm-run-path/node_modules/path-key": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", - "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "license": "ISC", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/onetime": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", - "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", - "license": "MIT", - "dependencies": { - "mimic-fn": "^4.0.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/open": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/open/-/open-9.1.0.tgz", - "integrity": "sha512-OS+QTnw1/4vrf+9hh1jc1jnYjzSG4ttTBB8UxOwAnInG3Uo4ssetzC1ihqaIHjLJnA5GGlRl6QlZXOTQhRBUvg==", - "license": "MIT", - "dependencies": { - "default-browser": "^4.0.0", - "define-lazy-prop": "^3.0.0", - "is-inside-container": "^1.0.0", - "is-wsl": "^2.2.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/openapi-fetch": { - "version": "0.9.8", - "resolved": "https://registry.npmjs.org/openapi-fetch/-/openapi-fetch-0.9.8.tgz", - "integrity": "sha512-zM6elH0EZStD/gSiNlcPrzXcVQ/pZo3BDvC6CDwRDUt1dDzxlshpmQnpD6cZaJ39THaSmwVCxxRrPKNM1hHrDg==", - "license": "MIT", - "dependencies": { - "openapi-typescript-helpers": "^0.0.8" - } - }, - "node_modules/openapi-typescript-helpers": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/openapi-typescript-helpers/-/openapi-typescript-helpers-0.0.8.tgz", - "integrity": "sha512-1eNjQtbfNi5Z/kFhagDIaIRj6qqDzhjNJKz8cmMW0CVdGwT6e1GLbAfgI0d28VTJa1A8jz82jm/4dG8qNoNS8g==", - "license": "MIT" - }, - "node_modules/ora": { - "version": "5.4.1", - "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.1.tgz", - "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", - "license": "MIT", - "dependencies": { - "bl": "^4.1.0", - "chalk": "^4.1.0", - "cli-cursor": "^3.1.0", - "cli-spinners": "^2.5.0", - "is-interactive": "^1.0.0", - "is-unicode-supported": "^0.1.0", - "log-symbols": "^4.1.0", - "strip-ansi": "^6.0.0", - "wcwidth": "^1.0.1" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ora/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": 
"sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/ora/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/ora/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/ora/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/os-tmpdir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", - "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/p-cancelable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", - "integrity": 
"sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/package-json": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz", - "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==", - "license": "MIT", - "dependencies": { - "got": "^9.6.0", - "registry-auth-token": "^4.0.0", - "registry-url": "^5.0.0", - "semver": "^6.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/package-json/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/parse5": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-5.1.1.tgz", - "integrity": "sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug==", - "license": "MIT" - }, - "node_modules/parse5-htmlparser2-tree-adapter": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz", - "integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==", - "license": "MIT", - "dependencies": { - "parse5": "^6.0.1" - } - }, - "node_modules/parse5-htmlparser2-tree-adapter/node_modules/parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==", - "license": "MIT" - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - 
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/platform": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/platform/-/platform-1.3.6.tgz", - "integrity": "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg==", - "license": "MIT" - }, - "node_modules/prepend-http": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha512-ravE6m9Atw9Z/jjttRUZ+clIXogdghyZAuWJ3qEzjT+jI/dL1ifAqhZeC5VHzQp1MSt1+jxKkFNemj/iO7tVUA==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/property-expr": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.6.tgz", - "integrity": "sha512-SVtmxhRE/CGkn3eZY1T6pC8Nln6Fr/lu1mKSgRud0eC73whjGfoAogbn78LkD8aFL0zz3bAFerKSnOl7NlErBA==", - "license": "MIT" - }, - "node_modules/pump": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.2.tgz", - "integrity": "sha512-tUPXtzlGM8FE3P0ZL6DVs/3P58k9nk8/jZeQCurTJylQA8qFYzHFfhBJkuqyE0FifOsQ0uKWekiZ5g8wtr28cw==", - "license": "MIT", - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/pupa": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz", - "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==", - "license": "MIT", - "dependencies": { - "escape-goat": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", - "dependencies": { - "deep-extend": 
"^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "bin": { - "rc": "cli.js" - } - }, - "node_modules/rc/node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "license": "ISC" - }, - "node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "license": "MIT", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/registry-auth-token": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.2.tgz", - "integrity": "sha512-PC5ZysNb42zpFME6D/XlIgtNGdTl8bBOCw90xQLVMpzuuubJKYDWFAEuUNc+Cn8Z8724tg2SDhDRrkVEsqfDMg==", - "license": "MIT", - "dependencies": { - "rc": "1.2.8" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/registry-url": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz", - "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==", - "license": "MIT", - "dependencies": { - "rc": "^1.2.8" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/responselike": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ==", - "license": "MIT", - "dependencies": { - "lowercase-keys": "^1.0.0" - } - }, - "node_modules/restore-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", - "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", - "license": "MIT", - "dependencies": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/restore-cursor/node_modules/mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/restore-cursor/node_modules/onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "license": "MIT", - "dependencies": { - "mimic-fn": "^2.1.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/run-applescript": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-5.0.0.tgz", - "integrity": "sha512-XcT5rBksx1QdIhlFOCtgZkB99ZEouFZ1E2Kc2LHqNW13U3/74YGdkQRmThTwxy4QIyookibDKYZOPqX//6BlAg==", - "license": "MIT", - "dependencies": { - "execa": "^5.0.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/run-applescript/node_modules/execa": { - "version": "5.1.1", - "resolved": 
"https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", - "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", - "license": "MIT", - "dependencies": { - "cross-spawn": "^7.0.3", - "get-stream": "^6.0.0", - "human-signals": "^2.1.0", - "is-stream": "^2.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^4.0.1", - "onetime": "^5.1.2", - "signal-exit": "^3.0.3", - "strip-final-newline": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/run-applescript/node_modules/human-signals": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", - "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", - "license": "Apache-2.0", - "engines": { - "node": ">=10.17.0" - } - }, - "node_modules/run-applescript/node_modules/is-stream": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", - "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/run-applescript/node_modules/mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/run-applescript/node_modules/npm-run-path": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", - "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", - "license": "MIT", - "dependencies": { - 
"path-key": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/run-applescript/node_modules/onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "license": "MIT", - "dependencies": { - "mimic-fn": "^2.1.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/run-applescript/node_modules/strip-final-newline": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", - "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/run-async": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/run-async/-/run-async-3.0.0.tgz", - "integrity": "sha512-540WwVDOMxA6dN6We19EcT9sc3hkXPw5mzRNGM3FkdN/vtE9NFvj5lFAPNwUDmJjXidm3v7TC1cTE7t17Ulm1Q==", - "license": "MIT", - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/rxjs": { - "version": "7.8.1", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", - "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", - "license": "Apache-2.0", - "dependencies": { - "tslib": "^2.1.0" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - 
"node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "license": "MIT" - }, - "node_modules/semver": { - "version": "7.6.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", - "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/semver-diff": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz", - "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==", - "license": "MIT", - "dependencies": { - "semver": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/semver-diff/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "license": "MIT", - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - 
"node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "license": "ISC" - }, - "node_modules/simple-wcswidth": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/simple-wcswidth/-/simple-wcswidth-1.0.1.tgz", - "integrity": "sha512-xMO/8eNREtaROt7tJvWJqHBDTMFN4eiQ5I4JRMuilwfnFcV5W9u7RUkueNkdw0jPqGMX36iCywelS5yilTuOxg==", - "license": "MIT" - }, - "node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "license": "MIT", - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, - "node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "license": "MIT", - "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/strip-final-newline": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", - "integrity": 
"sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/thenify": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", - "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", - "license": "MIT", - "dependencies": { - "any-promise": "^1.0.0" - } - }, - "node_modules/thenify-all": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", - "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", - "license": "MIT", - "dependencies": { - "thenify": ">= 3.1.0 < 4" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/tiny-case": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tiny-case/-/tiny-case-1.0.3.tgz", - "integrity": "sha512-Eet/eeMhkO6TX8mnUteS9zgPbUMQa4I6Kkp5ORiBD5476/m+PIRiumP5tmh5ioJpH7k51Kehawy2UDfsnxxY8Q==", - "license": "MIT" - }, - "node_modules/titleize": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/titleize/-/titleize-3.0.0.tgz", 
- "integrity": "sha512-KxVu8EYHDPBdUYdKZdKtU2aj2XfEx9AfjXxE/Aj0vT06w2icA09Vus1rh6eSu1y01akYg6BjIK/hxyLJINoMLQ==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/tmp": { - "version": "0.0.33", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", - "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", - "license": "MIT", - "dependencies": { - "os-tmpdir": "~1.0.2" - }, - "engines": { - "node": ">=0.6.0" - } - }, - "node_modules/to-readable-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", - "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/toposort": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/toposort/-/toposort-2.0.2.tgz", - "integrity": "sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg==", - "license": "MIT" - }, - "node_modules/tslib": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", - "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", - "license": "0BSD" - }, - "node_modules/type-fest": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", - "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/typedarray-to-buffer": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", - 
"integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", - "license": "MIT", - "dependencies": { - "is-typedarray": "^1.0.0" - } - }, - "node_modules/unique-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", - "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==", - "license": "MIT", - "dependencies": { - "crypto-random-string": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/untildify": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/untildify/-/untildify-4.0.0.tgz", - "integrity": "sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-5.1.0.tgz", - "integrity": "sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw==", - "license": "BSD-2-Clause", - "dependencies": { - "boxen": "^5.0.0", - "chalk": "^4.1.0", - "configstore": "^5.0.1", - "has-yarn": "^2.1.0", - "import-lazy": "^2.1.0", - "is-ci": "^2.0.0", - "is-installed-globally": "^0.4.0", - "is-npm": "^5.0.0", - "is-yarn-global": "^0.3.0", - "latest-version": "^5.1.0", - "pupa": "^2.1.1", - "semver": "^7.3.4", - "semver-diff": "^3.1.1", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/yeoman/update-notifier?sponsor=1" - } - }, - "node_modules/update-notifier/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - 
"node_modules/update-notifier/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/update-notifier/node_modules/boxen": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-5.1.2.tgz", - "integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==", - "license": "MIT", - "dependencies": { - "ansi-align": "^3.0.0", - "camelcase": "^6.2.0", - "chalk": "^4.1.0", - "cli-boxes": "^2.2.1", - "string-width": "^4.2.2", - "type-fest": "^0.20.2", - "widest-line": "^3.1.0", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/update-notifier/node_modules/camelcase": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/update-notifier/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - 
"node_modules/update-notifier/node_modules/cli-boxes": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", - "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==", - "license": "MIT", - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/update-notifier/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/update-notifier/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - 
"node_modules/update-notifier/node_modules/widest-line": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz", - "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", - "license": "MIT", - "dependencies": { - "string-width": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/url-parse-lax": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": "sha512-NjFKA0DidqPa5ciFcSrXnAltTtzz84ogy+NebPvfEgAck0+TNg4UJ4IN+fB7zRZfbgUf0syOo9MDxFkDSMuFaQ==", - "license": "MIT", - "dependencies": { - "prepend-http": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "license": "MIT" - }, - "node_modules/wcwidth": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", - "integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", - "license": "MIT", - "dependencies": { - "defaults": "^1.0.3" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": 
"sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/widest-line": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", - "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", - "license": "MIT", - "dependencies": { - "string-width": "^5.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "license": "ISC" - }, - "node_modules/write-file-atomic": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", - "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", - "license": "ISC", - "dependencies": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, - "node_modules/xdg-basedir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", - 
"integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "node_modules/yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "license": "MIT", - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/yargs-parser": { - "version": "20.2.9", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "node_modules/yargs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/yargs/node_modules/string-width": { - "version": "4.2.3", - "resolved": 
"https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/yoctocolors-cjs": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yoctocolors-cjs/-/yoctocolors-cjs-2.1.2.tgz", - "integrity": "sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA==", - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/yup": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/yup/-/yup-1.6.1.tgz", - "integrity": "sha512-JED8pB50qbA4FOkDol0bYF/p60qSEDQqBD0/qeIrUCG1KbPBIQ776fCUNb9ldbPcSTxA69g/47XTo4TqWiuXOA==", - "license": "MIT", - "dependencies": { - "property-expr": "^2.0.5", - "tiny-case": "^1.0.3", - "toposort": "^2.0.2", - "type-fest": "^2.19.0" - } - } - } -} diff --git a/package.json b/package.json deleted file mode 100644 index f2ba32ee5..000000000 --- a/package.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "dependencies": { - "@e2b/cli": "^1.0.9" - } -} diff --git a/pyproject.toml b/pyproject.toml index e3ff96df5..22c1252e4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "smolagents" -version = "1.4.0.dev" +version = "1.6.0.dev" description = "ЁЯдЧ smolagents: a barebones 
library for agents. Agents write python code to call tools or orchestrate other agents." authors = [ { name="Aymeric Roucher", email="aymeric@hf.co" }, { name="Thomas Wolf"}, @@ -12,26 +12,36 @@ authors = [ readme = "README.md" requires-python = ">=3.10" dependencies = [ - "transformers>=4.0.0", + "huggingface-hub>=0.24.0", "requests>=2.32.3", "rich>=13.9.4", "pandas>=2.2.3", "jinja2>=3.1.4", "pillow>=11.0.0", "markdownify>=0.14.1", - "gradio>=5.8.0", "duckduckgo-search>=6.3.7", - "python-dotenv>=1.0.1", - "e2b-code-interpreter>=1.0.3", ] [project.optional-dependencies] +torch = [ + "torch", + "torchvision", +] audio = [ "soundfile", + "smolagents[torch]", ] -torch = [ - "torch", +transformers = [ "accelerate", + "transformers>=4.0.0", + "smolagents[torch]", +] +e2b = [ + "e2b-code-interpreter>=1.0.3", + "python-dotenv>=1.0.1", +] +gradio = [ + "gradio>=5.8.0", ] litellm = [ "litellm>=1.55.10", @@ -46,9 +56,15 @@ openai = [ quality = [ "ruff>=0.9.0", ] +all = [ + "smolagents[accelerate,audio,e2b,gradio,litellm,mcp,openai,transformers]", +] test = [ + "ipython>=8.31.0", # for interactive environment tests "pytest>=8.1.0", - "smolagents[audio,litellm,mcp,openai,torch]", + "python-dotenv>=1.0.1", # For test_all_docs + "smolagents[all]", + "rank-bm25", # For test_all_docs ] dev = [ "smolagents[quality,test]", @@ -60,9 +76,18 @@ dev = [ addopts = "-sv --durations=0" [tool.ruff] -lint.ignore = ["F403"] +line-length = 119 +lint.ignore = [ + "F403", # undefined-local-with-import-star + "E501", # line-too-long +] +lint.select = ["E", "F", "I", "W"] [tool.ruff.lint.per-file-ignores] "examples/*" = [ "E402", # module-import-not-at-top-of-file ] + +[tool.ruff.lint.isort] +known-first-party = ["smolagents"] +lines-after-imports = 2 diff --git a/src/smolagents/__init__.py b/src/smolagents/__init__.py index f457b7e34..8b417d5b7 100644 --- a/src/smolagents/__init__.py +++ b/src/smolagents/__init__.py @@ -14,37 +14,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied. # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "1.4.0.dev" +__version__ = "1.6.0.dev" -from typing import TYPE_CHECKING - -from transformers.utils import _LazyModule -from transformers.utils.import_utils import define_import_structure - -if TYPE_CHECKING: - from .agents import * - from .default_tools import * - from .e2b_executor import * - from .gradio_ui import * - from .local_python_executor import * - from .models import * - from .monitoring import * - from .prompts import * - from .tools import * - from .types import * - from .utils import * - - -else: - import sys - - _file = globals()["__file__"] - import_structure = define_import_structure(_file) - import_structure[""] = {"__version__": __version__} - sys.modules[__name__] = _LazyModule( - __name__, - _file, - import_structure, - module_spec=__spec__, - extra_objects={"__version__": __version__}, - ) +from .agents import * +from .default_tools import * +from .e2b_executor import * +from .gradio_ui import * +from .local_python_executor import * +from .models import * +from .monitoring import * +from .prompts import * +from .tools import * +from .types import * +from .utils import * diff --git a/src/smolagents/_function_type_hints_utils.py b/src/smolagents/_function_type_hints_utils.py new file mode 100644 index 000000000..5eb950280 --- /dev/null +++ b/src/smolagents/_function_type_hints_utils.py @@ -0,0 +1,396 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This module contains utilities exclusively taken from the `transformers` repository. + +Since they are not specific to `transformers` and `transformers` is a heavy dependency, those helpers have +been duplicated. + +TODO: move them to `huggingface_hub` to avoid code duplication. +""" + +import inspect +import json +import os +import re +import types +from copy import copy +from typing import ( + Any, + Callable, + Dict, + List, + Optional, + Tuple, + Union, + get_args, + get_origin, + get_type_hints, +) + +from huggingface_hub.utils import is_torch_available + +from .utils import _is_pillow_available + + +def get_imports(filename: Union[str, os.PathLike]) -> List[str]: + """ + Extracts all the libraries (not relative imports this time) that are imported in a file. + + Args: + filename (`str` or `os.PathLike`): The module file to inspect. + + Returns: + `List[str]`: The list of all packages required to use the input module. 
+ """ + with open(filename, "r", encoding="utf-8") as f: + content = f.read() + + # filter out try/except block so in custom code we can have try/except imports + content = re.sub(r"\s*try\s*:.*?except.*?:", "", content, flags=re.DOTALL) + + # filter out imports under is_flash_attn_2_available block for avoid import issues in cpu only environment + content = re.sub( + r"if is_flash_attn[a-zA-Z0-9_]+available\(\):\s*(from flash_attn\s*.*\s*)+", + "", + content, + flags=re.MULTILINE, + ) + + # Imports of the form `import xxx` + imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from xxx import yyy` + imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE) + # Only keep the top-level module + imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")] + return list(set(imports)) + + +class TypeHintParsingException(Exception): + """Exception raised for errors in parsing type hints to generate JSON schemas""" + + +class DocstringParsingException(Exception): + """Exception raised for errors in parsing docstrings to generate JSON schemas""" + + +def get_json_schema(func: Callable) -> Dict: + """ + This function generates a JSON schema for a given function, based on its docstring and type hints. This is + mostly used for passing lists of tools to a chat template. The JSON schema contains the name and description of + the function, as well as the names, types and descriptions for each of its arguments. `get_json_schema()` requires + that the function has a docstring, and that each argument has a description in the docstring, in the standard + Google docstring format shown below. It also requires that all the function arguments have a valid Python type hint. + + Although it is not required, a `Returns` block can also be added, which will be included in the schema. This is + optional because most chat templates ignore the return value of the function. 
+ + Args: + func: The function to generate a JSON schema for. + + Returns: + A dictionary containing the JSON schema for the function. + + Examples: + ```python + >>> def multiply(x: float, y: float): + >>> ''' + >>> A function that multiplies two numbers + >>> + >>> Args: + >>> x: The first number to multiply + >>> y: The second number to multiply + >>> ''' + >>> return x * y + >>> + >>> print(get_json_schema(multiply)) + { + "name": "multiply", + "description": "A function that multiplies two numbers", + "parameters": { + "type": "object", + "properties": { + "x": {"type": "number", "description": "The first number to multiply"}, + "y": {"type": "number", "description": "The second number to multiply"} + }, + "required": ["x", "y"] + } + } + ``` + + The general use for these schemas is that they are used to generate tool descriptions for chat templates that + support them, like so: + + ```python + >>> from transformers import AutoTokenizer + >>> from transformers.utils import get_json_schema + >>> + >>> def multiply(x: float, y: float): + >>> ''' + >>> A function that multiplies two numbers + >>> + >>> Args: + >>> x: The first number to multiply + >>> y: The second number to multiply + >>> return x * y + >>> ''' + >>> + >>> multiply_schema = get_json_schema(multiply) + >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01") + >>> messages = [{"role": "user", "content": "What is 179 x 4571?"}] + >>> formatted_chat = tokenizer.apply_chat_template( + >>> messages, + >>> tools=[multiply_schema], + >>> chat_template="tool_use", + >>> return_dict=True, + >>> return_tensors="pt", + >>> add_generation_prompt=True + >>> ) + >>> # The formatted chat can now be passed to model.generate() + ``` + + Each argument description can also have an optional `(choices: ...)` block at the end, such as + `(choices: ["tea", "coffee"])`, which will be parsed into an `enum` field in the schema. 
Note that this will + only be parsed correctly if it is at the end of the line: + + ```python + >>> def drink_beverage(beverage: str): + >>> ''' + >>> A function that drinks a beverage + >>> + >>> Args: + >>> beverage: The beverage to drink (choices: ["tea", "coffee"]) + >>> ''' + >>> pass + >>> + >>> print(get_json_schema(drink_beverage)) + ``` + { + 'name': 'drink_beverage', + 'description': 'A function that drinks a beverage', + 'parameters': { + 'type': 'object', + 'properties': { + 'beverage': { + 'type': 'string', + 'enum': ['tea', 'coffee'], + 'description': 'The beverage to drink' + } + }, + 'required': ['beverage'] + } + } + """ + doc = inspect.getdoc(func) + if not doc: + raise DocstringParsingException( + f"Cannot generate JSON schema for {func.__name__} because it has no docstring!" + ) + doc = doc.strip() + main_doc, param_descriptions, return_doc = _parse_google_format_docstring(doc) + + json_schema = _convert_type_hints_to_json_schema(func) + if (return_dict := json_schema["properties"].pop("return", None)) is not None: + if return_doc is not None: # We allow a missing return docstring since most templates ignore it + return_dict["description"] = return_doc + for arg, schema in json_schema["properties"].items(): + if arg not in param_descriptions: + raise DocstringParsingException( + f"Cannot generate JSON schema for {func.__name__} because the docstring has no description for the argument '{arg}'" + ) + desc = param_descriptions[arg] + enum_choices = re.search(r"\(choices:\s*(.*?)\)\s*$", desc, flags=re.IGNORECASE) + if enum_choices: + schema["enum"] = [c.strip() for c in json.loads(enum_choices.group(1))] + desc = enum_choices.string[: enum_choices.start()].strip() + schema["description"] = desc + + output = {"name": func.__name__, "description": main_doc, "parameters": json_schema} + if return_dict is not None: + output["return"] = return_dict + return {"type": "function", "function": output} + + +# Extracts the initial segment of the docstring, 
containing the function description +description_re = re.compile(r"^(.*?)[\n\s]*(Args:|Returns:|Raises:|\Z)", re.DOTALL) +# Extracts the Args: block from the docstring +args_re = re.compile(r"\n\s*Args:\n\s*(.*?)[\n\s]*(Returns:|Raises:|\Z)", re.DOTALL) +# Splits the Args: block into individual arguments +args_split_re = re.compile( + r""" +(?:^|\n) # Match the start of the args block, or a newline +\s*(\w+):\s* # Capture the argument name and strip spacing +(.*?)\s* # Capture the argument description, which can span multiple lines, and strip trailing spacing +(?=\n\s*\w+:|\Z) # Stop when you hit the next argument or the end of the block +""", + re.DOTALL | re.VERBOSE, +) +# Extracts the Returns: block from the docstring, if present. Note that most chat templates ignore the return type/doc! +returns_re = re.compile(r"\n\s*Returns:\n\s*(.*?)[\n\s]*(Raises:|\Z)", re.DOTALL) + + +def _parse_google_format_docstring( + docstring: str, +) -> Tuple[Optional[str], Optional[Dict], Optional[str]]: + """ + Parses a Google-style docstring to extract the function description, + argument descriptions, and return description. + + Args: + docstring (str): The docstring to parse. + + Returns: + The function description, arguments, and return description. 
+ """ + + # Extract the sections + description_match = description_re.search(docstring) + args_match = args_re.search(docstring) + returns_match = returns_re.search(docstring) + + # Clean and store the sections + description = description_match.group(1).strip() if description_match else None + docstring_args = args_match.group(1).strip() if args_match else None + returns = returns_match.group(1).strip() if returns_match else None + + # Parsing the arguments into a dictionary + if docstring_args is not None: + docstring_args = "\n".join([line for line in docstring_args.split("\n") if line.strip()]) # Remove blank lines + matches = args_split_re.findall(docstring_args) + args_dict = {match[0]: re.sub(r"\s*\n+\s*", " ", match[1].strip()) for match in matches} + else: + args_dict = {} + + return description, args_dict, returns + + +def _convert_type_hints_to_json_schema(func: Callable, error_on_missing_type_hints: bool = True) -> Dict: + type_hints = get_type_hints(func) + signature = inspect.signature(func) + + properties = {} + for param_name, param_type in type_hints.items(): + properties[param_name] = _parse_type_hint(param_type) + + required = [] + for param_name, param in signature.parameters.items(): + if param.annotation == inspect.Parameter.empty and error_on_missing_type_hints: + raise TypeHintParsingException(f"Argument {param.name} is missing a type hint in function {func.__name__}") + if param_name not in properties: + properties[param_name] = {} + + if param.default == inspect.Parameter.empty: + required.append(param_name) + else: + properties[param_name]["nullable"] = True + + schema = {"type": "object", "properties": properties} + if required: + schema["required"] = required + + return schema + + +def _parse_type_hint(hint: str) -> Dict: + origin = get_origin(hint) + args = get_args(hint) + + if origin is None: + try: + return _get_json_schema_type(hint) + except KeyError: + raise TypeHintParsingException( + "Couldn't parse this type hint, likely due to 
a custom class or object: ", + hint, + ) + + elif origin is Union or (hasattr(types, "UnionType") and origin is types.UnionType): + # Recurse into each of the subtypes in the Union, except None, which is handled separately at the end + subtypes = [_parse_type_hint(t) for t in args if t is not type(None)] + if len(subtypes) == 1: + # A single non-null type can be expressed directly + return_dict = subtypes[0] + elif all(isinstance(subtype["type"], str) for subtype in subtypes): + # A union of basic types can be expressed as a list in the schema + return_dict = {"type": sorted([subtype["type"] for subtype in subtypes])} + else: + # A union of more complex types requires "anyOf" + return_dict = {"anyOf": subtypes} + if type(None) in args: + return_dict["nullable"] = True + return return_dict + + elif origin is list: + if not args: + return {"type": "array"} + else: + # Lists can only have a single type argument, so recurse into it + return {"type": "array", "items": _parse_type_hint(args[0])} + + elif origin is tuple: + if not args: + return {"type": "array"} + if len(args) == 1: + raise TypeHintParsingException( + f"The type hint {str(hint).replace('typing.', '')} is a Tuple with a single element, which " + "we do not automatically convert to JSON schema as it is rarely necessary. If this input can contain " + "more than one element, we recommend " + "using a List[] type instead, or if it really is a single element, remove the Tuple[] wrapper and just " + "pass the element directly." + ) + if ... in args: + raise TypeHintParsingException( + "Conversion of '...' is not supported in Tuple type hints. " + "Use List[] types for variable-length" + " inputs instead." 
+ ) + return {"type": "array", "prefixItems": [_parse_type_hint(t) for t in args]} + + elif origin is dict: + # The JSON equivalent to a dict is 'object', which mandates that all keys are strings + # However, we can specify the type of the dict values with "additionalProperties" + out = {"type": "object"} + if len(args) == 2: + out["additionalProperties"] = _parse_type_hint(args[1]) + return out + + raise TypeHintParsingException("Couldn't parse this type hint, likely due to a custom class or object: ", hint) + + +_BASE_TYPE_MAPPING = { + int: {"type": "integer"}, + float: {"type": "number"}, + str: {"type": "string"}, + bool: {"type": "boolean"}, + Any: {"type": "any"}, + types.NoneType: {"type": "null"}, +} + + +def _get_json_schema_type(param_type: str) -> Dict[str, str]: + if param_type in _BASE_TYPE_MAPPING: + return copy(_BASE_TYPE_MAPPING[param_type]) + if str(param_type) == "Image" and _is_pillow_available(): + from PIL.Image import Image + + if param_type == Image: + return {"type": "image"} + if str(param_type) == "Tensor" and is_torch_available(): + from torch import Tensor + + if param_type == Tensor: + return {"type": "audio"} + return {"type": "object"} diff --git a/src/smolagents/agents.py b/src/smolagents/agents.py index da791aa43..b7111e824 100644 --- a/src/smolagents/agents.py +++ b/src/smolagents/agents.py @@ -14,27 +14,30 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import inspect import time +from collections import deque from dataclasses import dataclass -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union -from enum import IntEnum from rich import box from rich.console import Group from rich.panel import Panel from rich.rule import Rule from rich.syntax import Syntax from rich.text import Text -from rich.console import Console -from .default_tools import FinalAnswerTool, TOOL_MAPPING +from .default_tools import TOOL_MAPPING, FinalAnswerTool from .e2b_executor import E2BExecutor from .local_python_executor import ( BASE_BUILTIN_MODULES, LocalPythonInterpreter, fix_final_answer_code, ) -from .models import MessageRole +from .models import ( + ChatMessage, + MessageRole, +) from .monitoring import Monitor from .prompts import ( CODE_SYSTEM_PROMPT, @@ -59,9 +62,10 @@ AgentError, AgentExecutionError, AgentGenerationError, + AgentLogger, AgentMaxStepsError, AgentParsingError, - console, + LogLevel, parse_code_blobs, parse_json_tool_call, truncate_content, @@ -85,11 +89,12 @@ class ActionStep(AgentStepLog): tool_calls: List[ToolCall] | None = None start_time: float | None = None end_time: float | None = None - step: int | None = None + step_number: int | None = None error: AgentError | None = None duration: float | None = None llm_output: str | None = None observations: str | None = None + observations_images: List[str] | None = None action_output: Any = None @@ -102,6 +107,7 @@ class PlanningStep(AgentStepLog): @dataclass class TaskStep(AgentStepLog): task: str + task_images: List[str] | None = None @dataclass @@ -109,20 +115,11 @@ class SystemPromptStep(AgentStepLog): system_prompt: str -def get_tool_descriptions( - tools: Dict[str, Tool], tool_description_template: str -) -> str: - return "\n".join( - [ - get_tool_description_with_args(tool, tool_description_template) - for tool in tools.values() - ] - ) +def 
get_tool_descriptions(tools: Dict[str, Tool], tool_description_template: str) -> str: + return "\n".join([get_tool_description_with_args(tool, tool_description_template) for tool in tools.values()]) -def format_prompt_with_tools( - tools: Dict[str, Tool], prompt_template: str, tool_description_template: str -) -> str: +def format_prompt_with_tools(tools: Dict[str, Tool], prompt_template: str, tool_description_template: str) -> str: tool_descriptions = get_tool_descriptions(tools, tool_description_template) prompt = prompt_template.replace("{{tool_descriptions}}", tool_descriptions) if "{{tool_names}}" in prompt: @@ -156,9 +153,7 @@ def format_prompt_with_managed_agents_descriptions( f"Provided prompt template does not contain the managed agents descriptions placeholder '{agent_descriptions_placeholder}'" ) if len(managed_agents.keys()) > 0: - return prompt_template.replace( - agent_descriptions_placeholder, show_agents_descriptions(managed_agents) - ) + return prompt_template.replace(agent_descriptions_placeholder, show_agents_descriptions(managed_agents)) else: return prompt_template.replace(agent_descriptions_placeholder, "") @@ -166,32 +161,30 @@ def format_prompt_with_managed_agents_descriptions( YELLOW_HEX = "#d4b702" -class LogLevel(IntEnum): - ERROR = 0 # Only errors - INFO = 1 # Normal output (default) - DEBUG = 2 # Detailed output - - -class AgentLogger: - def __init__(self, level: LogLevel = LogLevel.INFO): - self.level = level - self.console = Console() - - def log(self, *args, level: LogLevel = LogLevel.INFO, **kwargs): - if level <= self.level: - console.print(*args, **kwargs) - - class MultiStepAgent: """ Agent class that solves the given task step by step, using the ReAct framework: While the objective is not reached, the agent will perform a cycle of action (given by the LLM) and observation (obtained from the environment). + + Args: + tools (`list[Tool]`): [`Tool`]s that the agent can use. 
+ model (`Callable[[list[dict[str, str]]], ChatMessage]`): Model that will generate the agent's actions. + system_prompt (`str`, *optional*): System prompt that will be used to generate the agent's actions. + tool_description_template (`str`, *optional*): Template used to describe the tools in the system prompt. + max_steps (`int`, default `6`): Maximum number of steps the agent can take to solve the task. + tool_parser (`Callable`, *optional*): Function used to parse the tool calls from the LLM output. + add_base_tools (`bool`, default `False`): Whether to add the base tools to the agent's tools. + verbosity_level (`int`, default `1`): Level of verbosity of the agent's logs. + grammar (`dict[str, str]`, *optional*): Grammar used to parse the LLM output. + managed_agents (`list`, *optional*): Managed agents that the agent can call. + step_callbacks (`list[Callable]`, *optional*): Callbacks that will be called at each step. + planning_interval (`int`, *optional*): Interval at which the agent will run a planning step. 
""" def __init__( self, tools: List[Tool], - model: Callable[[List[Dict[str, str]]], str], + model: Callable[[List[Dict[str, str]]], ChatMessage], system_prompt: Optional[str] = None, tool_description_template: Optional[str] = None, max_steps: int = 6, @@ -211,9 +204,7 @@ def __init__( self.model = model self.system_prompt_template = system_prompt self.tool_description_template = ( - tool_description_template - if tool_description_template - else DEFAULT_TOOL_DESCRIPTION_TEMPLATE + tool_description_template if tool_description_template else DEFAULT_TOOL_DESCRIPTION_TEMPLATE ) self.max_steps = max_steps self.tool_parser = tool_parser @@ -225,13 +216,12 @@ def __init__( if managed_agents is not None: self.managed_agents = {agent.name: agent for agent in managed_agents} + for tool in tools: + assert isinstance(tool, Tool), f"This element is not of class Tool: {str(tool)}" self.tools = {tool.name: tool for tool in tools} if add_base_tools: for tool_name, tool_class in TOOL_MAPPING.items(): - if ( - tool_name != "python_interpreter" - or self.__class__.__name__ == "ToolCallingAgent" - ): + if tool_name != "python_interpreter" or self.__class__.__name__ == "ToolCallingAgent": self.tools[tool_name] = tool_class() self.tools["final_answer"] = FinalAnswerTool() @@ -250,18 +240,17 @@ def initialize_system_prompt(self): self.system_prompt_template, self.tool_description_template, ) - self.system_prompt = format_prompt_with_managed_agents_descriptions( - self.system_prompt, self.managed_agents - ) + self.system_prompt = format_prompt_with_managed_agents_descriptions(self.system_prompt, self.managed_agents) return self.system_prompt - def write_inner_memory_from_logs( - self, summary_mode: Optional[bool] = False - ) -> List[Dict[str, str]]: + def write_inner_memory_from_logs(self, summary_mode: bool = False) -> List[Dict[str, str]]: """ Reads past llm_outputs, actions, and observations or errors from the logs into a series of messages that can be used as input to the LLM. 
+ + Args: + summary_mode (`bool`): Whether to write a summary of the logs or the full logs. """ memory = [] for i, step_log in enumerate(self.logs): @@ -269,7 +258,7 @@ def write_inner_memory_from_logs( if not summary_mode: thought_message = { "role": MessageRole.SYSTEM, - "content": step_log.system_prompt.strip(), + "content": [{"type": "text", "text": step_log.system_prompt.strip()}], } memory.append(thought_message) @@ -290,72 +279,87 @@ def write_inner_memory_from_logs( elif isinstance(step_log, TaskStep): task_message = { "role": MessageRole.USER, - "content": "New task:\n" + step_log.task, + "content": [{"type": "text", "text": f"New task:\n{step_log.task}"}], } + if step_log.task_images: + for image in step_log.task_images: + task_message["content"].append({"type": "image", "image": image}) memory.append(task_message) elif isinstance(step_log, ActionStep): if step_log.llm_output is not None and not summary_mode: thought_message = { "role": MessageRole.ASSISTANT, - "content": step_log.llm_output.strip(), + "content": [{"type": "text", "text": step_log.llm_output.strip()}], } memory.append(thought_message) - if step_log.tool_calls is not None: tool_call_message = { "role": MessageRole.ASSISTANT, - "content": str( - [ - { - "id": tool_call.id, - "type": "function", - "function": { - "name": tool_call.name, - "arguments": tool_call.arguments, - }, - } - for tool_call in step_log.tool_calls - ] - ), + "content": [ + { + "type": "text", + "text": str( + [ + { + "id": tool_call.id, + "type": "function", + "function": { + "name": tool_call.name, + "arguments": tool_call.arguments, + }, + } + for tool_call in step_log.tool_calls + ] + ), + } + ], } memory.append(tool_call_message) - - if step_log.tool_calls is None and step_log.error is not None: - message_content = ( - "Error:\n" - + str(step_log.error) - + "\nNow let's retry: take care not to repeat previous errors! 
If you have retried several times, try a completely different approach.\n" - ) - tool_response_message = { + if step_log.error is not None: + error_message = { "role": MessageRole.ASSISTANT, - "content": message_content, + "content": [ + { + "type": "text", + "text": ( + "Error:\n" + + str(step_log.error) + + "\nNow let's retry: take care not to repeat previous errors! If you have retried several times, try a completely different approach.\n" + ), + } + ], } - if step_log.tool_calls is not None and ( - step_log.error is not None or step_log.observations is not None - ): - if step_log.error is not None: - message_content = ( - "Error:\n" - + str(step_log.error) - + "\nNow let's retry: take care not to repeat previous errors! If you have retried several times, try a completely different approach.\n" - ) - elif step_log.observations is not None: - message_content = f"Observation:\n{step_log.observations}" + memory.append(error_message) + if step_log.observations is not None: + if step_log.tool_calls: + tool_call_reference = f"Call id: {(step_log.tool_calls[0].id if getattr(step_log.tool_calls[0], 'id') else 'call_0')}\n" + else: + tool_call_reference = "" + text_observations = f"Observation:\n{step_log.observations}" tool_response_message = { "role": MessageRole.TOOL_RESPONSE, - "content": f"Call id: {(step_log.tool_calls[0].id if getattr(step_log.tool_calls[0], 'id') else 'call_0')}\n" - + message_content, + "content": [{"type": "text", "text": tool_call_reference + text_observations}], } memory.append(tool_response_message) + if step_log.observations_images: + thought_message_image = { + "role": MessageRole.USER, + "content": [{"type": "text", "text": "Here are the observed images:"}] + + [ + { + "type": "image", + "image": image, + } + for image in step_log.observations_images + ], + } + memory.append(thought_message_image) return memory def get_succinct_logs(self): - return [ - {key: value for key, value in log.items() if key != "agent_memory"} - for log in 
self.logs - ] + return [{key: value for key, value in log.items() if key != "agent_memory"} for log in self.logs] def extract_action(self, llm_output: str, split_token: str) -> Tuple[str, str]: """ @@ -373,35 +377,67 @@ def extract_action(self, llm_output: str, split_token: str) -> Tuple[str, str]: ) # NOTE: using indexes starting from the end solves for when you have more than one split_token in the output except Exception: raise AgentParsingError( - f"No '{split_token}' token provided in your output.\nYour output:\n{llm_output}\n. Be sure to include an action, prefaced with '{split_token}'!" + f"No '{split_token}' token provided in your output.\nYour output:\n{llm_output}\n. Be sure to include an action, prefaced with '{split_token}'!", + self.logger, ) return rationale.strip(), action.strip() - def provide_final_answer(self, task) -> str: + def provide_final_answer(self, task: str, images: Optional[list[str]]) -> str: """ - This method provides a final answer to the task, based on the logs of the agent's interactions. + Provide the final answer to the task, based on the logs of the agent's interactions. + + Args: + task (`str`): Task to perform. + images (`list[str]`, *optional*): Paths to image(s). + + Returns: + `str`: Final answer to the task. """ - self.input_messages = [ - { - "role": MessageRole.SYSTEM, - "content": "An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:", - } - ] - self.input_messages += self.write_inner_memory_from_logs()[1:] - self.input_messages += [ - { - "role": MessageRole.USER, - "content": f"Based on the above, please provide an answer to the following user request:\n{task}", - } - ] + if images: + self.input_messages[0]["content"] = [ + { + "type": "text", + "text": "An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. 
Here is the agent's memory:", + } + ] + self.input_messages[0]["content"].append({"type": "image"}) + self.input_messages += self.write_inner_memory_from_logs()[1:] + self.input_messages += [ + { + "role": MessageRole.USER, + "content": [ + { + "type": "text", + "text": f"Based on the above, please provide an answer to the following user request:\n{task}", + } + ], + } + ] + else: + self.input_messages[0]["content"] = [ + { + "type": "text", + "text": "An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:", + } + ] + self.input_messages += self.write_inner_memory_from_logs()[1:] + self.input_messages += [ + { + "role": MessageRole.USER, + "content": [ + { + "type": "text", + "text": f"Based on the above, please provide an answer to the following user request:\n{task}", + } + ], + } + ] try: return self.model(self.input_messages).content except Exception as e: return f"Error in generating final LLM output:\n{e}" - def execute_tool_call( - self, tool_name: str, arguments: Union[Dict[str, str], str] - ) -> Any: + def execute_tool_call(self, tool_name: str, arguments: Union[Dict[str, str], str]) -> Any: """ Execute tool with the provided input and returns the result. This method replaces arguments with the actual values from the state if they refer to state variables. @@ -413,16 +449,14 @@ def execute_tool_call( available_tools = {**self.tools, **self.managed_agents} if tool_name not in available_tools: error_msg = f"Unknown tool {tool_name}, should be instead one of {list(available_tools.keys())}." 
- raise AgentExecutionError(error_msg) + raise AgentExecutionError(error_msg, self.logger) try: if isinstance(arguments, str): if tool_name in self.managed_agents: observation = available_tools[tool_name].__call__(arguments) else: - observation = available_tools[tool_name].__call__( - arguments, sanitize_inputs_outputs=True - ) + observation = available_tools[tool_name].__call__(arguments, sanitize_inputs_outputs=True) elif isinstance(arguments, dict): for key, value in arguments.items(): if isinstance(value, str) and value in self.state: @@ -430,29 +464,25 @@ def execute_tool_call( if tool_name in self.managed_agents: observation = available_tools[tool_name].__call__(**arguments) else: - observation = available_tools[tool_name].__call__( - **arguments, sanitize_inputs_outputs=True - ) + observation = available_tools[tool_name].__call__(**arguments, sanitize_inputs_outputs=True) else: error_msg = f"Arguments passed to tool should be a dict or string: got a {type(arguments)}." - raise AgentExecutionError(error_msg) + raise AgentExecutionError(error_msg, self.logger) return observation except Exception as e: if tool_name in self.tools: - tool_description = get_tool_description_with_args( - available_tools[tool_name] - ) + tool_description = get_tool_description_with_args(available_tools[tool_name]) error_msg = ( f"Error in tool call execution: {e}\nYou should only use this tool with a correct input.\n" f"As a reminder, this tool's description is the following:\n{tool_description}" ) - raise AgentExecutionError(error_msg) + raise AgentExecutionError(error_msg, self.logger) elif tool_name in self.managed_agents: error_msg = ( f"Error in calling team member: {e}\nYou should only ask this team member with a correct request.\n" f"As a reminder, this team member's description is the following:\n{available_tools[tool_name]}" ) - raise AgentExecutionError(error_msg) + raise AgentExecutionError(error_msg, self.logger) def step(self, log_entry: ActionStep) -> Union[None, Any]: 
"""To be implemented in children classes. Should return either None if the step is not final.""" @@ -464,16 +494,18 @@ def run( stream: bool = False, reset: bool = True, single_step: bool = False, + images: Optional[List[str]] = None, additional_args: Optional[Dict] = None, ): """ - Runs the agent for the given task. + Run the agent for the given task. Args: - task (`str`): The task to perform. + task (`str`): Task to perform. stream (`bool`): Whether to run in a streaming way. reset (`bool`): Whether to reset the conversation or keep it going from previous run. single_step (`bool`): Whether to run the agent in one-shot fashion. + images (`list[str]`, *optional*): Paths to image(s). additional_args (`dict`): Any other variables that you want to pass to the agent run, for instance images or dataframes. Give them clear names! Example: @@ -483,6 +515,7 @@ def run( agent.run("What is the result of 2 power 3.7384?") ``` """ + self.task = task if additional_args is not None: self.state.update(additional_args) @@ -514,11 +547,10 @@ def run( level=LogLevel.INFO, ) - self.logs.append(TaskStep(task=self.task)) - + self.logs.append(TaskStep(task=self.task, task_images=images)) if single_step: step_start_time = time.time() - step_log = ActionStep(start_time=step_start_time) + step_log = ActionStep(start_time=step_start_time, observations_images=images) step_log.end_time = time.time() step_log.duration = step_log.end_time - step_start_time @@ -527,24 +559,30 @@ def run( return result if stream: - return self.stream_run(self.task) - else: - return self.direct_run(self.task) + # The steps are returned as they are executed through a generator to iterate on. + return self._run(task=self.task, images=images) + # Outputs are returned only at the end as a string. 
We only look at the last step + return deque(self._run(task=self.task, images=images), maxlen=1)[0] - def stream_run(self, task: str): + def _run(self, task: str, images: List[str] | None = None) -> Generator[str, None, None]: """ - Runs the agent in streaming mode, yielding steps as they are executed: should be launched only in the `run` method. + Run the agent in streaming mode and returns a generator of all the steps. + + Args: + task (`str`): Task to perform. + images (`list[str]`): Paths to image(s). """ final_answer = None self.step_number = 0 while final_answer is None and self.step_number < self.max_steps: step_start_time = time.time() - step_log = ActionStep(step=self.step_number, start_time=step_start_time) + step_log = ActionStep( + step_number=self.step_number, + start_time=step_start_time, + observations_images=images, + ) try: - if ( - self.planning_interval is not None - and self.step_number % self.planning_interval == 0 - ): + if self.planning_interval is not None and self.step_number % self.planning_interval == 0: self.planning_step( task, is_first_step=(self.step_number == 0), @@ -568,86 +606,41 @@ def stream_run(self, task: str): step_log.duration = step_log.end_time - step_start_time self.logs.append(step_log) for callback in self.step_callbacks: - callback(step_log) + # For compatibility with old callbacks that don't take the agent as an argument + if len(inspect.signature(callback).parameters) == 1: + callback(step_log) + else: + callback(step_log=step_log, agent=self) self.step_number += 1 yield step_log if final_answer is None and self.step_number == self.max_steps: error_message = "Reached max steps." 
- final_step_log = ActionStep(error=AgentMaxStepsError(error_message)) + final_step_log = ActionStep( + step_number=self.step_number, error=AgentMaxStepsError(error_message, self.logger) + ) self.logs.append(final_step_log) - final_answer = self.provide_final_answer(task) + final_answer = self.provide_final_answer(task, images) self.logger.log(Text(f"Final answer: {final_answer}"), level=LogLevel.INFO) final_step_log.action_output = final_answer final_step_log.end_time = time.time() final_step_log.duration = step_log.end_time - step_start_time for callback in self.step_callbacks: - callback(final_step_log) + # For compatibility with old callbacks that don't take the agent as an argument + if len(inspect.signature(callback).parameters) == 1: + callback(final_step_log) + else: + callback(step_log=final_step_log, agent=self) yield final_step_log yield handle_agent_output_types(final_answer) - def direct_run(self, task: str): - """ - Runs the agent in direct mode, returning outputs only at the end: should be launched only in the `run` method. - """ - final_answer = None - self.step_number = 0 - while final_answer is None and self.step_number < self.max_steps: - step_start_time = time.time() - step_log = ActionStep(step=self.step_number, start_time=step_start_time) - try: - if ( - self.planning_interval is not None - and self.step_number % self.planning_interval == 0 - ): - self.planning_step( - task, - is_first_step=(self.step_number == 0), - step=self.step_number, - ) - self.logger.log( - Rule( - f"[bold]Step {self.step_number}", - characters="тФБ", - style=YELLOW_HEX, - ), - level=LogLevel.INFO, - ) - - # Run one step! 
- final_answer = self.step(step_log) - - except AgentError as e: - step_log.error = e - finally: - step_end_time = time.time() - step_log.end_time = step_end_time - step_log.duration = step_end_time - step_start_time - self.logs.append(step_log) - for callback in self.step_callbacks: - callback(step_log) - self.step_number += 1 - - if final_answer is None and self.step_number == self.max_steps: - error_message = "Reached max steps." - final_step_log = ActionStep(error=AgentMaxStepsError(error_message)) - self.logs.append(final_step_log) - final_answer = self.provide_final_answer(task) - self.logger.log(Text(f"Final answer: {final_answer}"), level=LogLevel.INFO) - final_step_log.action_output = final_answer - final_step_log.duration = 0 - for callback in self.step_callbacks: - callback(final_step_log) - - return handle_agent_output_types(final_answer) - - def planning_step(self, task, is_first_step: bool, step: int): + def planning_step(self, task, is_first_step: bool, step: int) -> None: """ Used periodically by the agent to plan the next steps to reach the objective. Args: - task (`str`): The task to perform + task (`str`): Task to perform. is_first_step (`bool`): If this step is not the first one, the plan should be an update over a previous plan. step (`int`): The number of the current step, used as an indication for the LLM. 
""" @@ -665,9 +658,7 @@ def planning_step(self, task, is_first_step: bool, step: int): Now begin!""", } - answer_facts = self.model( - [message_prompt_facts, message_prompt_task] - ).content + answer_facts = self.model([message_prompt_facts, message_prompt_task]).content message_system_prompt_plan = { "role": MessageRole.SYSTEM, @@ -677,12 +668,8 @@ def planning_step(self, task, is_first_step: bool, step: int): "role": MessageRole.USER, "content": USER_PROMPT_PLAN.format( task=task, - tool_descriptions=get_tool_descriptions( - self.tools, self.tool_description_template - ), - managed_agents_descriptions=( - show_agents_descriptions(self.managed_agents) - ), + tool_descriptions=get_tool_descriptions(self.tools, self.tool_description_template), + managed_agents_descriptions=(show_agents_descriptions(self.managed_agents)), answer_facts=answer_facts, ), } @@ -699,9 +686,7 @@ def planning_step(self, task, is_first_step: bool, step: int): ``` {answer_facts} ```""".strip() - self.logs.append( - PlanningStep(plan=final_plan_redaction, facts=final_facts_redaction) - ) + self.logs.append(PlanningStep(plan=final_plan_redaction, facts=final_facts_redaction)) self.logger.log( Rule("[bold]Initial plan", style="orange"), Text(final_plan_redaction), @@ -721,9 +706,7 @@ def planning_step(self, task, is_first_step: bool, step: int): "role": MessageRole.USER, "content": USER_PROMPT_FACTS_UPDATE, } - facts_update = self.model( - [facts_update_system_prompt] + agent_memory + [facts_update_message] - ).content + facts_update = self.model([facts_update_system_prompt] + agent_memory + [facts_update_message]).content # Redact updated plan plan_update_message = { @@ -734,12 +717,8 @@ def planning_step(self, task, is_first_step: bool, step: int): "role": MessageRole.USER, "content": USER_PROMPT_PLAN_UPDATE.format( task=task, - tool_descriptions=get_tool_descriptions( - self.tools, self.tool_description_template - ), - managed_agents_descriptions=( - 
show_agents_descriptions(self.managed_agents) - ), + tool_descriptions=get_tool_descriptions(self.tools, self.tool_description_template), + managed_agents_descriptions=(show_agents_descriptions(self.managed_agents)), facts_update=facts_update, remaining_steps=(self.max_steps - step), ), @@ -750,16 +729,12 @@ def planning_step(self, task, is_first_step: bool, step: int): ).content # Log final facts and plan - final_plan_redaction = PLAN_UPDATE_FINAL_PLAN_REDACTION.format( - task=task, plan_update=plan_update - ) + final_plan_redaction = PLAN_UPDATE_FINAL_PLAN_REDACTION.format(task=task, plan_update=plan_update) final_facts_redaction = f"""Here is the updated list of the facts that I know: ``` {facts_update} ```""" - self.logs.append( - PlanningStep(plan=final_plan_redaction, facts=final_facts_redaction) - ) + self.logs.append(PlanningStep(plan=final_plan_redaction, facts=final_facts_redaction)) self.logger.log( Rule("[bold]Updated plan", style="orange"), Text(final_plan_redaction), @@ -770,12 +745,20 @@ def planning_step(self, task, is_first_step: bool, step: int): class ToolCallingAgent(MultiStepAgent): """ This agent uses JSON-like tool calls, using method `model.get_tool_call` to leverage the LLM engine's tool calling capabilities. + + Args: + tools (`list[Tool]`): [`Tool`]s that the agent can use. + model (`Callable[[list[dict[str, str]]], ChatMessage]`): Model that will generate the agent's actions. + system_prompt (`str`, *optional*): System prompt that will be used to generate the agent's actions. + planning_interval (`int`, *optional*): Interval at which the agent will run a planning step. + **kwargs: Additional keyword arguments. 
+ """ def __init__( self, tools: List[Tool], - model: Callable, + model: Callable[[List[Dict[str, str]]], ChatMessage], system_prompt: Optional[str] = None, planning_interval: Optional[int] = None, **kwargs, @@ -808,24 +791,20 @@ def step(self, log_entry: ActionStep) -> Union[None, Any]: tools_to_call_from=list(self.tools.values()), stop_sequences=["Observation:"], ) + if model_message.tool_calls is None or len(model_message.tool_calls) == 0: + raise Exception("Model did not call any tools. Call `final_answer` tool to return a final answer.") tool_call = model_message.tool_calls[0] tool_name, tool_call_id = tool_call.function.name, tool_call.id tool_arguments = tool_call.function.arguments except Exception as e: - raise AgentGenerationError( - f"Error in generating tool call with model:\n{e}" - ) + raise AgentGenerationError(f"Error in generating tool call with model:\n{e}", self.logger) - log_entry.tool_calls = [ - ToolCall(name=tool_name, arguments=tool_arguments, id=tool_call_id) - ] + log_entry.tool_calls = [ToolCall(name=tool_name, arguments=tool_arguments, id=tool_call_id)] # Execute self.logger.log( - Panel( - Text(f"Calling tool: '{tool_name}' with arguments: {tool_arguments}") - ), + Panel(Text(f"Calling tool: '{tool_name}' with arguments: {tool_arguments}")), level=LogLevel.INFO, ) if tool_name == "final_answer": @@ -880,12 +859,24 @@ def step(self, log_entry: ActionStep) -> Union[None, Any]: class CodeAgent(MultiStepAgent): """ In this agent, the tool calls will be formulated by the LLM in code format, then parsed and executed. + + Args: + tools (`list[Tool]`): [`Tool`]s that the agent can use. + model (`Callable[[list[dict[str, str]]], ChatMessage]`): Model that will generate the agent's actions. + system_prompt (`str`, *optional*): System prompt that will be used to generate the agent's actions. + grammar (`dict[str, str]`, *optional*): Grammar used to parse the LLM output. 
+ additional_authorized_imports (`list[str]`, *optional*): Additional authorized imports for the agent. + planning_interval (`int`, *optional*): Interval at which the agent will run a planning step. + use_e2b_executor (`bool`, default `False`): Whether to use the E2B executor for remote code execution. + max_print_outputs_length (`int`, *optional*): Maximum length of the print outputs. + **kwargs: Additional keyword arguments. + """ def __init__( self, tools: List[Tool], - model: Callable, + model: Callable[[List[Dict[str, str]]], ChatMessage], system_prompt: Optional[str] = None, grammar: Optional[Dict[str, str]] = None, additional_authorized_imports: Optional[List[str]] = None, @@ -897,23 +888,10 @@ def __init__( if system_prompt is None: system_prompt = CODE_SYSTEM_PROMPT - self.additional_authorized_imports = ( - additional_authorized_imports if additional_authorized_imports else [] - ) - self.authorized_imports = list( - set(BASE_BUILTIN_MODULES) | set(self.additional_authorized_imports) - ) + self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else [] + self.authorized_imports = list(set(BASE_BUILTIN_MODULES) | set(self.additional_authorized_imports)) if "{{authorized_imports}}" not in system_prompt: - raise AgentError( - "Tag '{{authorized_imports}}' should be provided in the prompt." - ) - - if "*" in self.additional_authorized_imports: - self.logger.log( - "Caution: you set an authorization for all imports, meaning your agent can decide to import any package it deems necessary. 
This might raise issues if the package is not installed in your environment.", - 0, - ) - + raise ValueError("Tag '{{authorized_imports}}' should be provided in the prompt.") super().__init__( tools=tools, model=model, @@ -922,6 +900,12 @@ def __init__( planning_interval=planning_interval, **kwargs, ) + if "*" in self.additional_authorized_imports: + self.logger.log( + "Caution: you set an authorization for all imports, meaning your agent can decide to import any package it deems necessary. This might raise issues if the package is not installed in your environment.", + 0, + ) + if use_e2b_executor and len(self.managed_agents) > 0: raise Exception( f"You passed both {use_e2b_executor=} and some managed agents. Managed agents is not yet supported with remote code execution." @@ -945,9 +929,11 @@ def initialize_system_prompt(self): super().initialize_system_prompt() self.system_prompt = self.system_prompt.replace( "{{authorized_imports}}", - "You can import from any package you want." - if "*" in self.authorized_imports - else str(self.authorized_imports), + ( + "You can import from any package you want." 
+ if "*" in self.authorized_imports + else str(self.authorized_imports) + ), ) return self.system_prompt @@ -962,11 +948,8 @@ def step(self, log_entry: ActionStep) -> Union[None, Any]: # Add new step in logs log_entry.agent_memory = agent_memory.copy() - try: - additional_args = ( - {"grammar": self.grammar} if self.grammar is not None else {} - ) + additional_args = {"grammar": self.grammar} if self.grammar is not None else {} llm_output = self.model( self.input_messages, stop_sequences=["", "Observation:"], @@ -974,7 +957,7 @@ def step(self, log_entry: ActionStep) -> Union[None, Any]: ).content log_entry.llm_output = llm_output except Exception as e: - raise AgentGenerationError(f"Error in generating model output:\n{e}") + raise AgentGenerationError(f"Error in generating model output:\n{e}", self.logger) from e self.logger.log( Group( @@ -997,10 +980,8 @@ def step(self, log_entry: ActionStep) -> Union[None, Any]: try: code_action = fix_final_answer_code(parse_code_blobs(llm_output)) except Exception as e: - error_msg = ( - f"Error in code parsing:\n{e}\nMake sure to provide correct code blobs." - ) - raise AgentParsingError(error_msg) + error_msg = f"Error in code parsing:\n{e}\nMake sure to provide correct code blobs." 
+ raise AgentParsingError(error_msg, self.logger) log_entry.tool_calls = [ ToolCall( @@ -1040,21 +1021,13 @@ def step(self, log_entry: ActionStep) -> Union[None, Any]: ] observation += "Execution logs:\n" + execution_logs except Exception as e: - if isinstance(e, SyntaxError): - error_msg = ( - f"Code execution failed on line {e.lineno} due to: {type(e).__name__}\n" - f"{e.text}" - f"{' ' * (e.offset or 0)}^\n" - f"Error: {str(e)}" - ) - else: - error_msg = str(e) - if "Import of " in str(e) and " is not allowed" in str(e): + error_msg = str(e) + if "Import of " in error_msg and " is not allowed" in error_msg: self.logger.log( "[bold red]Warning to user: Code execution failed due to an unauthorized import - Consider passing said import under `additional_authorized_imports` when initializing your CodeAgent.", level=LogLevel.INFO, ) - raise AgentExecutionError(error_msg) + raise AgentExecutionError(error_msg, self.logger) truncated_output = truncate_content(str(output)) observation += "Last output from code snippet:\n" + truncated_output @@ -1072,6 +1045,19 @@ def step(self, log_entry: ActionStep) -> Union[None, Any]: class ManagedAgent: + """ + ManagedAgent class that manages an agent and provides additional prompting and run summaries. + + Args: + agent (`object`): The agent to be managed. + name (`str`): The name of the managed agent. + description (`str`): A description of the managed agent. + additional_prompting (`Optional[str]`, *optional*): Additional prompting for the managed agent. Defaults to None. + provide_run_summary (`bool`, *optional*): Whether to provide a run summary after the agent completes its task. Defaults to False. + managed_agent_prompt (`Optional[str]`, *optional*): Custom prompt for the managed agent. Defaults to None. 
+ + """ + def __init__( self, agent, @@ -1086,28 +1072,22 @@ def __init__( self.description = description self.additional_prompting = additional_prompting self.provide_run_summary = provide_run_summary - self.managed_agent_prompt = ( - managed_agent_prompt if managed_agent_prompt else MANAGED_AGENT_PROMPT - ) + self.managed_agent_prompt = managed_agent_prompt if managed_agent_prompt else MANAGED_AGENT_PROMPT def write_full_task(self, task): """Adds additional prompting for the managed agent, like 'add more detail in your answer'.""" full_task = self.managed_agent_prompt.format(name=self.name, task=task) if self.additional_prompting: - full_task = full_task.replace( - "\n{{additional_prompting}}", self.additional_prompting - ).strip() + full_task = full_task.replace("\n{additional_prompting}", self.additional_prompting).strip() else: - full_task = full_task.replace("\n{{additional_prompting}}", "").strip() + full_task = full_task.replace("\n{additional_prompting}", "").strip() return full_task def __call__(self, request, **kwargs): full_task = self.write_full_task(request) output = self.agent.run(full_task, **kwargs) if self.provide_run_summary: - answer = ( - f"Here is the final answer from your managed agent '{self.name}':\n" - ) + answer = f"Here is the final answer from your managed agent '{self.name}':\n" answer += str(output) answer += f"\n\nFor more detail, find below a summary of this agent's work:\nSUMMARY OF WORK FROM AGENT '{self.name}':\n" for message in self.agent.write_inner_memory_from_logs(summary_mode=True): diff --git a/src/smolagents/default_tools.py b/src/smolagents/default_tools.py index 14a46ae24..3f3af93e7 100644 --- a/src/smolagents/default_tools.py +++ b/src/smolagents/default_tools.py @@ -14,33 +14,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import json import re from dataclasses import dataclass -from typing import Dict, Optional - -from huggingface_hub import hf_hub_download, list_spaces - - -from transformers.utils import is_offline_mode, is_torch_available +from typing import Any, Dict, Optional from .local_python_executor import ( BASE_BUILTIN_MODULES, BASE_PYTHON_TOOLS, evaluate_python_code, ) -from .tools import TOOL_CONFIG_FILE, PipelineTool, Tool +from .tools import PipelineTool, Tool from .types import AgentAudio -if is_torch_available(): - from transformers.models.whisper import ( - WhisperForConditionalGeneration, - WhisperProcessor, - ) -else: - WhisperForConditionalGeneration = object - WhisperProcessor = object - @dataclass class PreTool: @@ -52,33 +37,6 @@ class PreTool: repo_id: str -def get_remote_tools(logger, organization="huggingface-tools"): - if is_offline_mode(): - logger.info("You are in offline mode, so remote tools are not available.") - return {} - - spaces = list_spaces(author=organization) - tools = {} - for space_info in spaces: - repo_id = space_info.id - resolved_config_file = hf_hub_download( - repo_id, TOOL_CONFIG_FILE, repo_type="space" - ) - with open(resolved_config_file, encoding="utf-8") as reader: - config = json.load(reader) - task = repo_id.split("/")[-1] - tools[config["name"]] = PreTool( - task=task, - description=config["description"], - repo_id=repo_id, - name=task, - inputs=config["inputs"], - output_type=config["output_type"], - ) - - return tools - - class PythonInterpreterTool(Tool): name = "python_interpreter" description = "This is a tool that evaluates python code. It can be used to perform calculations." 
@@ -94,9 +52,7 @@ def __init__(self, *args, authorized_imports=None, **kwargs): if authorized_imports is None: self.authorized_imports = list(set(BASE_BUILTIN_MODULES)) else: - self.authorized_imports = list( - set(BASE_BUILTIN_MODULES) | set(authorized_imports) - ) + self.authorized_imports = list(set(BASE_BUILTIN_MODULES) | set(authorized_imports)) self.inputs = { "code": { "type": "string", @@ -126,21 +82,17 @@ def forward(self, code: str) -> str: class FinalAnswerTool(Tool): name = "final_answer" description = "Provides a final answer to the given problem." - inputs = { - "answer": {"type": "any", "description": "The final answer to the problem"} - } + inputs = {"answer": {"type": "any", "description": "The final answer to the problem"}} output_type = "any" - def forward(self, answer): + def forward(self, answer: Any) -> Any: return answer class UserInputTool(Tool): name = "user_input" description = "Asks for user's input on a specific question" - inputs = { - "question": {"type": "string", "description": "The question to ask the user"} - } + inputs = {"question": {"type": "string", "description": "The question to ask the user"}} output_type = "string" def forward(self, question): @@ -151,9 +103,7 @@ def forward(self, question): class DuckDuckGoSearchTool(Tool): name = "web_search" description = """Performs a duckduckgo web search based on your query (think a Google search) then returns the top search results.""" - inputs = { - "query": {"type": "string", "description": "The search query to perform."} - } + inputs = {"query": {"type": "string", "description": "The search query to perform."}} output_type = "string" def __init__(self, *args, max_results=10, **kwargs): @@ -161,18 +111,17 @@ def __init__(self, *args, max_results=10, **kwargs): self.max_results = max_results try: from duckduckgo_search import DDGS - except ImportError: + except ImportError as e: raise ImportError( "You must install package `duckduckgo_search` to run this tool: for instance run `pip 
install duckduckgo-search`." - ) + ) from e self.ddgs = DDGS() def forward(self, query: str) -> str: results = self.ddgs.text(query, max_results=self.max_results) - postprocessed_results = [ - f"[{result['title']}]({result['href']})\n{result['body']}" - for result in results - ] + if len(results) == 0: + raise Exception("No results found! Try a less restrictive/shorter query.") + postprocessed_results = [f"[{result['title']}]({result['href']})\n{result['body']}" for result in results] return "## Search Results\n\n" + "\n\n".join(postprocessed_results) @@ -199,9 +148,7 @@ def forward(self, query: str, filter_year: Optional[int] = None) -> str: import requests if self.serpapi_key is None: - raise ValueError( - "Missing SerpAPI key. Make sure you have 'SERPAPI_API_KEY' in your env variables." - ) + raise ValueError("Missing SerpAPI key. Make sure you have 'SERPAPI_API_KEY' in your env variables.") params = { "engine": "google", @@ -210,9 +157,7 @@ def forward(self, query: str, filter_year: Optional[int] = None) -> str: "google_domain": "google.com", } if filter_year is not None: - params["tbs"] = ( - f"cdr:1,cd_min:01/01/{filter_year},cd_max:12/31/{filter_year}" - ) + params["tbs"] = f"cdr:1,cd_min:01/01/{filter_year},cd_max:12/31/{filter_year}" response = requests.get("https://serpapi.com/search.json", params=params) @@ -227,13 +172,9 @@ def forward(self, query: str, filter_year: Optional[int] = None) -> str: f"'organic_results' key not found for query: '{query}' with filtering on year={filter_year}. Use a less restrictive query or do not filter on year." ) else: - raise Exception( - f"'organic_results' key not found for query: '{query}'. Use a less restrictive query." - ) + raise Exception(f"'organic_results' key not found for query: '{query}'. 
Use a less restrictive query.") if len(results["organic_results"]) == 0: - year_filter_message = ( - f" with filter year={filter_year}" if filter_year is not None else "" - ) + year_filter_message = f" with filter year={filter_year}" if filter_year is not None else "" return f"No results found for '{query}'{year_filter_message}. Try with a more general query, or remove the year filter." web_snippets = [] @@ -253,9 +194,7 @@ def forward(self, query: str, filter_year: Optional[int] = None) -> str: redacted_version = f"{idx}. [{page['title']}]({page['link']}){date_published}{source}\n{snippet}" - redacted_version = redacted_version.replace( - "Your browser can't play this video.", "" - ) + redacted_version = redacted_version.replace("Your browser can't play this video.", "") web_snippets.append(redacted_version) return "## Search Results\n" + "\n\n".join(web_snippets) @@ -263,7 +202,9 @@ def forward(self, query: str, filter_year: Optional[int] = None) -> str: class VisitWebpageTool(Tool): name = "visit_webpage" - description = "Visits a webpage at the given url and reads its content as a markdown string. Use this to browse webpages." + description = ( + "Visits a webpage at the given url and reads its content as a markdown string. Use this to browse webpages." + ) inputs = { "url": { "type": "string", @@ -277,14 +218,15 @@ def forward(self, url: str) -> str: import requests from markdownify import markdownify from requests.exceptions import RequestException + from smolagents.utils import truncate_content - except ImportError: + except ImportError as e: raise ImportError( "You must install packages `markdownify` and `requests` to run this tool: for instance run `pip install markdownify requests`." 
- ) + ) from e try: - # Send a GET request to the URL - response = requests.get(url) + # Send a GET request to the URL with a 20-second timeout + response = requests.get(url, timeout=20) response.raise_for_status() # Raise an exception for bad status codes # Convert the HTML content to Markdown @@ -295,6 +237,8 @@ def forward(self, url: str) -> str: return truncate_content(markdown_content, 10000) + except requests.exceptions.Timeout: + return "The request timed out. Please try again later or check the URL." except RequestException as e: return f"Error fetching the webpage: {str(e)}" except Exception as e: @@ -305,9 +249,6 @@ class SpeechToTextTool(PipelineTool): default_checkpoint = "openai/whisper-large-v3-turbo" description = "This is a tool that transcribes an audio into text. It returns the transcribed text." name = "transcriber" - pre_processor_class = WhisperProcessor - model_class = WhisperForConditionalGeneration - inputs = { "audio": { "type": "audio", @@ -316,6 +257,18 @@ class SpeechToTextTool(PipelineTool): } output_type = "string" + def __new__(cls): + from transformers.models.whisper import ( + WhisperForConditionalGeneration, + WhisperProcessor, + ) + + if not hasattr(cls, "pre_processor_class"): + cls.pre_processor_class = WhisperProcessor + if not hasattr(cls, "model_class"): + cls.model_class = WhisperForConditionalGeneration + return super().__new__(cls) + def encode(self, audio): audio = AgentAudio(audio).to_raw() return self.pre_processor(audio, return_tensors="pt") diff --git a/src/smolagents/e2b_executor.py b/src/smolagents/e2b_executor.py index 8a20a9e27..404a8e26e 100644 --- a/src/smolagents/e2b_executor.py +++ b/src/smolagents/e2b_executor.py @@ -20,19 +20,30 @@ from io import BytesIO from typing import Any, List, Tuple -from dotenv import load_dotenv -from e2b_code_interpreter import Sandbox from PIL import Image from .tool_validation import validate_tool_attributes from .tools import Tool from .utils import BASE_BUILTIN_MODULES,
instance_to_source -load_dotenv() + +try: + from dotenv import load_dotenv + + load_dotenv() +except ModuleNotFoundError: + pass class E2BExecutor: def __init__(self, additional_imports: List[str], tools: List[Tool], logger): + try: + from e2b_code_interpreter import Sandbox + except ModuleNotFoundError: + raise ModuleNotFoundError( + """Please install 'e2b' extra to use E2BExecutor: `pip install "smolagents[e2b]"`""" + ) + self.custom_tools = {} self.sbx = Sandbox() # "qywp2ctmu2q7jzprcf4j") # TODO: validate installing agents package or not @@ -43,11 +54,9 @@ def __init__(self, additional_imports: List[str], tools: List[Tool], logger): # ) # print("Installation of agents package finished.") self.logger = logger - additional_imports = additional_imports + ["pickle5", "smolagents"] + additional_imports = additional_imports + ["smolagents"] if len(additional_imports) > 0: - execution = self.sbx.commands.run( - "pip install " + " ".join(additional_imports) - ) + execution = self.sbx.commands.run("pip install " + " ".join(additional_imports)) if execution.error: raise Exception(f"Error installing dependencies: {execution.error}") else: @@ -61,9 +70,7 @@ def __init__(self, additional_imports: List[str], tools: List[Tool], logger): tool_code += f"\n{tool.name} = {tool.__class__.__name__}()\n" tool_codes.append(tool_code) - tool_definition_code = "\n".join( - [f"import {module}" for module in BASE_BUILTIN_MODULES] - ) + tool_definition_code = "\n".join([f"import {module}" for module in BASE_BUILTIN_MODULES]) tool_definition_code += textwrap.dedent(""" class Tool: def __call__(self, *args, **kwargs): @@ -122,9 +129,7 @@ def __call__(self, code_action: str, additional_args: dict) -> Tuple[Any, Any]: for attribute_name in ["jpeg", "png"]: if getattr(result, attribute_name) is not None: image_output = getattr(result, attribute_name) - decoded_bytes = base64.b64decode( - image_output.encode("utf-8") - ) + decoded_bytes = base64.b64decode(image_output.encode("utf-8")) return 
Image.open(BytesIO(decoded_bytes)), execution_logs for attribute_name in [ "chart", diff --git a/src/smolagents/gradio_ui.py b/src/smolagents/gradio_ui.py index e04056dbe..52f952b75 100644 --- a/src/smolagents/gradio_ui.py +++ b/src/smolagents/gradio_ui.py @@ -13,20 +13,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import gradio as gr -import shutil -import os import mimetypes +import os import re - +import shutil from typing import Optional from .agents import ActionStep, AgentStepLog, MultiStepAgent from .types import AgentAudio, AgentImage, AgentText, handle_agent_output_types +from .utils import _is_package_available -def pull_messages_from_step(step_log: AgentStepLog, test_mode: bool = True): +def pull_messages_from_step(step_log: AgentStepLog): """Extract ChatMessage objects from agent steps""" + import gradio as gr + if isinstance(step_log, ActionStep): yield gr.ChatMessage(role="assistant", content=step_log.llm_output or "") if step_log.tool_calls is not None: @@ -53,16 +54,18 @@ def pull_messages_from_step(step_log: AgentStepLog, test_mode: bool = True): def stream_to_gradio( agent, task: str, - test_mode: bool = False, reset_agent_memory: bool = False, additional_args: Optional[dict] = None, ): """Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages.""" + if not _is_package_available("gradio"): + raise ModuleNotFoundError( + "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`" + ) + import gradio as gr - for step_log in agent.run( - task, stream=True, reset=reset_agent_memory, additional_args=additional_args - ): - for message in pull_messages_from_step(step_log, test_mode=test_mode): + for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args): + for message in pull_messages_from_step(step_log):
yield message final_answer = step_log # Last log is the run's final_answer @@ -91,6 +94,10 @@ class GradioUI: """A one-line interface to launch your agent in Gradio""" def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None): + if not _is_package_available("gradio"): + raise ModuleNotFoundError( + "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`" + ) self.agent = agent self.file_upload_folder = file_upload_folder if self.file_upload_folder is not None: @@ -98,6 +105,8 @@ def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None) os.mkdir(file_upload_folder) def interact_with_agent(self, prompt, messages): + import gradio as gr + messages.append(gr.ChatMessage(role="user", content=prompt)) yield messages for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False): @@ -118,6 +127,7 @@ def upload_file( """ Handle file uploads, default allowed types are .pdf, .docx, and .txt """ + import gradio as gr if file is None: return gr.Textbox("No file uploaded", visible=True), file_uploads_log @@ -147,14 +157,10 @@ def upload_file( sanitized_name = "".join(sanitized_name) # Save the uploaded file to the specified folder - file_path = os.path.join( - self.file_upload_folder, os.path.basename(sanitized_name) - ) + file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name)) shutil.copy(file.name, file_path) - return gr.Textbox( - f"File uploaded: {file_path}", visible=True - ), file_uploads_log + [file_path] + return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path] def log_user_message(self, text_input, file_uploads_log): return ( @@ -168,6 +174,8 @@ def log_user_message(self, text_input, file_uploads_log): ) def launch(self): + import gradio as gr + with gr.Blocks() as demo: stored_messages = gr.State([]) file_uploads_log = gr.State([])
the upload feature if self.file_upload_folder is not None: upload_file = gr.File(label="Upload a file") - upload_status = gr.Textbox( - label="Upload Status", interactive=False, visible=False - ) + upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False) upload_file.change( self.upload_file, [upload_file, file_uploads_log], diff --git a/src/smolagents/local_python_executor.py b/src/smolagents/local_python_executor.py index 0c7b5bc38..a4f046dca 100644 --- a/src/smolagents/local_python_executor.py +++ b/src/smolagents/local_python_executor.py @@ -17,6 +17,7 @@ import ast import builtins import difflib +import inspect import math import re from collections.abc import Mapping @@ -42,8 +43,7 @@ class InterpreterError(ValueError): ERRORS = { name: getattr(builtins, name) for name in dir(builtins) - if isinstance(getattr(builtins, name), type) - and issubclass(getattr(builtins, name), BaseException) + if isinstance(getattr(builtins, name), type) and issubclass(getattr(builtins, name), BaseException) } PRINT_OUTPUTS, DEFAULT_MAX_LEN_OUTPUT = "", 50000 @@ -167,9 +167,7 @@ def evaluate_unaryop( custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Any: - operand = evaluate_ast( - expression.operand, state, static_tools, custom_tools, authorized_imports - ) + operand = evaluate_ast(expression.operand, state, static_tools, custom_tools, authorized_imports) if isinstance(expression.op, ast.USub): return -operand elif isinstance(expression.op, ast.UAdd): @@ -179,9 +177,7 @@ def evaluate_unaryop( elif isinstance(expression.op, ast.Invert): return ~operand else: - raise InterpreterError( - f"Unary operation {expression.op.__class__.__name__} is not supported." 
- ) + raise InterpreterError(f"Unary operation {expression.op.__class__.__name__} is not supported.") def evaluate_lambda( @@ -217,23 +213,17 @@ def evaluate_while( ) -> None: max_iterations = 1000 iterations = 0 - while evaluate_ast( - while_loop.test, state, static_tools, custom_tools, authorized_imports - ): + while evaluate_ast(while_loop.test, state, static_tools, custom_tools, authorized_imports): for node in while_loop.body: try: - evaluate_ast( - node, state, static_tools, custom_tools, authorized_imports - ) + evaluate_ast(node, state, static_tools, custom_tools, authorized_imports) except BreakException: return None except ContinueException: break iterations += 1 if iterations > max_iterations: - raise InterpreterError( - f"Maximum number of {max_iterations} iterations in While loop exceeded" - ) + raise InterpreterError(f"Maximum number of {max_iterations} iterations in While loop exceeded") return None @@ -248,8 +238,7 @@ def new_func(*args: Any, **kwargs: Any) -> Any: func_state = state.copy() arg_names = [arg.arg for arg in func_def.args.args] default_values = [ - evaluate_ast(d, state, static_tools, custom_tools, authorized_imports) - for d in func_def.args.defaults + evaluate_ast(d, state, static_tools, custom_tools, authorized_imports) for d in func_def.args.defaults ] # Apply default values @@ -286,9 +275,7 @@ def new_func(*args: Any, **kwargs: Any) -> Any: result = None try: for stmt in func_def.body: - result = evaluate_ast( - stmt, func_state, static_tools, custom_tools, authorized_imports - ) + result = evaluate_ast(stmt, func_state, static_tools, custom_tools, authorized_imports) except ReturnException as e: result = e.value @@ -307,9 +294,7 @@ def evaluate_function_def( custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Callable: - custom_tools[func_def.name] = create_function( - func_def, state, static_tools, custom_tools, authorized_imports - ) + custom_tools[func_def.name] = create_function(func_def, state, 
static_tools, custom_tools, authorized_imports) return custom_tools[func_def.name] @@ -321,17 +306,12 @@ def evaluate_class_def( authorized_imports: List[str], ) -> type: class_name = class_def.name - bases = [ - evaluate_ast(base, state, static_tools, custom_tools, authorized_imports) - for base in class_def.bases - ] + bases = [evaluate_ast(base, state, static_tools, custom_tools, authorized_imports) for base in class_def.bases] class_dict = {} for stmt in class_def.body: if isinstance(stmt, ast.FunctionDef): - class_dict[stmt.name] = evaluate_function_def( - stmt, state, static_tools, custom_tools, authorized_imports - ) + class_dict[stmt.name] = evaluate_function_def(stmt, state, static_tools, custom_tools, authorized_imports) elif isinstance(stmt, ast.Assign): for target in stmt.targets: if isinstance(target, ast.Name): @@ -351,9 +331,7 @@ def evaluate_class_def( authorized_imports, ) else: - raise InterpreterError( - f"Unsupported statement in class body: {stmt.__class__.__name__}" - ) + raise InterpreterError(f"Unsupported statement in class body: {stmt.__class__.__name__}") new_class = type(class_name, tuple(bases), class_dict) state[class_name] = new_class @@ -371,79 +349,65 @@ def get_current_value(target: ast.AST) -> Any: if isinstance(target, ast.Name): return state.get(target.id, 0) elif isinstance(target, ast.Subscript): - obj = evaluate_ast( - target.value, state, static_tools, custom_tools, authorized_imports - ) - key = evaluate_ast( - target.slice, state, static_tools, custom_tools, authorized_imports - ) + obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports) + key = evaluate_ast(target.slice, state, static_tools, custom_tools, authorized_imports) return obj[key] elif isinstance(target, ast.Attribute): - obj = evaluate_ast( - target.value, state, static_tools, custom_tools, authorized_imports - ) + obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports) return getattr(obj, 
target.attr) elif isinstance(target, ast.Tuple): return tuple(get_current_value(elt) for elt in target.elts) elif isinstance(target, ast.List): return [get_current_value(elt) for elt in target.elts] else: - raise InterpreterError( - "AugAssign not supported for {type(target)} targets." - ) + raise InterpreterError("AugAssign not supported for {type(target)} targets.") current_value = get_current_value(expression.target) - value_to_add = evaluate_ast( - expression.value, state, static_tools, custom_tools, authorized_imports - ) + value_to_add = evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) if isinstance(expression.op, ast.Add): if isinstance(current_value, list): if not isinstance(value_to_add, list): - raise InterpreterError( - f"Cannot add non-list value {value_to_add} to a list." - ) - updated_value = current_value + value_to_add + raise InterpreterError(f"Cannot add non-list value {value_to_add} to a list.") + current_value += value_to_add else: - updated_value = current_value + value_to_add + current_value += value_to_add elif isinstance(expression.op, ast.Sub): - updated_value = current_value - value_to_add + current_value -= value_to_add elif isinstance(expression.op, ast.Mult): - updated_value = current_value * value_to_add + current_value *= value_to_add elif isinstance(expression.op, ast.Div): - updated_value = current_value / value_to_add + current_value /= value_to_add elif isinstance(expression.op, ast.Mod): - updated_value = current_value % value_to_add + current_value %= value_to_add elif isinstance(expression.op, ast.Pow): - updated_value = current_value**value_to_add + current_value **= value_to_add elif isinstance(expression.op, ast.FloorDiv): - updated_value = current_value // value_to_add + current_value //= value_to_add elif isinstance(expression.op, ast.BitAnd): - updated_value = current_value & value_to_add + current_value &= value_to_add elif isinstance(expression.op, ast.BitOr): - updated_value = 
current_value | value_to_add + current_value |= value_to_add elif isinstance(expression.op, ast.BitXor): - updated_value = current_value ^ value_to_add + current_value ^= value_to_add elif isinstance(expression.op, ast.LShift): - updated_value = current_value << value_to_add + current_value <<= value_to_add elif isinstance(expression.op, ast.RShift): - updated_value = current_value >> value_to_add + current_value >>= value_to_add else: - raise InterpreterError( - f"Operation {type(expression.op).__name__} is not supported." - ) + raise InterpreterError(f"Operation {type(expression.op).__name__} is not supported.") - # Update the state + # Update the state: current_value has been updated in-place set_value( expression.target, - updated_value, + current_value, state, static_tools, custom_tools, authorized_imports, ) - return updated_value + return current_value def evaluate_boolop( @@ -455,16 +419,12 @@ def evaluate_boolop( ) -> bool: if isinstance(node.op, ast.And): for value in node.values: - if not evaluate_ast( - value, state, static_tools, custom_tools, authorized_imports - ): + if not evaluate_ast(value, state, static_tools, custom_tools, authorized_imports): return False return True elif isinstance(node.op, ast.Or): for value in node.values: - if evaluate_ast( - value, state, static_tools, custom_tools, authorized_imports - ): + if evaluate_ast(value, state, static_tools, custom_tools, authorized_imports): return True return False @@ -477,12 +437,8 @@ def evaluate_binop( authorized_imports: List[str], ) -> Any: # Recursively evaluate the left and right operands - left_val = evaluate_ast( - binop.left, state, static_tools, custom_tools, authorized_imports - ) - right_val = evaluate_ast( - binop.right, state, static_tools, custom_tools, authorized_imports - ) + left_val = evaluate_ast(binop.left, state, static_tools, custom_tools, authorized_imports) + right_val = evaluate_ast(binop.right, state, static_tools, custom_tools, authorized_imports) # Determine the 
operation based on the type of the operator in the BinOp if isinstance(binop.op, ast.Add): @@ -510,9 +466,7 @@ def evaluate_binop( elif isinstance(binop.op, ast.RShift): return left_val >> right_val else: - raise NotImplementedError( - f"Binary operation {type(binop.op).__name__} is not implemented." - ) + raise NotImplementedError(f"Binary operation {type(binop.op).__name__} is not implemented.") def evaluate_assign( @@ -522,17 +476,13 @@ def evaluate_assign( custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Any: - result = evaluate_ast( - assign.value, state, static_tools, custom_tools, authorized_imports - ) + result = evaluate_ast(assign.value, state, static_tools, custom_tools, authorized_imports) if len(assign.targets) == 1: target = assign.targets[0] set_value(target, result, state, static_tools, custom_tools, authorized_imports) else: if len(assign.targets) != len(result): - raise InterpreterError( - f"Assign failed: expected {len(result)} values but got {len(assign.targets)}." - ) + raise InterpreterError(f"Assign failed: expected {len(result)} values but got {len(assign.targets)}.") expanded_values = [] for tgt in assign.targets: if isinstance(tgt, ast.Starred): @@ -554,9 +504,7 @@ def set_value( ) -> None: if isinstance(target, ast.Name): if target.id in static_tools: - raise InterpreterError( - f"Cannot assign to name '{target.id}': doing this would erase the existing tool!" 
- ) + raise InterpreterError(f"Cannot assign to name '{target.id}': doing this would erase the existing tool!") state[target.id] = value elif isinstance(target, ast.Tuple): if not isinstance(value, tuple): @@ -567,21 +515,13 @@ def set_value( if len(target.elts) != len(value): raise InterpreterError("Cannot unpack tuple of wrong size") for i, elem in enumerate(target.elts): - set_value( - elem, value[i], state, static_tools, custom_tools, authorized_imports - ) + set_value(elem, value[i], state, static_tools, custom_tools, authorized_imports) elif isinstance(target, ast.Subscript): - obj = evaluate_ast( - target.value, state, static_tools, custom_tools, authorized_imports - ) - key = evaluate_ast( - target.slice, state, static_tools, custom_tools, authorized_imports - ) + obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports) + key = evaluate_ast(target.slice, state, static_tools, custom_tools, authorized_imports) obj[key] = value elif isinstance(target, ast.Attribute): - obj = evaluate_ast( - target.value, state, static_tools, custom_tools, authorized_imports - ) + obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports) setattr(obj, target.attr, value) @@ -593,15 +533,11 @@ def evaluate_call( authorized_imports: List[str], ) -> Any: if not ( - isinstance(call.func, ast.Attribute) - or isinstance(call.func, ast.Name) - or isinstance(call.func, ast.Subscript) + isinstance(call.func, ast.Attribute) or isinstance(call.func, ast.Name) or isinstance(call.func, ast.Subscript) ): raise InterpreterError(f"This is not a correct function: {call.func}).") if isinstance(call.func, ast.Attribute): - obj = evaluate_ast( - call.func.value, state, static_tools, custom_tools, authorized_imports - ) + obj = evaluate_ast(call.func.value, state, static_tools, custom_tools, authorized_imports) func_name = call.func.attr if not hasattr(obj, func_name): raise InterpreterError(f"Object {obj} has no attribute {func_name}") @@ 
-619,22 +555,16 @@ def evaluate_call( func = ERRORS[func_name] else: raise InterpreterError( - f"It is not permitted to evaluate other functions than the provided tools or functions defined in previous code (tried to execute {call.func.id})." + f"It is not permitted to evaluate other functions than the provided tools or functions defined/imported in previous code (tried to execute {call.func.id})." ) elif isinstance(call.func, ast.Subscript): - value = evaluate_ast( - call.func.value, state, static_tools, custom_tools, authorized_imports - ) - index = evaluate_ast( - call.func.slice, state, static_tools, custom_tools, authorized_imports - ) + value = evaluate_ast(call.func.value, state, static_tools, custom_tools, authorized_imports) + index = evaluate_ast(call.func.slice, state, static_tools, custom_tools, authorized_imports) if isinstance(value, (list, tuple)): func = value[index] else: - raise InterpreterError( - f"Cannot subscript object of type {type(value).__name__}" - ) + raise InterpreterError(f"Cannot subscript object of type {type(value).__name__}") if not callable(func): raise InterpreterError(f"This is not a correct function: {call.func}).") @@ -642,20 +572,12 @@ def evaluate_call( args = [] for arg in call.args: if isinstance(arg, ast.Starred): - args.extend( - evaluate_ast( - arg.value, state, static_tools, custom_tools, authorized_imports - ) - ) + args.extend(evaluate_ast(arg.value, state, static_tools, custom_tools, authorized_imports)) else: - args.append( - evaluate_ast(arg, state, static_tools, custom_tools, authorized_imports) - ) + args.append(evaluate_ast(arg, state, static_tools, custom_tools, authorized_imports)) kwargs = { - keyword.arg: evaluate_ast( - keyword.value, state, static_tools, custom_tools, authorized_imports - ) + keyword.arg: evaluate_ast(keyword.value, state, static_tools, custom_tools, authorized_imports) for keyword in call.keywords } @@ -683,6 +605,14 @@ def evaluate_call( # cap the number of lines return None else: # 
Assume it's a callable object + if ( + (inspect.getmodule(func) == builtins) + and inspect.isbuiltin(func) + and (func not in static_tools.values()) + ): + raise InterpreterError( + f"Invoking a builtin function that has not been explicitly added as a tool is not allowed ({func_name})." + ) return func(*args, **kwargs) @@ -693,17 +623,11 @@ def evaluate_subscript( custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Any: - index = evaluate_ast( - subscript.slice, state, static_tools, custom_tools, authorized_imports - ) - value = evaluate_ast( - subscript.value, state, static_tools, custom_tools, authorized_imports - ) + index = evaluate_ast(subscript.slice, state, static_tools, custom_tools, authorized_imports) + value = evaluate_ast(subscript.value, state, static_tools, custom_tools, authorized_imports) if isinstance(value, str) and isinstance(index, str): - raise InterpreterError( - "You're trying to subscript a string with a string index, which is impossible" - ) + raise InterpreterError("You're trying to subscript a string with a string index, which is impossible") if isinstance(value, pd.core.indexing._LocIndexer): parent_object = value.obj return parent_object.loc[index] @@ -718,23 +642,21 @@ def evaluate_subscript( return value[index] elif isinstance(value, (list, tuple)): if not (-len(value) <= index < len(value)): - raise InterpreterError( - f"Index {index} out of bounds for list of length {len(value)}" - ) + raise InterpreterError(f"Index {index} out of bounds for list of length {len(value)}") return value[int(index)] elif isinstance(value, str): if not (-len(value) <= index < len(value)): - raise InterpreterError( - f"Index {index} out of bounds for string of length {len(value)}" - ) + raise InterpreterError(f"Index {index} out of bounds for string of length {len(value)}") return value[index] elif index in value: return value[index] - elif isinstance(index, str) and isinstance(value, Mapping): - close_matches = 
difflib.get_close_matches(index, list(value.keys())) - if len(close_matches) > 0: - return value[close_matches[0]] - raise InterpreterError(f"Could not index {value} with '{index}'.") + else: + error_message = f"Could not index {value} with '{index}'." + if isinstance(index, str) and isinstance(value, Mapping): + close_matches = difflib.get_close_matches(index, list(value.keys())) + if len(close_matches) > 0: + error_message += f" Maybe you meant one of these indexes instead: {str(close_matches)}" + raise InterpreterError(error_message) def evaluate_name( @@ -765,12 +687,9 @@ def evaluate_condition( custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> bool: - left = evaluate_ast( - condition.left, state, static_tools, custom_tools, authorized_imports - ) + left = evaluate_ast(condition.left, state, static_tools, custom_tools, authorized_imports) comparators = [ - evaluate_ast(c, state, static_tools, custom_tools, authorized_imports) - for c in condition.comparators + evaluate_ast(c, state, static_tools, custom_tools, authorized_imports) for c in condition.comparators ] ops = [type(op) for op in condition.ops] @@ -818,21 +737,15 @@ def evaluate_if( authorized_imports: List[str], ) -> Any: result = None - test_result = evaluate_ast( - if_statement.test, state, static_tools, custom_tools, authorized_imports - ) + test_result = evaluate_ast(if_statement.test, state, static_tools, custom_tools, authorized_imports) if test_result: for line in if_statement.body: - line_result = evaluate_ast( - line, state, static_tools, custom_tools, authorized_imports - ) + line_result = evaluate_ast(line, state, static_tools, custom_tools, authorized_imports) if line_result is not None: result = line_result else: for line in if_statement.orelse: - line_result = evaluate_ast( - line, state, static_tools, custom_tools, authorized_imports - ) + line_result = evaluate_ast(line, state, static_tools, custom_tools, authorized_imports) if line_result is not None: result = 
line_result return result @@ -846,9 +759,7 @@ def evaluate_for( authorized_imports: List[str], ) -> Any: result = None - iterator = evaluate_ast( - for_loop.iter, state, static_tools, custom_tools, authorized_imports - ) + iterator = evaluate_ast(for_loop.iter, state, static_tools, custom_tools, authorized_imports) for counter in iterator: set_value( for_loop.target, @@ -860,9 +771,7 @@ def evaluate_for( ) for node in for_loop.body: try: - line_result = evaluate_ast( - node, state, static_tools, custom_tools, authorized_imports - ) + line_result = evaluate_ast(node, state, static_tools, custom_tools, authorized_imports) if line_result is not None: result = line_result except BreakException: @@ -882,9 +791,7 @@ def evaluate_listcomp( custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> List[Any]: - def inner_evaluate( - generators: List[ast.comprehension], index: int, current_state: Dict[str, Any] - ) -> List[Any]: + def inner_evaluate(generators: List[ast.comprehension], index: int, current_state: Dict[str, Any]) -> List[Any]: if index >= len(generators): return [ evaluate_ast( @@ -912,9 +819,7 @@ def inner_evaluate( else: new_state[generator.target.id] = value if all( - evaluate_ast( - if_clause, new_state, static_tools, custom_tools, authorized_imports - ) + evaluate_ast(if_clause, new_state, static_tools, custom_tools, authorized_imports) for if_clause in generator.ifs ): result.extend(inner_evaluate(generators, index + 1, new_state)) @@ -938,32 +843,24 @@ def evaluate_try( for handler in try_node.handlers: if handler.type is None or isinstance( e, - evaluate_ast( - handler.type, state, static_tools, custom_tools, authorized_imports - ), + evaluate_ast(handler.type, state, static_tools, custom_tools, authorized_imports), ): matched = True if handler.name: state[handler.name] = e for stmt in handler.body: - evaluate_ast( - stmt, state, static_tools, custom_tools, authorized_imports - ) + evaluate_ast(stmt, state, static_tools, custom_tools, 
authorized_imports) break if not matched: raise e else: if try_node.orelse: for stmt in try_node.orelse: - evaluate_ast( - stmt, state, static_tools, custom_tools, authorized_imports - ) + evaluate_ast(stmt, state, static_tools, custom_tools, authorized_imports) finally: if try_node.finalbody: for stmt in try_node.finalbody: - evaluate_ast( - stmt, state, static_tools, custom_tools, authorized_imports - ) + evaluate_ast(stmt, state, static_tools, custom_tools, authorized_imports) def evaluate_raise( @@ -974,15 +871,11 @@ def evaluate_raise( authorized_imports: List[str], ) -> None: if raise_node.exc is not None: - exc = evaluate_ast( - raise_node.exc, state, static_tools, custom_tools, authorized_imports - ) + exc = evaluate_ast(raise_node.exc, state, static_tools, custom_tools, authorized_imports) else: exc = None if raise_node.cause is not None: - cause = evaluate_ast( - raise_node.cause, state, static_tools, custom_tools, authorized_imports - ) + cause = evaluate_ast(raise_node.cause, state, static_tools, custom_tools, authorized_imports) else: cause = None if exc is not None: @@ -1001,14 +894,10 @@ def evaluate_assert( custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> None: - test_result = evaluate_ast( - assert_node.test, state, static_tools, custom_tools, authorized_imports - ) + test_result = evaluate_ast(assert_node.test, state, static_tools, custom_tools, authorized_imports) if not test_result: if assert_node.msg: - msg = evaluate_ast( - assert_node.msg, state, static_tools, custom_tools, authorized_imports - ) + msg = evaluate_ast(assert_node.msg, state, static_tools, custom_tools, authorized_imports) raise AssertionError(msg) else: # Include the failing condition in the assertion message @@ -1025,9 +914,7 @@ def evaluate_with( ) -> None: contexts = [] for item in with_node.items: - context_expr = evaluate_ast( - item.context_expr, state, static_tools, custom_tools, authorized_imports - ) + context_expr = 
evaluate_ast(item.context_expr, state, static_tools, custom_tools, authorized_imports) if item.optional_vars: state[item.optional_vars.id] = context_expr.__enter__() contexts.append(state[item.optional_vars.id]) @@ -1069,19 +956,14 @@ def get_safe_module(unsafe_module, dangerous_patterns, visited=None): # Copy all attributes by reference, recursively checking modules for attr_name in dir(unsafe_module): # Skip dangerous patterns at any level - if any( - pattern in f"{unsafe_module.__name__}.{attr_name}" - for pattern in dangerous_patterns - ): + if any(pattern in f"{unsafe_module.__name__}.{attr_name}" for pattern in dangerous_patterns): continue attr_value = getattr(unsafe_module, attr_name) # Recursively process nested modules, passing visited set if isinstance(attr_value, ModuleType): - attr_value = get_safe_module( - attr_value, dangerous_patterns, visited=visited - ) + attr_value = get_safe_module(attr_value, dangerous_patterns, visited=visited) setattr(safe_module, attr_name, attr_value) @@ -1116,18 +998,14 @@ def check_module_authorized(module_name): module_path = module_name.split(".") if any([module in dangerous_patterns for module in module_path]): return False - module_subpaths = [ - ".".join(module_path[:i]) for i in range(1, len(module_path) + 1) - ] + module_subpaths = [".".join(module_path[:i]) for i in range(1, len(module_path) + 1)] return any(subpath in authorized_imports for subpath in module_subpaths) if isinstance(expression, ast.Import): for alias in expression.names: if check_module_authorized(alias.name): raw_module = import_module(alias.name) - state[alias.asname or alias.name] = get_safe_module( - raw_module, dangerous_patterns - ) + state[alias.asname or alias.name] = get_safe_module(raw_module, dangerous_patterns) else: raise InterpreterError( f"Import of {alias.name} is not allowed. 
Authorized imports are: {str(authorized_imports)}" @@ -1135,13 +1013,21 @@ def check_module_authorized(module_name): return None elif isinstance(expression, ast.ImportFrom): if check_module_authorized(expression.module): - raw_module = __import__( - expression.module, fromlist=[alias.name for alias in expression.names] - ) - for alias in expression.names: - state[alias.asname or alias.name] = get_safe_module( - getattr(raw_module, alias.name), dangerous_patterns - ) + module = __import__(expression.module, fromlist=[alias.name for alias in expression.names]) + if expression.names[0].name == "*": # Handle "from module import *" + if hasattr(module, "__all__"): # If module has __all__, import only those names + for name in module.__all__: + state[name] = getattr(module, name) + else: # If no __all__, import all public names (those not starting with '_') + for name in dir(module): + if not name.startswith("_"): + state[name] = getattr(module, name) + else: # regular from imports + for alias in expression.names: + if hasattr(module, alias.name): + state[alias.asname or alias.name] = getattr(module, alias.name) + else: + raise InterpreterError(f"Module {expression.module} has no attribute {alias.name}") else: raise InterpreterError(f"Import from {expression.module} is not allowed.") return None @@ -1156,9 +1042,7 @@ def evaluate_dictcomp( ) -> Dict[Any, Any]: result = {} for gen in dictcomp.generators: - iter_value = evaluate_ast( - gen.iter, state, static_tools, custom_tools, authorized_imports - ) + iter_value = evaluate_ast(gen.iter, state, static_tools, custom_tools, authorized_imports) for value in iter_value: new_state = state.copy() set_value( @@ -1170,9 +1054,7 @@ def evaluate_dictcomp( authorized_imports, ) if all( - evaluate_ast( - if_clause, new_state, static_tools, custom_tools, authorized_imports - ) + evaluate_ast(if_clause, new_state, static_tools, custom_tools, authorized_imports) for if_clause in gen.ifs ): key = evaluate_ast( @@ -1204,7 +1086,7 @@ def 
evaluate_ast( Evaluate an abstract syntax tree using the content of the variables stored in a state and only evaluating a given set of functions. - This function will recurse trough the nodes of the tree provided. + This function will recurse through the nodes of the tree provided. Args: expression (`ast.AST`): @@ -1229,202 +1111,116 @@ def evaluate_ast( if isinstance(expression, ast.Assign): # Assignment -> we evaluate the assignment which should update the state # We return the variable assigned as it may be used to determine the final result. - return evaluate_assign( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_assign(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.AugAssign): - return evaluate_augassign( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_augassign(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Call): # Function call -> we return the value of the function call - return evaluate_call( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_call(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Constant): # Constant -> just return the value return expression.value elif isinstance(expression, ast.Tuple): return tuple( - evaluate_ast(elt, state, static_tools, custom_tools, authorized_imports) - for elt in expression.elts + evaluate_ast(elt, state, static_tools, custom_tools, authorized_imports) for elt in expression.elts ) elif isinstance(expression, (ast.ListComp, ast.GeneratorExp)): - return evaluate_listcomp( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_listcomp(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.UnaryOp): - return evaluate_unaryop( - expression, state, static_tools, 
custom_tools, authorized_imports - ) + return evaluate_unaryop(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Starred): - return evaluate_ast( - expression.value, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.BoolOp): # Boolean operation -> evaluate the operation - return evaluate_boolop( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_boolop(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Break): raise BreakException() elif isinstance(expression, ast.Continue): raise ContinueException() elif isinstance(expression, ast.BinOp): # Binary operation -> execute operation - return evaluate_binop( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_binop(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Compare): # Comparison -> evaluate the comparison - return evaluate_condition( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_condition(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Lambda): - return evaluate_lambda( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_lambda(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.FunctionDef): - return evaluate_function_def( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_function_def(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Dict): # Dict -> evaluate all keys and values - keys = [ - evaluate_ast(k, state, static_tools, custom_tools, authorized_imports) - for k in expression.keys - ] - 
values = [ - evaluate_ast(v, state, static_tools, custom_tools, authorized_imports) - for v in expression.values - ] + keys = [evaluate_ast(k, state, static_tools, custom_tools, authorized_imports) for k in expression.keys] + values = [evaluate_ast(v, state, static_tools, custom_tools, authorized_imports) for v in expression.values] return dict(zip(keys, values)) elif isinstance(expression, ast.Expr): # Expression -> evaluate the content - return evaluate_ast( - expression.value, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.For): # For loop -> execute the loop - return evaluate_for( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_for(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.FormattedValue): # Formatted value (part of f-string) -> evaluate the content and return - return evaluate_ast( - expression.value, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.If): # If -> execute the right branch - return evaluate_if( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_if(expression, state, static_tools, custom_tools, authorized_imports) elif hasattr(ast, "Index") and isinstance(expression, ast.Index): - return evaluate_ast( - expression.value, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.JoinedStr): return "".join( - [ - str( - evaluate_ast( - v, state, static_tools, custom_tools, authorized_imports - ) - ) - for v in expression.values - ] + [str(evaluate_ast(v, state, static_tools, custom_tools, authorized_imports)) for v in 
expression.values] ) elif isinstance(expression, ast.List): # List -> evaluate all elements - return [ - evaluate_ast(elt, state, static_tools, custom_tools, authorized_imports) - for elt in expression.elts - ] + return [evaluate_ast(elt, state, static_tools, custom_tools, authorized_imports) for elt in expression.elts] elif isinstance(expression, ast.Name): # Name -> pick up the value in the state - return evaluate_name( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_name(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Subscript): # Subscript -> return the value of the indexing - return evaluate_subscript( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_subscript(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.IfExp): - test_val = evaluate_ast( - expression.test, state, static_tools, custom_tools, authorized_imports - ) + test_val = evaluate_ast(expression.test, state, static_tools, custom_tools, authorized_imports) if test_val: - return evaluate_ast( - expression.body, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_ast(expression.body, state, static_tools, custom_tools, authorized_imports) else: - return evaluate_ast( - expression.orelse, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_ast(expression.orelse, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Attribute): - value = evaluate_ast( - expression.value, state, static_tools, custom_tools, authorized_imports - ) + value = evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) return getattr(value, expression.attr) elif isinstance(expression, ast.Slice): return slice( - evaluate_ast( - expression.lower, state, static_tools, custom_tools, authorized_imports - ) + evaluate_ast(expression.lower, state, 
static_tools, custom_tools, authorized_imports) if expression.lower is not None else None, - evaluate_ast( - expression.upper, state, static_tools, custom_tools, authorized_imports - ) + evaluate_ast(expression.upper, state, static_tools, custom_tools, authorized_imports) if expression.upper is not None else None, - evaluate_ast( - expression.step, state, static_tools, custom_tools, authorized_imports - ) + evaluate_ast(expression.step, state, static_tools, custom_tools, authorized_imports) if expression.step is not None else None, ) elif isinstance(expression, ast.DictComp): - return evaluate_dictcomp( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_dictcomp(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.While): - return evaluate_while( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_while(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, (ast.Import, ast.ImportFrom)): return import_modules(expression, state, authorized_imports) elif isinstance(expression, ast.ClassDef): - return evaluate_class_def( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_class_def(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Try): - return evaluate_try( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_try(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Raise): - return evaluate_raise( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_raise(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Assert): - return evaluate_assert( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_assert(expression, state, 
static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.With): - return evaluate_with( - expression, state, static_tools, custom_tools, authorized_imports - ) + return evaluate_with(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Set): - return { - evaluate_ast(elt, state, static_tools, custom_tools, authorized_imports) - for elt in expression.elts - } + return {evaluate_ast(elt, state, static_tools, custom_tools, authorized_imports) for elt in expression.elts} elif isinstance(expression, ast.Return): raise ReturnException( - evaluate_ast( - expression.value, state, static_tools, custom_tools, authorized_imports - ) + evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) if expression.value else None ) @@ -1468,13 +1264,20 @@ def evaluate_python_code( updated by this function to contain all variables as they are evaluated. The print outputs will be stored in the state under the key 'print_outputs'. 
""" - expression = ast.parse(code) + try: + expression = ast.parse(code) + except SyntaxError as e: + raise InterpreterError( + f"Code execution failed on line {e.lineno} due to: {type(e).__name__}\n" + f"{e.text}" + f"{' ' * (e.offset or 0)}^\n" + f"Error: {str(e)}" + ) + if state is None: state = {} - if static_tools is None: - static_tools = {} - if custom_tools is None: - custom_tools = {} + static_tools = static_tools.copy() if static_tools is not None else {} + custom_tools = custom_tools if custom_tools is not None else {} result = None global PRINT_OUTPUTS PRINT_OUTPUTS = "" @@ -1488,24 +1291,21 @@ def final_answer(value): try: for node in expression.body: - result = evaluate_ast( - node, state, static_tools, custom_tools, authorized_imports - ) - state["print_outputs"] = truncate_content( - PRINT_OUTPUTS, max_length=max_print_outputs_length - ) + result = evaluate_ast(node, state, static_tools, custom_tools, authorized_imports) + state["print_outputs"] = truncate_content(PRINT_OUTPUTS, max_length=max_print_outputs_length) is_final_answer = False return result, is_final_answer except FinalAnswerException as e: - state["print_outputs"] = truncate_content( - PRINT_OUTPUTS, max_length=max_print_outputs_length - ) + state["print_outputs"] = truncate_content(PRINT_OUTPUTS, max_length=max_print_outputs_length) is_final_answer = True return e.value, is_final_answer - except InterpreterError as e: - msg = truncate_content(PRINT_OUTPUTS, max_length=max_print_outputs_length) - msg += f"Code execution failed at line '{ast.get_source_segment(code, node)}' because of the following error:\n{e}" - raise InterpreterError(msg) + except Exception as e: + exception_type = type(e).__name__ + error_msg = truncate_content(PRINT_OUTPUTS, max_length=max_print_outputs_length) + error_msg = ( + f"Code execution failed at line '{ast.get_source_segment(code, node)}' due to: {exception_type}:{str(e)}" + ) + raise InterpreterError(error_msg) class LocalPythonInterpreter: @@ -1521,9 
+1321,7 @@ def __init__( if max_print_outputs_length is None: self.max_print_outputs_length = DEFAULT_MAX_LEN_OUTPUT self.additional_authorized_imports = additional_authorized_imports - self.authorized_imports = list( - set(BASE_BUILTIN_MODULES) | set(self.additional_authorized_imports) - ) + self.authorized_imports = list(set(BASE_BUILTIN_MODULES) | set(self.additional_authorized_imports)) # Add base trusted tools to list self.static_tools = { **tools, @@ -1531,9 +1329,7 @@ def __init__( } # TODO: assert self.authorized imports are all installed locally - def __call__( - self, code_action: str, additional_variables: Dict - ) -> Tuple[Any, str, bool]: + def __call__(self, code_action: str, additional_variables: Dict) -> Tuple[Any, str, bool]: self.state.update(additional_variables) output, is_final_answer = evaluate_python_code( code_action, diff --git a/src/smolagents/models.py b/src/smolagents/models.py index ca234f2a3..eb613dffc 100644 --- a/src/smolagents/models.py +++ b/src/smolagents/models.py @@ -14,26 +14,30 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from dataclasses import dataclass, asdict import json import logging import os import random from copy import deepcopy +from dataclasses import asdict, dataclass from enum import Enum -from typing import Dict, List, Optional, Union, Any +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union from huggingface_hub import InferenceClient - +from PIL import Image from transformers import ( - AutoModelForCausalLM, - AutoTokenizer, - StoppingCriteria, + AutoModelForImageTextToText, + AutoProcessor, StoppingCriteriaList, is_torch_available, ) from .tools import Tool +from .utils import _is_package_available, encode_image_base64, make_image_url + + +if TYPE_CHECKING: + from transformers import StoppingCriteriaList logger = logging.getLogger(__name__) @@ -100,11 +104,36 @@ def model_dump_json(self): def from_hf_api(cls, message) -> "ChatMessage": tool_calls = None if getattr(message, "tool_calls", None) is not None: + tool_calls = [ChatMessageToolCall.from_hf_api(tool_call) for tool_call in message.tool_calls] + return cls(role=message.role, content=message.content, tool_calls=tool_calls) + + @classmethod + def from_dict(cls, data: dict) -> "ChatMessage": + if data.get("tool_calls"): tool_calls = [ - ChatMessageToolCall.from_hf_api(tool_call) - for tool_call in message.tool_calls + ChatMessageToolCall( + function=ChatMessageToolCallDefinition(**tc["function"]), id=tc["id"], type=tc["type"] + ) + for tc in data["tool_calls"] ] - return cls(role=message.role, content=message.content, tool_calls=tool_calls) + data["tool_calls"] = tool_calls + return cls(**data) + + +def parse_json_if_needed(arguments: Union[str, dict]) -> Union[str, dict]: + if isinstance(arguments, dict): + return arguments + else: + try: + return json.loads(arguments) + except Exception: + return arguments + + +def parse_tool_args_if_needed(message: ChatMessage) -> ChatMessage: + for tool_call in message.tool_calls: + tool_call.function.arguments = 
parse_json_if_needed(tool_call.function.arguments) + return message class MessageRole(str, Enum): @@ -125,7 +154,7 @@ def roles(cls): } -def get_json_schema(tool: Tool) -> Dict: +def get_tool_json_schema(tool: Tool) -> Dict: properties = deepcopy(tool.inputs) required = [] for key, value in properties.items(): @@ -157,53 +186,116 @@ def remove_stop_sequences(content: str, stop_sequences: List[str]) -> str: def get_clean_message_list( message_list: List[Dict[str, str]], role_conversions: Dict[MessageRole, MessageRole] = {}, + convert_images_to_image_urls: bool = False, + flatten_messages_as_text: bool = False, ) -> List[Dict[str, str]]: """ Subsequent messages with the same role will be concatenated to a single message. + output_message_list is a list of messages that will be used to generate the final message that is chat template compatible with transformers LLM chat template. Args: - message_list (`List[Dict[str, str]]`): List of chat messages. + message_list (`list[dict[str, str]]`): List of chat messages. + role_conversions (`dict[MessageRole, MessageRole]`, *optional* ): Mapping to convert roles. + convert_images_to_image_urls (`bool`, default `False`): Whether to convert images to image URLs. + flatten_messages_as_text (`bool`, default `False`): Whether to flatten messages as text. """ - final_message_list = [] + output_message_list = [] message_list = deepcopy(message_list) # Avoid modifying the original list for message in message_list: - # if not set(message.keys()) == {"role", "content"}: - # raise ValueError("Message should contain only 'role' and 'content' keys!") - role = message["role"] if role not in MessageRole.roles(): - raise ValueError( - f"Incorrect role {role}, only {MessageRole.roles()} are supported for now." 
- ) + raise ValueError(f"Incorrect role {role}, only {MessageRole.roles()} are supported for now.") if role in role_conversions: message["role"] = role_conversions[role] - - if ( - len(final_message_list) > 0 - and message["role"] == final_message_list[-1]["role"] - ): - final_message_list[-1]["content"] += "\n=======\n" + message["content"] + # encode images if needed + if isinstance(message["content"], list): + for i, element in enumerate(message["content"]): + if element["type"] == "image": + assert not flatten_messages_as_text, f"Cannot use images with {flatten_messages_as_text=}" + if convert_images_to_image_urls: + message["content"][i] = { + "type": "image_url", + "image_url": {"url": make_image_url(encode_image_base64(element["image"]))}, + } + else: + message["content"][i]["image"] = encode_image_base64(element["image"]) + + if len(output_message_list) > 0 and message["role"] == output_message_list[-1]["role"]: + assert isinstance(message["content"], list), "Error: wrong content:" + str(message["content"]) + if flatten_messages_as_text: + output_message_list[-1]["content"] += message["content"][0]["text"] + else: + output_message_list[-1]["content"] += message["content"] else: - final_message_list.append(message) - return final_message_list - - -def parse_dictionary(possible_dictionary: str) -> Union[Dict, str]: - try: - start, end = ( - possible_dictionary.find("{"), - possible_dictionary.rfind("}") + 1, - ) - return json.loads(possible_dictionary[start:end]) - except Exception: - return possible_dictionary + if flatten_messages_as_text: + content = message["content"][0]["text"] + else: + content = message["content"] + output_message_list.append({"role": message["role"], "content": content}) + return output_message_list class Model: - def __init__(self): + def __init__(self, **kwargs): self.last_input_token_count = None self.last_output_token_count = None + # Set default values for common parameters + kwargs.setdefault("max_tokens", 4096) + self.kwargs = 
kwargs + + def _prepare_completion_kwargs( + self, + messages: List[Dict[str, str]], + stop_sequences: Optional[List[str]] = None, + grammar: Optional[str] = None, + tools_to_call_from: Optional[List[Tool]] = None, + custom_role_conversions: Optional[Dict[str, str]] = None, + convert_images_to_image_urls: bool = False, + flatten_messages_as_text: bool = False, + **kwargs, + ) -> Dict: + """ + Prepare parameters required for model invocation, handling parameter priorities. + + Parameter priority from high to low: + 1. Explicitly passed kwargs + 2. Specific parameters (stop_sequences, grammar, etc.) + 3. Default values in self.kwargs + """ + # Clean and standardize the message list + messages = get_clean_message_list( + messages, + role_conversions=custom_role_conversions or tool_role_conversions, + convert_images_to_image_urls=convert_images_to_image_urls, + flatten_messages_as_text=flatten_messages_as_text, + ) + + # Use self.kwargs as the base configuration + completion_kwargs = { + **self.kwargs, + "messages": messages, + } + + # Handle specific parameters + if stop_sequences is not None: + completion_kwargs["stop"] = stop_sequences + if grammar is not None: + completion_kwargs["grammar"] = grammar + + # Handle tools parameter + if tools_to_call_from: + completion_kwargs.update( + { + "tools": [get_tool_json_schema(tool) for tool in tools_to_call_from], + "tool_choice": "required", + } + ) + + # Finally, use the passed-in kwargs to override all settings + completion_kwargs.update(kwargs) + + return completion_kwargs def get_token_counts(self) -> Dict[str, int]: return { @@ -216,6 +308,8 @@ def __call__( messages: List[Dict[str, str]], stop_sequences: Optional[List[str]] = None, grammar: Optional[str] = None, + tools_to_call_from: Optional[List[Tool]] = None, + **kwargs, ) -> ChatMessage: """Process the input messages and return the model's response. 
@@ -226,8 +320,13 @@ def __call__( A list of strings that will stop the generation if encountered in the model's output. grammar (`str`, *optional*): The grammar or formatting structure to use in the model's response. + tools_to_call_from (`List[Tool]`, *optional*): + A list of tools that the model can use to generate responses. + **kwargs: + Additional keyword arguments to be passed to the underlying model. + Returns: - `str`: The text content of the model's response. + `ChatMessage`: A chat message object containing the model's response. """ pass # To be implemented in child classes! @@ -246,6 +345,8 @@ class HfApiModel(Model): If not provided, the class will try to use environment variable 'HF_TOKEN', else use the token stored in the Hugging Face CLI configuration. timeout (`int`, *optional*, defaults to 120): Timeout for the API request, in seconds. + **kwargs: + Additional keyword arguments to pass to the Hugging Face API. Raises: ValueError: @@ -270,16 +371,13 @@ def __init__( model_id: str = "Qwen/Qwen2.5-Coder-32B-Instruct", token: Optional[str] = None, timeout: Optional[int] = 120, - temperature: float = 0.5, **kwargs, ): - super().__init__() + super().__init__(**kwargs) self.model_id = model_id if token is None: token = os.getenv("HF_TOKEN") self.client = InferenceClient(self.model_id, token=token, timeout=timeout) - self.temperature = temperature - self.kwargs = kwargs def __call__( self, @@ -287,34 +385,25 @@ def __call__( stop_sequences: Optional[List[str]] = None, grammar: Optional[str] = None, tools_to_call_from: Optional[List[Tool]] = None, + **kwargs, ) -> ChatMessage: - """ - Gets an LLM output message for the given list of input messages. - If argument `tools_to_call_from` is passed, the model's tool calling options will be used to return a tool call. 
- """ - messages = get_clean_message_list( - messages, role_conversions=tool_role_conversions + completion_kwargs = self._prepare_completion_kwargs( + messages=messages, + stop_sequences=stop_sequences, + grammar=grammar, + tools_to_call_from=tools_to_call_from, + convert_images_to_image_urls=True, + **kwargs, ) - if tools_to_call_from: - response = self.client.chat.completions.create( - messages=messages, - tools=[get_json_schema(tool) for tool in tools_to_call_from], - tool_choice="auto", - stop=stop_sequences, - temperature=self.temperature, - **self.kwargs, - ) - else: - response = self.client.chat.completions.create( - model=self.model_id, - messages=messages, - stop=stop_sequences, - temperature=self.temperature, - **self.kwargs, - ) + + response = self.client.chat_completion(**completion_kwargs) + self.last_input_token_count = response.usage.prompt_tokens self.last_output_token_count = response.usage.completion_tokens - return ChatMessage.from_hf_api(response.choices[0].message) + message = ChatMessage.from_hf_api(response.choices[0].message) + if tools_to_call_from is not None: + return parse_tool_args_if_needed(message) + return message class TransformersModel(Model): @@ -322,6 +411,9 @@ class TransformersModel(Model): This model allows you to communicate with Hugging Face's models using the Inference API. It can be used in both serverless mode or with a dedicated endpoint, supporting features like stop sequences and grammar customization. + > [!TIP] + > You must have `transformers` and `torch` installed on your machine. Please run `pip install smolagents[transformers]` if it's not the case. + Parameters: model_id (`str`, *optional*, defaults to `"Qwen/Qwen2.5-Coder-32B-Instruct"`): The Hugging Face model ID to be used for inference. This can be a path or model identifier from the Hugging Face model hub. @@ -329,10 +421,15 @@ class TransformersModel(Model): The device_map to initialize your model with. 
torch_dtype (`str`, *optional*): The torch_dtype to initialize your model with. - trust_remote_code (bool): + trust_remote_code (bool, default `False`): Some models on the Hub require running remote code: for this model, you would have to set this flag to True. + flatten_messages_as_text (`bool`, default `True`): + Whether to flatten messages as text: this must be sent to False to use VLMs (as opposed to LLMs for which this flag can be ignored). + Caution: this parameter is experimental and will be removed in an upcoming PR as we auto-detect VLMs. kwargs (dict, *optional*): Any additional keyword arguments that you want to use in model.generate(), for instance `max_new_tokens` or `device`. + **kwargs: + Additional keyword arguments to pass to `model.generate()`, for instance `max_new_tokens` or `device`. Raises: ValueError: If the model name is not provided. @@ -357,43 +454,52 @@ def __init__( device_map: Optional[str] = None, torch_dtype: Optional[str] = None, trust_remote_code: bool = False, + flatten_messages_as_text: bool = True, **kwargs, ): - super().__init__() - if not is_torch_available(): - raise ImportError("Please install torch in order to use TransformersModel.") + super().__init__(**kwargs) + if not is_torch_available() or not _is_package_available("transformers"): + raise ModuleNotFoundError( + "Please install 'transformers' extra to use 'TransformersModel': `pip install 'smolagents[transformers]'`" + ) import torch + from transformers import AutoModelForCausalLM, AutoTokenizer default_model_id = "HuggingFaceTB/SmolLM2-1.7B-Instruct" if model_id is None: model_id = default_model_id - logger.warning( - f"`model_id`not provided, using this default tokenizer for token counts: '{model_id}'" - ) + logger.warning(f"`model_id`not provided, using this default tokenizer for token counts: '{model_id}'") self.model_id = model_id self.kwargs = kwargs if device_map is None: device_map = "cuda" if torch.cuda.is_available() else "cpu" logger.info(f"Using device: 
{device_map}") try: - self.tokenizer = AutoTokenizer.from_pretrained(model_id) self.model = AutoModelForCausalLM.from_pretrained( model_id, device_map=device_map, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, ) + self.tokenizer = AutoTokenizer.from_pretrained(model_id) + except ValueError as e: + if "Unrecognized configuration class" in str(e): + self.model = AutoModelForImageTextToText.from_pretrained(model_id, device_map=device_map) + self.processor = AutoProcessor.from_pretrained(model_id) + else: + raise e except Exception as e: logger.warning( f"Failed to load tokenizer and model for {model_id=}: {e}. Loading default tokenizer and model instead from {default_model_id=}." ) self.model_id = default_model_id self.tokenizer = AutoTokenizer.from_pretrained(default_model_id) - self.model = AutoModelForCausalLM.from_pretrained( - model_id, device_map=device_map, torch_dtype=torch_dtype - ) + self.model = AutoModelForCausalLM.from_pretrained(model_id, device_map=device_map, torch_dtype=torch_dtype) + self.flatten_messages_as_text = flatten_messages_as_text + + def make_stopping_criteria(self, stop_sequences: List[str], tokenizer) -> "StoppingCriteriaList": + from transformers import StoppingCriteria, StoppingCriteriaList - def make_stopping_criteria(self, stop_sequences: List[str]) -> StoppingCriteriaList: class StopOnStrings(StoppingCriteria): def __init__(self, stop_strings: List[str], tokenizer): self.stop_strings = stop_strings @@ -404,20 +510,13 @@ def reset(self): self.stream = "" def __call__(self, input_ids, scores, **kwargs): - generated = self.tokenizer.decode( - input_ids[0][-1], skip_special_tokens=True - ) + generated = self.tokenizer.decode(input_ids[0][-1], skip_special_tokens=True) self.stream += generated - if any( - [ - self.stream.endswith(stop_string) - for stop_string in self.stop_strings - ] - ): + if any([self.stream.endswith(stop_string) for stop_string in self.stop_strings]): return True return False - return 
StoppingCriteriaList([StopOnStrings(stop_sequences, self.tokenizer)]) + return StoppingCriteriaList([StopOnStrings(stop_sequences, tokenizer)]) def __call__( self, @@ -425,41 +524,77 @@ def __call__( stop_sequences: Optional[List[str]] = None, grammar: Optional[str] = None, tools_to_call_from: Optional[List[Tool]] = None, + images: Optional[List[Image.Image]] = None, + **kwargs, ) -> ChatMessage: - messages = get_clean_message_list( - messages, role_conversions=tool_role_conversions + completion_kwargs = self._prepare_completion_kwargs( + messages=messages, + stop_sequences=stop_sequences, + grammar=grammar, + tools_to_call_from=tools_to_call_from, + flatten_messages_as_text=self.flatten_messages_as_text, + **kwargs, ) - if tools_to_call_from is not None: - prompt_tensor = self.tokenizer.apply_chat_template( + + messages = completion_kwargs.pop("messages") + stop_sequences = completion_kwargs.pop("stop", None) + + max_new_tokens = ( + kwargs.get("max_new_tokens") + or kwargs.get("max_tokens") + or self.kwargs.get("max_new_tokens") + or self.kwargs.get("max_tokens") + ) + + if max_new_tokens: + completion_kwargs["max_new_tokens"] = max_new_tokens + + if hasattr(self, "processor"): + images = [Image.open(image) for image in images] if images else None + prompt_tensor = self.processor.apply_chat_template( messages, - tools=[get_json_schema(tool) for tool in tools_to_call_from], + tools=[get_tool_json_schema(tool) for tool in tools_to_call_from] if tools_to_call_from else None, return_tensors="pt", + tokenize=True, return_dict=True, - add_generation_prompt=True, + images=images, + add_generation_prompt=True if tools_to_call_from else False, ) else: prompt_tensor = self.tokenizer.apply_chat_template( messages, + tools=[get_tool_json_schema(tool) for tool in tools_to_call_from] if tools_to_call_from else None, return_tensors="pt", return_dict=True, + add_generation_prompt=True if tools_to_call_from else False, ) + prompt_tensor = prompt_tensor.to(self.model.device) 
count_prompt_tokens = prompt_tensor["input_ids"].shape[1] + if stop_sequences: + stopping_criteria = self.make_stopping_criteria( + stop_sequences, tokenizer=self.processor if hasattr(self, "processor") else self.tokenizer + ) + else: + stopping_criteria = None + out = self.model.generate( **prompt_tensor, - stopping_criteria=( - self.make_stopping_criteria(stop_sequences) if stop_sequences else None - ), - **self.kwargs, + stopping_criteria=stopping_criteria, + **completion_kwargs, ) generated_tokens = out[0, count_prompt_tokens:] - output = self.tokenizer.decode(generated_tokens, skip_special_tokens=True) + if hasattr(self, "processor"): + output = self.processor.decode(generated_tokens, skip_special_tokens=True) + else: + output = self.tokenizer.decode(generated_tokens, skip_special_tokens=True) self.last_input_token_count = count_prompt_tokens self.last_output_token_count = len(generated_tokens) if stop_sequences is not None: output = remove_stop_sequences(output, stop_sequences) + if tools_to_call_from is None: return ChatMessage(role="assistant", content=output) else: @@ -475,9 +610,7 @@ def __call__( ChatMessageToolCall( id="".join(random.choices("0123456789", k=5)), type="function", - function=ChatMessageToolCallDefinition( - name=tool_name, arguments=tool_arguments - ), + function=ChatMessageToolCallDefinition(name=tool_name, arguments=tool_arguments), ) ], ) @@ -489,9 +622,9 @@ class LiteLLMModel(Model): Parameters: model_id (`str`): The model identifier to use on the server (e.g. "gpt-3.5-turbo"). - api_base (`str`): + api_base (`str`, *optional*): The base URL of the OpenAI-compatible API server. - api_key (`str`): + api_key (`str`, *optional*): The API key to use for authentication. **kwargs: Additional keyword arguments to pass to the OpenAI API. 
@@ -510,13 +643,13 @@ def __init__( raise ModuleNotFoundError( "Please install 'litellm' extra to use LiteLLMModel: `pip install 'smolagents[litellm]'`" ) - super().__init__() + + super().__init__(**kwargs) self.model_id = model_id # IMPORTANT - Set this to TRUE to add the function to the prompt for Non OpenAI LLMs litellm.add_function_to_prompt = True self.api_base = api_base self.api_key = api_key - self.kwargs = kwargs def __call__( self, @@ -524,35 +657,34 @@ def __call__( stop_sequences: Optional[List[str]] = None, grammar: Optional[str] = None, tools_to_call_from: Optional[List[Tool]] = None, + **kwargs, ) -> ChatMessage: - messages = get_clean_message_list( - messages, role_conversions=tool_role_conversions - ) import litellm - if tools_to_call_from: - response = litellm.completion( - model=self.model_id, - messages=messages, - tools=[get_json_schema(tool) for tool in tools_to_call_from], - tool_choice="required", - stop=stop_sequences, - api_base=self.api_base, - api_key=self.api_key, - **self.kwargs, - ) - else: - response = litellm.completion( - model=self.model_id, - messages=messages, - stop=stop_sequences, - api_base=self.api_base, - api_key=self.api_key, - **self.kwargs, - ) + completion_kwargs = self._prepare_completion_kwargs( + messages=messages, + stop_sequences=stop_sequences, + grammar=grammar, + tools_to_call_from=tools_to_call_from, + model=self.model_id, + api_base=self.api_base, + api_key=self.api_key, + convert_images_to_image_urls=True, + **kwargs, + ) + + response = litellm.completion(**completion_kwargs) + self.last_input_token_count = response.usage.prompt_tokens self.last_output_token_count = response.usage.completion_tokens - return response.choices[0].message + + message = ChatMessage.from_dict( + response.choices[0].message.model_dump(include={"role", "content", "tool_calls"}) + ) + + if tools_to_call_from is not None: + return parse_tool_args_if_needed(message) + return message class OpenAIServerModel(Model): @@ -565,7 +697,7 @@ 
class OpenAIServerModel(Model): The base URL of the OpenAI-compatible API server. api_key (`str`, *optional*): The API key to use for authentication. - custom_role_conversions (`Dict{str, str]`, *optional*): + custom_role_conversions (`dict[str, str]`, *optional*): Custom role conversion mapping to convert message roles in others. Useful for specific models that do not support specific message roles like "system". **kwargs: @@ -586,13 +718,13 @@ def __init__( raise ModuleNotFoundError( "Please install 'openai' extra to use OpenAIServerModel: `pip install 'smolagents[openai]'`" ) from None - super().__init__() + + super().__init__(**kwargs) self.model_id = model_id self.client = openai.OpenAI( base_url=api_base, api_key=api_key, ) - self.kwargs = kwargs self.custom_role_conversions = custom_role_conversions def __call__( @@ -601,34 +733,68 @@ def __call__( stop_sequences: Optional[List[str]] = None, grammar: Optional[str] = None, tools_to_call_from: Optional[List[Tool]] = None, + **kwargs, ) -> ChatMessage: - messages = get_clean_message_list( - messages, - role_conversions=( - self.custom_role_conversions - if self.custom_role_conversions - else tool_role_conversions - ), + completion_kwargs = self._prepare_completion_kwargs( + messages=messages, + stop_sequences=stop_sequences, + grammar=grammar, + tools_to_call_from=tools_to_call_from, + model=self.model_id, + custom_role_conversions=self.custom_role_conversions, + convert_images_to_image_urls=True, + **kwargs, ) - if tools_to_call_from: - response = self.client.chat.completions.create( - model=self.model_id, - messages=messages, - tools=[get_json_schema(tool) for tool in tools_to_call_from], - tool_choice="auto", - stop=stop_sequences, - **self.kwargs, - ) - else: - response = self.client.chat.completions.create( - model=self.model_id, - messages=messages, - stop=stop_sequences, - **self.kwargs, - ) + + response = self.client.chat.completions.create(**completion_kwargs) self.last_input_token_count = 
response.usage.prompt_tokens self.last_output_token_count = response.usage.completion_tokens - return response.choices[0].message + + message = ChatMessage.from_dict( + response.choices[0].message.model_dump(include={"role", "content", "tool_calls"}) + ) + if tools_to_call_from is not None: + return parse_tool_args_if_needed(message) + return message + + +class AzureOpenAIServerModel(OpenAIServerModel): + """This model connects to an Azure OpenAI deployment. + + Parameters: + model_id (`str`): + The model deployment name to use when connecting (e.g. "gpt-4o-mini"). + azure_endpoint (`str`, *optional*): + The Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/`. If not provided, it will be inferred from the `AZURE_OPENAI_ENDPOINT` environment variable. + api_key (`str`, *optional*): + The API key to use for authentication. If not provided, it will be inferred from the `AZURE_OPENAI_API_KEY` environment variable. + api_version (`str`, *optional*): + The API version to use. If not provided, it will be inferred from the `OPENAI_API_VERSION` environment variable. + custom_role_conversions (`dict[str, str]`, *optional*): + Custom role conversion mapping to convert message roles in others. + Useful for specific models that do not support specific message roles like "system". + **kwargs: + Additional keyword arguments to pass to the Azure OpenAI API. 
+ """ + + def __init__( + self, + model_id: str, + azure_endpoint: Optional[str] = None, + api_key: Optional[str] = None, + api_version: Optional[str] = None, + custom_role_conversions: Optional[Dict[str, str]] = None, + **kwargs, + ): + # read the api key manually, to avoid super().__init__() trying to use the wrong api_key (OPENAI_API_KEY) + if api_key is None: + api_key = os.environ.get("AZURE_OPENAI_API_KEY") + + super().__init__(model_id=model_id, api_key=api_key, custom_role_conversions=custom_role_conversions, **kwargs) + # if we've reached this point, it means the openai package is available (checked in baseclass) so go ahead and import it + import openai + + self.client = openai.AzureOpenAI(api_key=api_key, api_version=api_version, azure_endpoint=azure_endpoint) __all__ = [ @@ -640,5 +806,6 @@ def __call__( "HfApiModel", "LiteLLMModel", "OpenAIServerModel", + "AzureOpenAIServerModel", "ChatMessage", ] diff --git a/src/smolagents/monitoring.py b/src/smolagents/monitoring.py index 13de79646..59f43f443 100644 --- a/src/smolagents/monitoring.py +++ b/src/smolagents/monitoring.py @@ -22,10 +22,7 @@ def __init__(self, tracked_model, logger): self.step_durations = [] self.tracked_model = tracked_model self.logger = logger - if ( - getattr(self.tracked_model, "last_input_token_count", "Not found") - != "Not found" - ): + if getattr(self.tracked_model, "last_input_token_count", "Not found") != "Not found": self.total_input_token_count = 0 self.total_output_token_count = 0 @@ -41,6 +38,11 @@ def reset(self): self.total_output_token_count = 0 def update_metrics(self, step_log): + """Update the metrics of the monitor. + + Args: + step_log ([`AgentStepLog`]): Step log to update the monitor with. 
+ """ step_duration = step_log.duration self.step_durations.append(step_duration) console_outputs = f"[Step {len(self.step_durations) - 1}: Duration {step_duration:.2f} seconds" @@ -48,7 +50,9 @@ def update_metrics(self, step_log): if getattr(self.tracked_model, "last_input_token_count", None) is not None: self.total_input_token_count += self.tracked_model.last_input_token_count self.total_output_token_count += self.tracked_model.last_output_token_count - console_outputs += f"| Input tokens: {self.total_input_token_count:,} | Output tokens: {self.total_output_token_count:,}" + console_outputs += ( + f"| Input tokens: {self.total_input_token_count:,} | Output tokens: {self.total_output_token_count:,}" + ) console_outputs += "]" self.logger.log(Text(console_outputs, style="dim"), level=1) diff --git a/src/smolagents/prompts.py b/src/smolagents/prompts.py index af68b2785..ce905c81b 100644 --- a/src/smolagents/prompts.py +++ b/src/smolagents/prompts.py @@ -125,15 +125,15 @@ Action: { - "tool_name": "image_transformer", - "tool_arguments": {"image": "image_1.jpg"} + "name": "image_transformer", + "arguments": {"image": "image_1.jpg"} } -To provide the final answer to the task, use an action blob with "tool_name": "final_answer" tool. It is the only way to complete the task, else you will be stuck on a loop. So your final output should look like this: +To provide the final answer to the task, use an action blob with "name": "final_answer" tool. It is the only way to complete the task, else you will be stuck on a loop. 
So your final output should look like this: Action: { - "tool_name": "final_answer", - "tool_arguments": {"answer": "insert your final answer here"} + "name": "final_answer", + "arguments": {"answer": "insert your final answer here"} } @@ -143,22 +143,22 @@ Action: { - "tool_name": "document_qa", - "tool_arguments": {"document": "document.pdf", "question": "Who is the oldest person mentioned?"} + "name": "document_qa", + "arguments": {"document": "document.pdf", "question": "Who is the oldest person mentioned?"} } Observation: "The oldest person in the document is John Doe, a 55 year old lumberjack living in Newfoundland." Action: { - "tool_name": "image_generator", - "tool_arguments": {"prompt": "A portrait of John Doe, a 55-year-old man living in Canada."} + "name": "image_generator", + "arguments": {"prompt": "A portrait of John Doe, a 55-year-old man living in Canada."} } Observation: "image.png" Action: { - "tool_name": "final_answer", - "tool_arguments": "image.png" + "name": "final_answer", + "arguments": "image.png" } --- @@ -166,15 +166,15 @@ Action: { - "tool_name": "python_interpreter", - "tool_arguments": {"code": "5 + 3 + 1294.678"} + "name": "python_interpreter", + "arguments": {"code": "5 + 3 + 1294.678"} } Observation: 1302.678 Action: { - "tool_name": "final_answer", - "tool_arguments": "1302.678" + "name": "final_answer", + "arguments": "1302.678" } --- @@ -182,23 +182,23 @@ Action: { - "tool_name": "search", - "tool_arguments": "Population Guangzhou" + "name": "search", + "arguments": "Population Guangzhou" } Observation: ['Guangzhou has a population of 15 million inhabitants as of 2021.'] Action: { - "tool_name": "search", - "tool_arguments": "Population Shanghai" + "name": "search", + "arguments": "Population Shanghai" } Observation: '26 million (2019)' Action: { - "tool_name": "final_answer", - "tool_arguments": "Shanghai" + "name": "final_answer", + "arguments": "Shanghai" } diff --git a/src/smolagents/tool_validation.py 
b/src/smolagents/tool_validation.py index 821c315a2..9ac157c4e 100644 --- a/src/smolagents/tool_validation.py +++ b/src/smolagents/tool_validation.py @@ -1,10 +1,10 @@ import ast import builtins import inspect -import textwrap from typing import Set -from .utils import BASE_BUILTIN_MODULES +from .utils import BASE_BUILTIN_MODULES, get_source + _BUILTIN_NAMES = set(vars(builtins)) @@ -131,7 +131,7 @@ def validate_tool_attributes(cls, check_imports: bool = True) -> None: """ errors = [] - source = textwrap.dedent(inspect.getsource(cls)) + source = get_source(cls) tree = ast.parse(source) @@ -141,9 +141,7 @@ def validate_tool_attributes(cls, check_imports: bool = True) -> None: # Check that __init__ method takes no arguments if not cls.__init__.__qualname__ == "Tool.__init__": sig = inspect.signature(cls.__init__) - non_self_params = list( - [arg_name for arg_name in sig.parameters.keys() if arg_name != "self"] - ) + non_self_params = list([arg_name for arg_name in sig.parameters.keys() if arg_name != "self"]) if len(non_self_params) > 0: errors.append( f"This tool has additional args specified in __init__(self): {non_self_params}. Make sure it does not, all values should be hardcoded!" 
@@ -174,9 +172,7 @@ def visit_Assign(self, node): # Check if the assignment is more complex than simple literals if not all( - isinstance( - val, (ast.Str, ast.Num, ast.Constant, ast.Dict, ast.List, ast.Set) - ) + isinstance(val, (ast.Str, ast.Num, ast.Constant, ast.Dict, ast.List, ast.Set)) for val in ast.walk(node.value) ): for target in node.targets: @@ -195,9 +191,7 @@ def visit_Assign(self, node): # Run checks on all methods for node in class_node.body: if isinstance(node, ast.FunctionDef): - method_checker = MethodChecker( - class_level_checker.class_attributes, check_imports=check_imports - ) + method_checker = MethodChecker(class_level_checker.class_attributes, check_imports=check_imports) method_checker.visit(node) errors += [f"- {node.name}: {error}" for error in method_checker.errors] diff --git a/src/smolagents/tools.py b/src/smolagents/tools.py index fc85979ee..10b22ea03 100644 --- a/src/smolagents/tools.py +++ b/src/smolagents/tools.py @@ -26,7 +26,7 @@ from contextlib import contextmanager from functools import lru_cache, wraps from pathlib import Path -from typing import Callable, Dict, List, Optional, Union, get_type_hints +from typing import Callable, Dict, List, Optional, Union from huggingface_hub import ( create_repo, @@ -35,55 +35,21 @@ metadata_update, upload_folder, ) -from huggingface_hub.utils import RepositoryNotFoundError - +from huggingface_hub.utils import is_torch_available from packaging import version -from transformers.dynamic_module_utils import get_imports -from transformers.utils import ( + +from ._function_type_hints_utils import ( TypeHintParsingException, - cached_file, + _convert_type_hints_to_json_schema, + get_imports, get_json_schema, - is_accelerate_available, - is_torch_available, ) -from transformers.utils.chat_template_utils import _parse_type_hint - from .tool_validation import MethodChecker, validate_tool_attributes -from .types import ImageType, handle_agent_input_types, handle_agent_output_types -from .utils 
import instance_to_source - -logger = logging.getLogger(__name__) - -if is_accelerate_available(): - from accelerate import PartialState - from accelerate.utils import send_to_device - -if is_torch_available(): - from transformers import AutoProcessor -else: - AutoProcessor = object +from .types import handle_agent_input_types, handle_agent_output_types +from .utils import _is_package_available, _is_pillow_available, get_source, instance_to_source -TOOL_CONFIG_FILE = "tool_config.json" - -def get_repo_type(repo_id, repo_type=None, **hub_kwargs): - if repo_type is not None: - return repo_type - try: - hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="space", **hub_kwargs) - return "space" - except RepositoryNotFoundError: - try: - hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="model", **hub_kwargs) - return "model" - except RepositoryNotFoundError: - raise EnvironmentError( - f"`{repo_id}` does not seem to be a valid repo identifier on the Hub." - ) - except Exception: - return "model" - except Exception: - return "space" +logger = logging.getLogger(__name__) def validate_after_init(cls): @@ -98,24 +64,6 @@ def new_init(self, *args, **kwargs): return cls -def _convert_type_hints_to_json_schema(func: Callable) -> Dict: - type_hints = get_type_hints(func) - signature = inspect.signature(func) - properties = {} - for param_name, param_type in type_hints.items(): - if param_name != "return": - properties[param_name] = _parse_type_hint(param_type) - if signature.parameters[param_name].default != inspect.Parameter.empty: - properties[param_name]["nullable"] = True - for param_name in signature.parameters.keys(): - if signature.parameters[param_name].default != inspect.Parameter.empty: - if ( - param_name not in properties - ): # this can happen if the param has no type hint but a default value - properties[param_name] = {"nullable": True} - return properties - - AUTHORIZED_TYPES = [ "string", "boolean", @@ -123,8 +71,10 @@ def 
_convert_type_hints_to_json_schema(func: Callable) -> Dict: "number", "image", "audio", - "any", + "array", "object", + "any", + "null", ] CONVERSION_DICT = {"str": "string", "int": "integer", "float": "number"} @@ -181,9 +131,7 @@ def validate_arguments(self): f"Attribute {attr} should have type {expected_type.__name__}, got {type(attr_value)} instead." ) for input_name, input_content in self.inputs.items(): - assert isinstance(input_content, dict), ( - f"Input '{input_name}' should be a dictionary." - ) + assert isinstance(input_content, dict), f"Input '{input_name}' should be a dictionary." assert "type" in input_content and "description" in input_content, ( f"Input '{input_name}' should have keys 'type' and 'description', has only {list(input_content.keys())}." ) @@ -194,7 +142,7 @@ def validate_arguments(self): assert getattr(self, "output_type", None) in AUTHORIZED_TYPES - # Validate forward function signature, except for Tools that use a "generic" signature (PipelineTool, SpaceToolWrapper) + # Validate forward function signature, except for Tools that use a "generic" signature (PipelineTool, SpaceToolWrapper, LangChainToolWrapper) if not ( hasattr(self, "skip_forward_signature_validation") and getattr(self, "skip_forward_signature_validation") is True @@ -206,10 +154,15 @@ def validate_arguments(self): "Tool's 'forward' method should take 'self' as its first argument, then its next arguments should match the keys of tool attribute 'inputs'." 
) - json_schema = _convert_type_hints_to_json_schema(self.forward) + json_schema = _convert_type_hints_to_json_schema(self.forward, error_on_missing_type_hints=False)[ + "properties" + ] # This function will not raise an error on missing docstrings, contrary to get_json_schema for key, value in self.inputs.items(): + assert key in json_schema, ( + f"Input '{key}' should be present in function signature, found only {json_schema.keys()}" + ) if "nullable" in value: - assert key in json_schema and "nullable" in json_schema[key], ( + assert "nullable" in json_schema[key], ( f"Nullable argument '{key}' in inputs should have key 'nullable' set to True in function signature." ) if key in json_schema and "nullable" in json_schema[key]: @@ -267,8 +220,8 @@ def save(self, output_dir): # Save tool file if type(self).__name__ == "SimpleTool": # Check that imports are self-contained - source_code = inspect.getsource(self.forward).replace("@tool", "") - forward_node = ast.parse(textwrap.dedent(source_code)) + source_code = get_source(self.forward).replace("@tool", "") + forward_node = ast.parse(source_code) # If tool was created using '@tool' decorator, it has only a forward pass, so it's simpler to just get its code method_checker = MethodChecker(set()) method_checker.visit(forward_node) @@ -276,7 +229,7 @@ def save(self, output_dir): if len(method_checker.errors) > 0: raise (ValueError("\n".join(method_checker.errors))) - forward_source_code = inspect.getsource(self.forward) + forward_source_code = get_source(self.forward) tool_code = textwrap.dedent( f""" from smolagents import Tool @@ -343,20 +296,8 @@ def replacement(match): ) # Save requirements file + imports = {el for el in get_imports(tool_file) if el not in sys.stdlib_module_names} | {"smolagents"} requirements_file = os.path.join(output_dir, "requirements.txt") - - imports = [] - for module in [tool_file]: - imports.extend(get_imports(module)) - imports = list( - set( - [ - el - for el in imports + ["smolagents"] - if 
el not in sys.stdlib_module_names - ] - ) - ) with open(requirements_file, "w", encoding="utf-8") as f: f.write("\n".join(imports) + "\n") @@ -407,12 +348,9 @@ def push_to_hub( with tempfile.TemporaryDirectory() as work_dir: # Save all files. self.save(work_dir) - print(work_dir) with open(work_dir + "/tool.py", "r") as f: print("\n".join(f.readlines())) - logger.info( - f"Uploading the following files to {repo_id}: {','.join(os.listdir(work_dir))}" - ) + logger.info(f"Uploading the following files to {repo_id}: {','.join(os.listdir(work_dir))}") return upload_folder( repo_id=repo_id, commit_message=commit_message, @@ -455,53 +393,27 @@ def from_hub( `cache_dir`, `revision`, `subfolder`) will be used when downloading the files for your tool, and the others will be passed along to its init. """ - assert trust_remote_code, ( - "Loading a tool from Hub requires to trust remote code. Make sure you've inspected the repo and pass `trust_remote_code=True` to load the tool." - ) - - hub_kwargs_names = [ - "cache_dir", - "force_download", - "resume_download", - "proxies", - "revision", - "repo_type", - "subfolder", - "local_files_only", - ] - hub_kwargs = {k: v for k, v in kwargs.items() if k in hub_kwargs_names} - - tool_file = "tool.py" + if not trust_remote_code: + raise ValueError( + "Loading a tool from Hub requires to trust remote code. Make sure you've inspected the repo and pass `trust_remote_code=True` to load the tool." + ) # Get the tool's tool.py file. 
- hub_kwargs["repo_type"] = get_repo_type(repo_id, **hub_kwargs) - resolved_tool_file = cached_file( + tool_file = hf_hub_download( repo_id, - tool_file, + "tool.py", token=token, - **hub_kwargs, - _raise_exceptions_for_gated_repo=False, - _raise_exceptions_for_missing_entries=False, - _raise_exceptions_for_connection_errors=False, + repo_type="space", + cache_dir=kwargs.get("cache_dir"), + force_download=kwargs.get("force_download"), + resume_download=kwargs.get("resume_download"), + proxies=kwargs.get("proxies"), + revision=kwargs.get("revision"), + subfolder=kwargs.get("subfolder"), + local_files_only=kwargs.get("local_files_only"), ) - tool_code = resolved_tool_file is not None - if resolved_tool_file is None: - resolved_tool_file = cached_file( - repo_id, - tool_file, - token=token, - **hub_kwargs, - _raise_exceptions_for_gated_repo=False, - _raise_exceptions_for_missing_entries=False, - _raise_exceptions_for_connection_errors=False, - ) - if resolved_tool_file is None: - raise EnvironmentError( - f"{repo_id} does not appear to provide a valid configuration in `tool_config.json` or `config.json`." - ) - with open(resolved_tool_file, encoding="utf-8") as reader: - tool_code = "".join(reader.readlines()) + tool_code = Path(tool_file).read_text() # Find the Tool subclass in the namespace with tempfile.TemporaryDirectory() as temp_dir: @@ -592,9 +504,7 @@ def __init__( self.name = name self.description = description self.client = Client(space_id, hf_token=token) - space_description = self.client.view_api( - return_format="dict", print_info=False - )["named_endpoints"] + space_description = self.client.view_api(return_format="dict", print_info=False)["named_endpoints"] # If api_name is not defined, take the first of the available APIs for this space if api_name is None: @@ -607,9 +517,7 @@ def __init__( try: space_description_api = space_description[api_name] except KeyError: - raise KeyError( - f"Could not find specified {api_name=} among available api names." 
- ) + raise KeyError(f"Could not find specified {api_name=} among available api names.") self.inputs = {} for parameter in space_description_api["parameters"]: @@ -633,7 +541,10 @@ def __init__( def sanitize_argument_for_prediction(self, arg): from gradio_client.utils import is_http_url_like - if isinstance(arg, ImageType): + if _is_pillow_available(): + from PIL.Image import Image + + if _is_pillow_available() and isinstance(arg, Image): temp_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False) arg.save(temp_file.name) arg = temp_file.name @@ -683,8 +594,7 @@ def __init__(self, _gradio_tool): self._gradio_tool = _gradio_tool func_args = list(inspect.signature(_gradio_tool.run).parameters.items()) self.inputs = { - key: {"type": CONVERSION_DICT[value.annotation], "description": ""} - for key, value in func_args + key: {"type": CONVERSION_DICT[value.annotation], "description": ""} for key, value in func_args } self.forward = self._gradio_tool.run @@ -697,6 +607,8 @@ def from_langchain(langchain_tool): """ class LangChainToolWrapper(Tool): + skip_forward_signature_validation = True + def __init__(self, _langchain_tool): self.name = _langchain_tool.name.lower() self.description = _langchain_tool.description @@ -707,6 +619,7 @@ def __init__(self, _langchain_tool): input_content["description"] = "" self.output_type = "string" self.langchain_tool = _langchain_tool + self.is_initialized = True def forward(self, *args, **kwargs): tool_input = kwargs.copy() @@ -726,9 +639,7 @@ def forward(self, *args, **kwargs): """ -def get_tool_description_with_args( - tool: Tool, description_template: Optional[str] = None -) -> str: +def get_tool_description_with_args(tool: Tool, description_template: Optional[str] = None) -> str: if description_template is None: description_template = DEFAULT_TOOL_DESCRIPTION_TEMPLATE compiled_template = compile_jinja_template(description_template) @@ -748,10 +659,7 @@ def compile_jinja_template(template): raise ImportError("template requires 
jinja2 to be installed.") if version.parse(jinja2.__version__) < version.parse("3.1.0"): - raise ImportError( - "template requires jinja2>=3.1.0 to be installed. Your version is " - f"{jinja2.__version__}." - ) + raise ImportError(f"template requires jinja2>=3.1.0 to be installed. Your version is {jinja2.__version__}.") def raise_exception(message): raise TemplateError(message) @@ -772,9 +680,7 @@ def launch_gradio_demo(tool: Tool): try: import gradio as gr except ImportError: - raise ImportError( - "Gradio should be installed in order to launch a gradio demo." - ) + raise ImportError("Gradio should be installed in order to launch a gradio demo.") TYPE_TO_COMPONENT_CLASS_MAPPING = { "image": gr.Image, @@ -791,9 +697,7 @@ def tool_forward(*args, **kwargs): gradio_inputs = [] for input_name, input_details in tool.inputs.items(): - input_gradio_component_class = TYPE_TO_COMPONENT_CLASS_MAPPING[ - input_details["type"] - ] + input_gradio_component_class = TYPE_TO_COMPONENT_CLASS_MAPPING[input_details["type"]] new_component = input_gradio_component_class(label=input_name) gradio_inputs.append(new_component) @@ -922,14 +826,9 @@ def from_hub( ``` """ _collection = get_collection(collection_slug, token=token) - _hub_repo_ids = { - item.item_id for item in _collection.items if item.item_type == "space" - } + _hub_repo_ids = {item.item_id for item in _collection.items if item.item_type == "space"} - tools = { - Tool.from_hub(repo_id, token, trust_remote_code) - for repo_id in _hub_repo_ids - } + tools = {Tool.from_hub(repo_id, token, trust_remote_code) for repo_id in _hub_repo_ids} return cls(tools) @@ -984,14 +883,19 @@ def tool(tool_function: Callable) -> Tool: tool_function: Your function. Should have type hints for each input and a type hint for the output. Should also have a docstring description including an 'Args:' part where each argument is described. 
""" - parameters = get_json_schema(tool_function)["function"] - if "return" not in parameters: - raise TypeHintParsingException( - "Tool return type not found: make sure your function has a return type hint!" - ) + tool_json_schema = get_json_schema(tool_function)["function"] + if "return" not in tool_json_schema: + raise TypeHintParsingException("Tool return type not found: make sure your function has a return type hint!") class SimpleTool(Tool): - def __init__(self, name, description, inputs, output_type, function): + def __init__( + self, + name: str, + description: str, + inputs: Dict[str, Dict[str, str]], + output_type: str, + function: Callable, + ): self.name = name self.description = description self.inputs = inputs @@ -1000,16 +904,16 @@ def __init__(self, name, description, inputs, output_type, function): self.is_initialized = True simple_tool = SimpleTool( - parameters["name"], - parameters["description"], - parameters["parameters"]["properties"], - parameters["return"]["type"], + name=tool_json_schema["name"], + description=tool_json_schema["description"], + inputs=tool_json_schema["parameters"]["properties"], + output_type=tool_json_schema["return"]["type"], function=tool_function, ) original_signature = inspect.signature(tool_function) - new_parameters = [ - inspect.Parameter("self", inspect.Parameter.POSITIONAL_ONLY) - ] + list(original_signature.parameters.values()) + new_parameters = [inspect.Parameter("self", inspect.Parameter.POSITIONAL_ONLY)] + list( + original_signature.parameters.values() + ) new_signature = original_signature.replace(parameters=new_parameters) simple_tool.forward.__signature__ = new_signature return simple_tool @@ -1022,13 +926,13 @@ class PipelineTool(Tool): - **model_class** (`type`) -- The class to use to load the model in this tool. - **default_checkpoint** (`str`) -- The default checkpoint that should be used when the user doesn't specify one. 
- - **pre_processor_class** (`type`, *optional*, defaults to [`AutoProcessor`]) -- The class to use to load the + - **pre_processor_class** (`type`, *optional*, defaults to [`transformers.AutoProcessor`]) -- The class to use to load the pre-processor - - **post_processor_class** (`type`, *optional*, defaults to [`AutoProcessor`]) -- The class to use to load the + - **post_processor_class** (`type`, *optional*, defaults to [`transformers.AutoProcessor`]) -- The class to use to load the post-processor (when different from the pre-processor). Args: - model (`str` or [`PreTrainedModel`], *optional*): + model (`str` or [`transformers.PreTrainedModel`], *optional*): The name of the checkpoint to use for the model, or the instantiated model. If unset, will default to the value of the class attribute `default_checkpoint`. pre_processor (`str` or `Any`, *optional*): @@ -1053,9 +957,9 @@ class PipelineTool(Tool): Any additional keyword argument to send to the methods that will load the data from the Hub. """ - pre_processor_class = AutoProcessor + pre_processor_class = None model_class = None - post_processor_class = AutoProcessor + post_processor_class = None default_checkpoint = None description = "This is a pipeline tool" name = "pipeline" @@ -1074,17 +978,14 @@ def __init__( token=None, **hub_kwargs, ): - if not is_torch_available(): - raise ImportError("Please install torch in order to use this tool.") - - if not is_accelerate_available(): - raise ImportError("Please install accelerate in order to use this tool.") + if not is_torch_available() or not _is_package_available("accelerate"): + raise ModuleNotFoundError( + "Please install 'transformers' extra to use a PipelineTool: `pip install 'smolagents[transformers]'`" + ) if model is None: if self.default_checkpoint is None: - raise ValueError( - "This tool does not implement a default checkpoint, you need to pass one." 
- ) + raise ValueError("This tool does not implement a default checkpoint, you need to pass one.") model = self.default_checkpoint if pre_processor is None: pre_processor = model @@ -1107,26 +1008,30 @@ def setup(self): Instantiates the `pre_processor`, `model` and `post_processor` if necessary. """ if isinstance(self.pre_processor, str): - self.pre_processor = self.pre_processor_class.from_pretrained( - self.pre_processor, **self.hub_kwargs - ) + if self.pre_processor_class is None: + from transformers import AutoProcessor + + self.pre_processor_class = AutoProcessor + self.pre_processor = self.pre_processor_class.from_pretrained(self.pre_processor, **self.hub_kwargs) if isinstance(self.model, str): - self.model = self.model_class.from_pretrained( - self.model, **self.model_kwargs, **self.hub_kwargs - ) + self.model = self.model_class.from_pretrained(self.model, **self.model_kwargs, **self.hub_kwargs) if self.post_processor is None: self.post_processor = self.pre_processor elif isinstance(self.post_processor, str): - self.post_processor = self.post_processor_class.from_pretrained( - self.post_processor, **self.hub_kwargs - ) + if self.post_processor_class is None: + from transformers import AutoProcessor + + self.post_processor_class = AutoProcessor + self.post_processor = self.post_processor_class.from_pretrained(self.post_processor, **self.hub_kwargs) if self.device is None: if self.device_map is not None: self.device = list(self.model.hf_device_map.values())[0] else: + from accelerate import PartialState + self.device = PartialState().default_device if self.device_map is None: @@ -1157,6 +1062,7 @@ def decode(self, outputs): def __call__(self, *args, **kwargs): import torch + from accelerate.utils import send_to_device args, kwargs = handle_agent_input_types(*args, **kwargs) @@ -1165,12 +1071,8 @@ def __call__(self, *args, **kwargs): encoded_inputs = self.encode(*args, **kwargs) - tensor_inputs = { - k: v for k, v in encoded_inputs.items() if isinstance(v, 
torch.Tensor) - } - non_tensor_inputs = { - k: v for k, v in encoded_inputs.items() if not isinstance(v, torch.Tensor) - } + tensor_inputs = {k: v for k, v in encoded_inputs.items() if isinstance(v, torch.Tensor)} + non_tensor_inputs = {k: v for k, v in encoded_inputs.items() if not isinstance(v, torch.Tensor)} encoded_inputs = send_to_device(tensor_inputs, self.device) outputs = self.forward({**encoded_inputs, **non_tensor_inputs}) diff --git a/src/smolagents/types.py b/src/smolagents/types.py index 038885f88..7077daa59 100644 --- a/src/smolagents/types.py +++ b/src/smolagents/types.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import importlib.util import logging import os import pathlib @@ -22,24 +21,14 @@ import numpy as np import requests -from transformers.utils import ( - is_torch_available, - is_vision_available, -) +from huggingface_hub.utils import is_torch_available +from PIL import Image +from PIL.Image import Image as ImageType -logger = logging.getLogger(__name__) +from .utils import _is_package_available -if is_vision_available(): - from PIL import Image - from PIL.Image import Image as ImageType -else: - ImageType = object -if is_torch_available(): - import torch - from torch import Tensor -else: - Tensor = object +logger = logging.getLogger(__name__) class AgentType: @@ -93,9 +82,6 @@ def __init__(self, value): AgentType.__init__(self, value) ImageType.__init__(self) - if not is_vision_available(): - raise ImportError("PIL must be installed in order to handle images.") - self._path = None self._raw = None self._tensor = None @@ -108,14 +94,16 @@ def __init__(self, value): self._raw = Image.open(BytesIO(value)) elif isinstance(value, (str, pathlib.Path)): self._path = value - elif isinstance(value, torch.Tensor): - self._tensor = value - elif isinstance(value, np.ndarray): - self._tensor = 
torch.from_numpy(value) - else: - raise TypeError( - f"Unsupported type for {self.__class__.__name__}: {type(value)}" - ) + elif is_torch_available(): + import torch + + if isinstance(value, torch.Tensor): + self._tensor = value + if isinstance(value, np.ndarray): + self._tensor = torch.from_numpy(value) + + if self._path is None and self._raw is None and self._tensor is None: + raise TypeError(f"Unsupported type for {self.__class__.__name__}: {type(value)}") def _ipython_display_(self, include=None, exclude=None): """ @@ -184,10 +172,12 @@ class AgentAudio(AgentType, str): """ def __init__(self, value, samplerate=16_000): - if importlib.util.find_spec("soundfile") is None: + if not _is_package_available("soundfile") or not is_torch_available(): raise ModuleNotFoundError( "Please install 'audio' extra to use AgentAudio: `pip install 'smolagents[audio]'`" ) + import torch + super().__init__(value) self._path = None @@ -224,6 +214,8 @@ def to_raw(self): if self._tensor is not None: return self._tensor + import torch + if self._path is not None: if "://" in str(self._path): response = requests.get(self._path) @@ -251,39 +243,32 @@ def to_string(self): return self._path -AGENT_TYPE_MAPPING = {"string": AgentText, "image": AgentImage, "audio": AgentAudio} -INSTANCE_TYPE_MAPPING = { - str: AgentText, - ImageType: AgentImage, - Tensor: AgentAudio, -} - -if is_torch_available(): - INSTANCE_TYPE_MAPPING[Tensor] = AgentAudio +_AGENT_TYPE_MAPPING = {"string": AgentText, "image": AgentImage, "audio": AgentAudio} def handle_agent_input_types(*args, **kwargs): args = [(arg.to_raw() if isinstance(arg, AgentType) else arg) for arg in args] - kwargs = { - k: (v.to_raw() if isinstance(v, AgentType) else v) for k, v in kwargs.items() - } + kwargs = {k: (v.to_raw() if isinstance(v, AgentType) else v) for k, v in kwargs.items()} return args, kwargs def handle_agent_output_types(output, output_type=None): - if output_type in AGENT_TYPE_MAPPING: # 
If the class has defined outputs, we can map directly according to the class definition - decoded_outputs = AGENT_TYPE_MAPPING[output_type](output) + decoded_outputs = _AGENT_TYPE_MAPPING[output_type](output) return decoded_outputs - else: - # If the class does not have defined output, then we map according to the type - for _k, _v in INSTANCE_TYPE_MAPPING.items(): - if isinstance(output, _k): - if ( - _k is not object - ): # avoid converting to audio if torch is not installed - return _v(output) - return output + + # If the class does not have defined output, then we map according to the type + if isinstance(output, str): + return AgentText(output) + if isinstance(output, ImageType): + return AgentImage(output) + if is_torch_available(): + import torch + + if isinstance(output, torch.Tensor): + return AgentAudio(output) + return output __all__ = ["AgentType", "AgentImage", "AgentText", "AgentAudio"] diff --git a/src/smolagents/utils.py b/src/smolagents/utils.py index ac4565f3d..8aa631f1a 100644 --- a/src/smolagents/utils.py +++ b/src/smolagents/utils.py @@ -15,18 +15,37 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import ast +import base64 +import importlib.metadata import importlib.util import inspect import json import re +import textwrap import types +from enum import IntEnum +from functools import lru_cache +from io import BytesIO from typing import Dict, Tuple, Union from rich.console import Console -def is_pygments_available(): - return importlib.util.find_spec("soundfile") is not None +__all__ = ["AgentError"] + + +@lru_cache +def _is_package_available(package_name: str) -> bool: + try: + importlib.metadata.version(package_name) + return True + except importlib.metadata.PackageNotFoundError: + return False + + +@lru_cache +def _is_pillow_available(): + return importlib.util.find_spec("PIL") is not None console = Console() @@ -46,13 +65,29 @@ def is_pygments_available(): ] +class LogLevel(IntEnum): + ERROR = 0 # Only errors + INFO = 1 # Normal output (default) + DEBUG = 2 # Detailed output + + +class AgentLogger: + def __init__(self, level: LogLevel = LogLevel.INFO): + self.level = level + self.console = Console() + + def log(self, *args, level: LogLevel = LogLevel.INFO, **kwargs): + if level <= self.level: + self.console.print(*args, **kwargs) + + class AgentError(Exception): """Base class for other agent-related exceptions""" - def __init__(self, message): + def __init__(self, message, logger: AgentLogger): super().__init__(message) self.message = message - console.print(f"[bold red]{message}[/bold red]") + logger.log(f"[bold red]{message}[/bold red]", level=LogLevel.ERROR) class AgentParsingError(AgentError): @@ -83,9 +118,7 @@ def parse_json_blob(json_blob: str) -> Dict[str, str]: try: first_accolade_index = json_blob.find("{") last_accolade_index = [a.start() for a in list(re.finditer("}", json_blob))][-1] - json_blob = json_blob[first_accolade_index : last_accolade_index + 1].replace( - '\\"', "'" - ) + json_blob = json_blob[first_accolade_index : last_accolade_index + 1].replace('\\"', "'") json_data = json.loads(json_blob, strict=False) return json_data except 
json.JSONDecodeError as e: @@ -162,9 +195,7 @@ def parse_json_tool_call(json_blob: str) -> Tuple[str, Union[str, None]]: MAX_LENGTH_TRUNCATE_CONTENT = 20000 -def truncate_content( - content: str, max_length: int = MAX_LENGTH_TRUNCATE_CONTENT -) -> str: +def truncate_content(content: str, max_length: int = MAX_LENGTH_TRUNCATE_CONTENT) -> str: if len(content) <= max_length: return content else: @@ -196,7 +227,7 @@ def get_method_source(method): """Get source code for a method, including bound methods.""" if isinstance(method, types.MethodType): method = method.__func__ - return inspect.getsource(method).strip() + return get_source(method) def is_same_method(method1, method2): @@ -206,12 +237,8 @@ def is_same_method(method1, method2): source2 = get_method_source(method2) # Remove method decorators if any - source1 = "\n".join( - line for line in source1.split("\n") if not line.strip().startswith("@") - ) - source2 = "\n".join( - line for line in source2.split("\n") if not line.strip().startswith("@") - ) + source1 = "\n".join(line for line in source1.split("\n") if not line.strip().startswith("@")) + source2 = "\n".join(line for line in source2.split("\n") if not line.strip().startswith("@")) return source1 == source2 except (TypeError, OSError): @@ -248,9 +275,7 @@ def instance_to_source(instance, base_cls=None): for name, value in cls.__dict__.items() if not name.startswith("__") and not callable(value) - and not ( - base_cls and hasattr(base_cls, name) and getattr(base_cls, name) == value - ) + and not (base_cls and hasattr(base_cls, name) and getattr(base_cls, name) == value) } for name, value in class_attrs.items(): @@ -271,22 +296,18 @@ def instance_to_source(instance, base_cls=None): for name, func in cls.__dict__.items() if callable(func) and not ( - base_cls - and hasattr(base_cls, name) - and getattr(base_cls, name).__code__.co_code == func.__code__.co_code + base_cls and hasattr(base_cls, name) and getattr(base_cls, name).__code__.co_code == 
func.__code__.co_code ) } for name, method in methods.items(): - method_source = inspect.getsource(method) + method_source = get_source(method) # Clean up the indentation method_lines = method_source.split("\n") first_line = method_lines[0] indent = len(first_line) - len(first_line.lstrip()) method_lines = [line[indent:] for line in method_lines] - method_source = "\n".join( - [" " + line if line.strip() else line for line in method_lines] - ) + method_source = "\n".join([" " + line if line.strip() else line for line in method_lines]) class_lines.append(method_source) class_lines.append("") @@ -315,4 +336,63 @@ def instance_to_source(instance, base_cls=None): return "\n".join(final_lines) -__all__ = ["AgentError"] +def get_source(obj) -> str: + """Get the source code of a class or callable object (e.g.: function, method). + First attempts to get the source code using `inspect.getsource`. + In a dynamic environment (e.g.: Jupyter, IPython), if this fails, + falls back to retrieving the source code from the current interactive shell session. 
+ + Args: + obj: A class or callable object (e.g.: function, method) + + Returns: + str: The source code of the object, dedented and stripped + + Raises: + TypeError: If object is not a class or callable + OSError: If source code cannot be retrieved from any source + ValueError: If source cannot be found in IPython history + + Note: + TODO: handle Python standard REPL + """ + if not (isinstance(obj, type) or callable(obj)): + raise TypeError(f"Expected class or callable, got {type(obj)}") + + inspect_error = None + try: + return textwrap.dedent(inspect.getsource(obj)).strip() + except OSError as e: + # let's keep track of the exception to raise it if all further methods fail + inspect_error = e + try: + import IPython + + shell = IPython.get_ipython() + if not shell: + raise ImportError("No active IPython shell found") + all_cells = "\n".join(shell.user_ns.get("In", [])).strip() + if not all_cells: + raise ValueError("No code cells found in IPython session") + + tree = ast.parse(all_cells) + for node in ast.walk(tree): + if isinstance(node, (ast.ClassDef, ast.FunctionDef)) and node.name == obj.__name__: + return textwrap.dedent("\n".join(all_cells.split("\n")[node.lineno - 1 : node.end_lineno])).strip() + raise ValueError(f"Could not find source code for {obj.__name__} in IPython history") + except ImportError: + # IPython is not available, let's just raise the original inspect error + raise inspect_error + except ValueError as e: + # IPython is available but we couldn't find the source code, let's raise the error + raise e from inspect_error + + +def encode_image_base64(image): + buffered = BytesIO() + image.save(buffered, format="PNG") + return base64.b64encode(buffered.getvalue()).decode("utf-8") + + +def make_image_url(base64_image): + return f"data:image/png;base64,{base64_image}" diff --git a/tests/test_agents.py b/tests/test_agents.py index 4a031374b..1dcb5e933 100644 --- a/tests/test_agents.py +++ b/tests/test_agents.py @@ -28,13 +28,13 @@ ToolCallingAgent, 
) from smolagents.default_tools import PythonInterpreterTool -from smolagents.tools import tool -from smolagents.types import AgentImage, AgentText from smolagents.models import ( ChatMessage, ChatMessageToolCall, ChatMessageToolCallDefinition, ) +from smolagents.tools import tool +from smolagents.types import AgentImage, AgentText from smolagents.utils import BASE_BUILTIN_MODULES @@ -44,9 +44,7 @@ def get_new_path(suffix="") -> str: class FakeToolCallModel: - def __call__( - self, messages, tools_to_call_from=None, stop_sequences=None, grammar=None - ): + def __call__(self, messages, tools_to_call_from=None, stop_sequences=None, grammar=None): if len(messages) < 3: return ChatMessage( role="assistant", @@ -69,18 +67,14 @@ def __call__( ChatMessageToolCall( id="call_1", type="function", - function=ChatMessageToolCallDefinition( - name="final_answer", arguments={"answer": "7.2904"} - ), + function=ChatMessageToolCallDefinition(name="final_answer", arguments={"answer": "7.2904"}), ) ], ) class FakeToolCallModelImage: - def __call__( - self, messages, tools_to_call_from=None, stop_sequences=None, grammar=None - ): + def __call__(self, messages, tools_to_call_from=None, stop_sequences=None, grammar=None): if len(messages) < 3: return ChatMessage( role="assistant", @@ -104,12 +98,44 @@ def __call__( ChatMessageToolCall( id="call_1", type="function", + function=ChatMessageToolCallDefinition(name="final_answer", arguments="image.png"), + ) + ], + ) + + +class FakeToolCallModelVL: + def __call__(self, messages, tools_to_call_from=None, stop_sequences=None, grammar=None): + if len(messages) < 3: + return ChatMessage( + role="assistant", + content="", + tool_calls=[ + ChatMessageToolCall( + id="call_0", + type="function", function=ChatMessageToolCallDefinition( - name="final_answer", arguments="image.png" + name="fake_image_understanding_tool", + arguments={ + "prompt": "What is in this image?", + "image": "image.png", + }, ), ) ], ) + else: + return ChatMessage( + 
role="assistant", + content="", + tool_calls=[ + ChatMessageToolCall( + id="call_1", + type="function", + function=ChatMessageToolCallDefinition(name="final_answer", arguments="The image is a cat."), + ) + ], + ) def fake_code_model(messages, stop_sequences=None, grammar=None) -> str: @@ -147,10 +173,10 @@ def fake_code_model_error(messages, stop_sequences=None) -> str: Thought: I should multiply 2 by 3.6452. special_marker Code: ```py -a = 2 -b = a * 2 -print = 2 -print("Ok, calculation done!") +def error_function(): + raise ValueError("error") + +error_function() ``` """, ) @@ -158,7 +184,7 @@ def fake_code_model_error(messages, stop_sequences=None) -> str: return ChatMessage( role="assistant", content=""" -Thought: I can now answer the initial question +Thought: I faced an error in the previous step. Code: ```py final_answer("got an error") @@ -271,17 +297,13 @@ def fake_code_model_no_return(messages, stop_sequences=None, grammar=None) -> st class AgentTests(unittest.TestCase): def test_fake_single_step_code_agent(self): - agent = CodeAgent( - tools=[PythonInterpreterTool()], model=fake_code_model_single_step - ) + agent = CodeAgent(tools=[PythonInterpreterTool()], model=fake_code_model_single_step) output = agent.run("What is 2 multiplied by 3.6452?", single_step=True) assert isinstance(output, str) assert "7.2904" in output def test_fake_toolcalling_agent(self): - agent = ToolCallingAgent( - tools=[PythonInterpreterTool()], model=FakeToolCallModel() - ) + agent = ToolCallingAgent(tools=[PythonInterpreterTool()], model=FakeToolCallModel()) output = agent.run("What is 2 multiplied by 3.6452?") assert isinstance(output, str) assert "7.2904" in output @@ -301,13 +323,30 @@ def fake_image_generation_tool(prompt: str) -> Image.Image: """ return Image.open(Path(get_tests_dir("fixtures")) / "000000039769.png") - agent = ToolCallingAgent( - tools=[fake_image_generation_tool], model=FakeToolCallModelImage() - ) + agent = 
ToolCallingAgent(tools=[fake_image_generation_tool], model=FakeToolCallModelImage()) output = agent.run("Make me an image.") assert isinstance(output, AgentImage) assert isinstance(agent.state["image.png"], Image.Image) + def test_toolcalling_agent_handles_image_inputs(self): + from PIL import Image + + image = Image.open(Path(get_tests_dir("fixtures")) / "000000039769.png") # dummy input + + @tool + def fake_image_understanding_tool(prompt: str, image: Image.Image) -> str: + """Tool that creates a caption for an image. + + Args: + prompt: The prompt + image: The image + """ + return "The image is a cat." + + agent = ToolCallingAgent(tools=[fake_image_understanding_tool], model=FakeToolCallModelVL()) + output = agent.run("Caption this image.", images=[image]) + assert output == "The image is a cat." + def test_fake_code_agent(self): agent = CodeAgent(tools=[PythonInterpreterTool()], model=fake_code_model) output = agent.run("What is 2 multiplied by 3.6452?") @@ -315,9 +354,7 @@ def test_fake_code_agent(self): assert output == 7.2904 assert agent.logs[1].task == "What is 2 multiplied by 3.6452?" 
assert agent.logs[3].tool_calls == [ - ToolCall( - name="python_interpreter", arguments="final_answer(7.2904)", id="call_3" - ) + ToolCall(name="python_interpreter", arguments="final_answer(7.2904)", id="call_3") ] def test_additional_args_added_to_task(self): @@ -343,17 +380,16 @@ def test_reset_conversations(self): assert output == 7.2904 assert len(agent.logs) == 4 - def test_code_agent_code_errors_show_offending_lines(self): + def test_code_agent_code_errors_show_offending_line_and_error(self): agent = CodeAgent(tools=[PythonInterpreterTool()], model=fake_code_model_error) output = agent.run("What is 2 multiplied by 3.6452?") assert isinstance(output, AgentText) assert output == "got an error" - assert "Code execution failed at line 'print = 2' because of" in str(agent.logs) + assert "Code execution failed at line 'error_function()'" in str(agent.logs[2].error) + assert "ValueError" in str(agent.logs) def test_code_agent_syntax_error_show_offending_lines(self): - agent = CodeAgent( - tools=[PythonInterpreterTool()], model=fake_code_model_syntax_error - ) + agent = CodeAgent(tools=[PythonInterpreterTool()], model=fake_code_model_syntax_error) output = agent.run("What is 2 multiplied by 3.6452?") assert isinstance(output, AgentText) assert output == "got an error" @@ -391,9 +427,7 @@ def test_module_imports_get_baked_in_system_prompt(self): def test_init_agent_with_different_toolsets(self): toolset_1 = [] agent = CodeAgent(tools=toolset_1, model=fake_code_model) - assert ( - len(agent.tools) == 1 - ) # when no tools are provided, only the final_answer tool is added by default + assert len(agent.tools) == 1 # when no tools are provided, only the final_answer tool is added by default toolset_2 = [PythonInterpreterTool(), PythonInterpreterTool()] agent = CodeAgent(tools=toolset_2, model=fake_code_model) @@ -436,19 +470,15 @@ def test_agent_description_gets_correctly_inserted_in_system_prompt(self): assert "You can also give requests to team members." 
not in agent.system_prompt print("ok1") assert "{{managed_agents_descriptions}}" not in agent.system_prompt - assert ( - "You can also give requests to team members." in manager_agent.system_prompt - ) + assert "You can also give requests to team members." in manager_agent.system_prompt def test_code_agent_missing_import_triggers_advice_in_error_log(self): agent = CodeAgent(tools=[], model=fake_code_model_import) - from smolagents.agents import console - - with console.capture() as capture: + with agent.logger.console.capture() as capture: agent.run("Count to 3") str_output = capture.get() - assert "import under `additional_authorized_imports`" in str_output + assert "Consider passing said import under" in str_output.replace("\n", "") def test_multiagents(self): class FakeModelMultiagentsManagerAgent: diff --git a/tests/test_all_docs.py b/tests/test_all_docs.py index d1adabd73..68a88d369 100644 --- a/tests/test_all_docs.py +++ b/tests/test_all_docs.py @@ -136,9 +136,7 @@ def test_single_doc(self, doc_path: Path): try: code_blocks = [ ( - block.replace( - "", os.getenv("HF_TOKEN") - ) + block.replace("", os.getenv("HF_TOKEN")) .replace("YOUR_ANTHROPIC_API_KEY", os.getenv("ANTHROPIC_API_KEY")) .replace("{your_username}", "m-ric") ) @@ -150,9 +148,7 @@ def test_single_doc(self, doc_path: Path): except SubprocessCallException as e: pytest.fail(f"\nError while testing {doc_path.name}:\n{str(e)}") except Exception: - pytest.fail( - f"\nUnexpected error while testing {doc_path.name}:\n{traceback.format_exc()}" - ) + pytest.fail(f"\nUnexpected error while testing {doc_path.name}:\n{traceback.format_exc()}") @pytest.fixture(autouse=True) def _setup(self): @@ -174,6 +170,4 @@ def pytest_generate_tests(metafunc): test_class.setup_class() # Parameterize with the markdown files - metafunc.parametrize( - "doc_path", test_class.md_files, ids=[f.stem for f in test_class.md_files] - ) + metafunc.parametrize("doc_path", test_class.md_files, ids=[f.stem for f in test_class.md_files]) 
diff --git a/tests/test_default_tools.py b/tests/test_default_tools.py index d966b84a9..91c40c6a5 100644 --- a/tests/test_default_tools.py +++ b/tests/test_default_tools.py @@ -13,24 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. import unittest + import pytest from smolagents.default_tools import PythonInterpreterTool, VisitWebpageTool -from smolagents.types import AGENT_TYPE_MAPPING +from smolagents.types import _AGENT_TYPE_MAPPING from .test_tools import ToolTesterMixin class DefaultToolTests(unittest.TestCase): def test_visit_webpage(self): - arguments = { - "url": "https://en.wikipedia.org/wiki/United_States_Secretary_of_Homeland_Security" - } + arguments = {"url": "https://en.wikipedia.org/wiki/United_States_Secretary_of_Homeland_Security"} result = VisitWebpageTool()(arguments) assert isinstance(result, str) - assert ( - "* [About Wikipedia](/wiki/Wikipedia:About)" in result - ) # Proper wikipedia pages have an About + assert "* [About Wikipedia](/wiki/Wikipedia:About)" in result # Proper wikipedia pages have an About class PythonInterpreterToolTester(unittest.TestCase, ToolTesterMixin): @@ -49,7 +46,7 @@ def test_exact_match_kwarg(self): def test_agent_type_output(self): inputs = ["2 * 2"] output = self.tool(*inputs, sanitize_inputs_outputs=True) - output_type = AGENT_TYPE_MAPPING[self.tool.output_type] + output_type = _AGENT_TYPE_MAPPING[self.tool.output_type] self.assertTrue(isinstance(output, output_type)) def test_agent_types_inputs(self): @@ -59,18 +56,13 @@ def test_agent_types_inputs(self): for _input, expected_input in zip(inputs, self.tool.inputs.values()): input_type = expected_input["type"] if isinstance(input_type, list): - _inputs.append( - [ - AGENT_TYPE_MAPPING[_input_type](_input) - for _input_type in input_type - ] - ) + _inputs.append([_AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type]) else: - _inputs.append(AGENT_TYPE_MAPPING[input_type](_input)) + 
_inputs.append(_AGENT_TYPE_MAPPING[input_type](_input)) # Should not raise an error output = self.tool(*inputs, sanitize_inputs_outputs=True) - output_type = AGENT_TYPE_MAPPING[self.tool.output_type] + output_type = _AGENT_TYPE_MAPPING[self.tool.output_type] self.assertTrue(isinstance(output, output_type)) def test_imports_work(self): diff --git a/tests/test_final_answer.py b/tests/test_final_answer.py index 873dcdc90..7bb1e5efe 100644 --- a/tests/test_final_answer.py +++ b/tests/test_final_answer.py @@ -22,10 +22,11 @@ from transformers.testing_utils import get_tests_dir, require_torch from smolagents.default_tools import FinalAnswerTool -from smolagents.types import AGENT_TYPE_MAPPING +from smolagents.types import _AGENT_TYPE_MAPPING from .test_tools import ToolTesterMixin + if is_torch_available(): import torch @@ -45,11 +46,7 @@ def test_exact_match_kwarg(self): def create_inputs(self): inputs_text = {"answer": "Text input"} - inputs_image = { - "answer": Image.open( - Path(get_tests_dir("fixtures")) / "000000039769.png" - ).resize((512, 512)) - } + inputs_image = {"answer": Image.open(Path(get_tests_dir("fixtures")) / "000000039769.png").resize((512, 512))} inputs_audio = {"answer": torch.Tensor(np.ones(3000))} return {"string": inputs_text, "image": inputs_image, "audio": inputs_audio} @@ -58,5 +55,5 @@ def test_agent_type_output(self): inputs = self.create_inputs() for input_type, input in inputs.items(): output = self.tool(**input, sanitize_inputs_outputs=True) - agent_type = AGENT_TYPE_MAPPING[input_type] + agent_type = _AGENT_TYPE_MAPPING[input_type] self.assertTrue(isinstance(output, agent_type)) diff --git a/tests/test_function_type_hints_utils.py b/tests/test_function_type_hints_utils.py new file mode 100644 index 000000000..9e5898516 --- /dev/null +++ b/tests/test_function_type_hints_utils.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# Copyright 2024 HuggingFace Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest +from typing import Optional, Tuple + +from smolagents._function_type_hints_utils import get_json_schema + + +class AgentTextTests(unittest.TestCase): + def test_return_none(self): + def fn(x: int, y: Optional[Tuple[str, str, float]] = None) -> None: + """ + Test function + Args: + x: The first input + y: The second input + """ + pass + + schema = get_json_schema(fn) + expected_schema = { + "name": "fn", + "description": "Test function", + "parameters": { + "type": "object", + "properties": { + "x": {"type": "integer", "description": "The first input"}, + "y": { + "type": "array", + "description": "The second input", + "nullable": True, + "prefixItems": [{"type": "string"}, {"type": "string"}, {"type": "number"}], + }, + }, + "required": ["x"], + }, + "return": {"type": "null"}, + } + self.assertEqual( + schema["function"]["parameters"]["properties"]["y"], expected_schema["parameters"]["properties"]["y"] + ) + self.assertEqual(schema["function"], expected_schema) diff --git a/tests/test_models.py b/tests/test_models.py index 992163194..cd3c96f24 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -12,11 +12,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import unittest import json +import unittest +from pathlib import Path from typing import Optional -from smolagents import models, tool, ChatMessage, HfApiModel, TransformersModel +from transformers.testing_utils import get_tests_dir + +from smolagents import ChatMessage, HfApiModel, TransformersModel, models, tool +from smolagents.models import parse_json_if_needed class ModelTests(unittest.TestCase): @@ -34,20 +38,17 @@ def get_weather(location: str, celsius: Optional[bool] = False) -> str: return "The weather is UNGODLY with torrential rains and temperatures below -10┬░C" assert ( - "nullable" - in models.get_json_schema(get_weather)["function"]["parameters"][ - "properties" - ]["celsius"] + "nullable" in models.get_tool_json_schema(get_weather)["function"]["parameters"]["properties"]["celsius"] ) def test_chatmessage_has_model_dumps_json(self): - message = ChatMessage("user", "Hello!") + message = ChatMessage("user", [{"type": "text", "text": "Hello!"}]) data = json.loads(message.model_dump_json()) - assert data["content"] == "Hello!" 
+ assert data["content"] == [{"type": "text", "text": "Hello!"}] def test_get_hfapi_message_no_tool(self): model = HfApiModel(max_tokens=10) - messages = [{"role": "user", "content": "Hello!"}] + messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}]}] model(messages, stop_sequences=["great"]) def test_transformers_message_no_tool(self): @@ -56,7 +57,40 @@ def test_transformers_message_no_tool(self): max_new_tokens=5, device_map="auto", do_sample=False, + flatten_messages_as_text=True, ) - messages = [{"role": "user", "content": "Hello!"}] + messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}]}] output = model(messages, stop_sequences=["great"]).content assert output == "assistant\nHello" + + def test_transformers_message_vl_no_tool(self): + from PIL import Image + + img = Image.open(Path(get_tests_dir("fixtures")) / "000000039769.png") + model = TransformersModel( + model_id="llava-hf/llava-interleave-qwen-0.5b-hf", + max_new_tokens=5, + device_map="auto", + do_sample=False, + flatten_messages_as_text=False, + ) + messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}, {"type": "image", "image": img}]}] + output = model(messages, stop_sequences=["great"]).content + assert output == "Hello! 
How can" + + def test_parse_json_if_needed(self): + args = "abc" + parsed_args = parse_json_if_needed(args) + assert parsed_args == "abc" + + args = '{"a": 3}' + parsed_args = parse_json_if_needed(args) + assert parsed_args == {"a": 3} + + args = "3" + parsed_args = parse_json_if_needed(args) + assert parsed_args == 3 + + args = 3 + parsed_args = parse_json_if_needed(args) + assert parsed_args == 3 diff --git a/tests/test_monitoring.py b/tests/test_monitoring.py index bd8b14884..9fa30bb63 100644 --- a/tests/test_monitoring.py +++ b/tests/test_monitoring.py @@ -27,6 +27,7 @@ ChatMessageToolCall, ChatMessageToolCallDefinition, ) +from smolagents.utils import AgentLogger, LogLevel class FakeLLMModel: @@ -43,9 +44,7 @@ def __call__(self, prompt, tools_to_call_from=None, **kwargs): ChatMessageToolCall( id="fake_id", type="function", - function=ChatMessageToolCallDefinition( - name="final_answer", arguments={"answer": "image"} - ), + function=ChatMessageToolCallDefinition(name="final_answer", arguments={"answer": "image"}), ) ], ) @@ -122,9 +121,7 @@ def __call__(self, prompt, **kwargs): ) agent.run("Fake task") - self.assertEqual( - agent.monitor.total_input_token_count, 20 - ) # Should have done two monitoring callbacks + self.assertEqual(agent.monitor.total_input_token_count, 20) # Should have done two monitoring callbacks self.assertEqual(agent.monitor.total_output_token_count, 0) def test_streaming_agent_text_output(self): @@ -135,7 +132,7 @@ def test_streaming_agent_text_output(self): ) # Use stream_to_gradio to capture the output - outputs = list(stream_to_gradio(agent, task="Test task", test_mode=True)) + outputs = list(stream_to_gradio(agent, task="Test task")) self.assertEqual(len(outputs), 4) final_message = outputs[-1] @@ -155,7 +152,6 @@ def test_streaming_agent_image_output(self): agent, task="Test task", additional_args=dict(image=AgentImage(value="path.png")), - test_mode=True, ) ) @@ -167,8 +163,10 @@ def test_streaming_agent_image_output(self): 
self.assertEqual(final_message.content["mime_type"], "image/png") def test_streaming_with_agent_error(self): + logger = AgentLogger(level=LogLevel.INFO) + def dummy_model(prompt, **kwargs): - raise AgentError("Simulated agent error") + raise AgentError("Simulated agent error", logger) agent = CodeAgent( tools=[], @@ -177,7 +175,7 @@ def dummy_model(prompt, **kwargs): ) # Use stream_to_gradio to capture the output - outputs = list(stream_to_gradio(agent, task="Test task", test_mode=True)) + outputs = list(stream_to_gradio(agent, task="Test task")) self.assertEqual(len(outputs), 5) final_message = outputs[-1] diff --git a/tests/test_python_interpreter.py b/tests/test_python_interpreter.py index 4976c56af..8aec8fe31 100644 --- a/tests/test_python_interpreter.py +++ b/tests/test_python_interpreter.py @@ -14,6 +14,7 @@ # limitations under the License. import unittest +from textwrap import dedent import numpy as np import pytest @@ -55,10 +56,7 @@ def test_assignment_cannot_overwrite_tool(self): code = "print = '3'" with pytest.raises(InterpreterError) as e: evaluate_python_code(code, {"print": print}, state={}) - assert ( - "Cannot assign to name 'print': doing this would erase the existing tool!" - in str(e) - ) + assert "Cannot assign to name 'print': doing this would erase the existing tool!" 
in str(e) def test_subscript_call(self): code = """def foo(x,y):return x*y\n\ndef boo(y):\n\treturn y**3\nfun = [foo, boo]\nresult_foo = fun[0](4,2)\nresult_boo = fun[1](4)""" @@ -92,9 +90,7 @@ def test_evaluate_dict(self): state = {"x": 3} result, _ = evaluate_python_code(code, {"add_two": add_two}, state=state) self.assertDictEqual(result, {"x": 3, "y": 5}) - self.assertDictEqual( - state, {"x": 3, "test_dict": {"x": 3, "y": 5}, "print_outputs": ""} - ) + self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}, "print_outputs": ""}) def test_evaluate_expression(self): code = "x = 3\ny = 5" @@ -110,9 +106,7 @@ def test_evaluate_f_string(self): result, _ = evaluate_python_code(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == "This is x: 3." - self.assertDictEqual( - state, {"x": 3, "text": "This is x: 3.", "print_outputs": ""} - ) + self.assertDictEqual(state, {"x": 3, "text": "This is x: 3.", "print_outputs": ""}) def test_evaluate_if(self): code = "if x <= 3:\n y = 2\nelse:\n y = 5" @@ -153,15 +147,11 @@ def test_evaluate_subscript(self): state = {"x": 3} result, _ = evaluate_python_code(code, {"add_two": add_two}, state=state) assert result == 5 - self.assertDictEqual( - state, {"x": 3, "test_dict": {"x": 3, "y": 5}, "print_outputs": ""} - ) + self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}, "print_outputs": ""}) code = "vendor = {'revenue': 31000, 'rent': 50312}; vendor['ratio'] = round(vendor['revenue'] / vendor['rent'], 2)" state = {} - evaluate_python_code( - code, {"min": min, "print": print, "round": round}, state=state - ) + evaluate_python_code(code, {"min": min, "print": print, "round": round}, state=state) assert state["vendor"] == {"revenue": 31000, "rent": 50312, "ratio": 0.62} def test_subscript_string_with_string_index_raises_appropriate_error(self): @@ -317,9 +307,7 @@ def test_dictcomp(self): assert result == {0: 0, 1: 1, 2: 4} code = "{num: name for num, name in {101: 'a', 
102: 'b'}.items() if name not in ['a']}" - result, _ = evaluate_python_code( - code, {"print": print}, state={}, authorized_imports=["pandas"] - ) + result, _ = evaluate_python_code(code, {"print": print}, state={}, authorized_imports=["pandas"]) assert result == {102: "b"} code = """ @@ -367,9 +355,7 @@ def test_boolops(self): best_city = "Manhattan" best_city """ - result, _ = evaluate_python_code( - code, BASE_PYTHON_TOOLS, state={"a": 1, "b": 2, "c": 3, "d": 4, "e": 5} - ) + result, _ = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}) assert result == "Brooklyn" code = """if d > e and a < b: @@ -380,9 +366,7 @@ def test_boolops(self): best_city = "Manhattan" best_city """ - result, _ = evaluate_python_code( - code, BASE_PYTHON_TOOLS, state={"a": 1, "b": 2, "c": 3, "d": 4, "e": 5} - ) + result, _ = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}) assert result == "Sacramento" def test_if_conditions(self): @@ -398,9 +382,7 @@ def test_imports(self): result, _ = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result == 2.0 - code = ( - "from random import choice, seed\nseed(12)\nchoice(['win', 'lose', 'draw'])" - ) + code = "from random import choice, seed\nseed(12)\nchoice(['win', 'lose', 'draw'])" result, _ = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result == "lose" @@ -434,14 +416,10 @@ def test_imports(self): # Test submodules are handled properly, thus not raising error code = "import numpy.random as rd\nrng = rd.default_rng(12345)\nrng.random()" - result, _ = evaluate_python_code( - code, BASE_PYTHON_TOOLS, state={}, authorized_imports=["numpy"] - ) + result, _ = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}, authorized_imports=["numpy"]) code = "from numpy.random import default_rng as d_rng\nrng = d_rng(12345)\nrng.random()" - result, _ = evaluate_python_code( - code, BASE_PYTHON_TOOLS, state={}, 
authorized_imports=["numpy"] - ) + result, _ = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}, authorized_imports=["numpy"]) def test_additional_imports(self): code = "import numpy as np" @@ -613,9 +591,7 @@ def test_print(self): def test_types_as_objects(self): code = "type_a = float(2); type_b = str; type_c = int" state = {} - result, is_final_answer = evaluate_python_code( - code, {"float": float, "str": str, "int": int}, state=state - ) + result, is_final_answer = evaluate_python_code(code, {"float": float, "str": str, "int": int}, state=state) assert result is int def test_tuple_id(self): @@ -655,12 +631,9 @@ def test_adding_int_to_list_raises_error(self): assert "Cannot add non-list value 1 to a list." in str(e) def test_error_highlights_correct_line_of_code(self): - code = """# Ok this is a very long code -# It has many commented lines -a = 1 + code = """a = 1 b = 2 -# Here is another piece counts = [1, 2, 3] counts += 1 b += 1""" @@ -668,12 +641,22 @@ def test_error_highlights_correct_line_of_code(self): evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert "Code execution failed at line 'counts += 1" in str(e) + def test_error_type_returned_in_function_call(self): + code = """def error_function(): + raise ValueError("error") + +error_function()""" + with pytest.raises(InterpreterError) as e: + evaluate_python_code(code) + assert "error" in str(e) + assert "ValueError" in str(e) + def test_assert(self): code = """ assert 1 == 1 assert 1 == 2 """ - with pytest.raises(AssertionError) as e: + with pytest.raises(InterpreterError) as e: evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert "1 == 2" in str(e) and "1 == 1" not in str(e) @@ -733,9 +716,7 @@ def test_break(self): break i""" - result, is_final_answer = evaluate_python_code( - code, {"print": print, "round": round}, state={} - ) + result, is_final_answer = evaluate_python_code(code, {"print": print, "round": round}, state={}) assert result == 3 assert not is_final_answer @@ 
-781,9 +762,7 @@ def test_nested_for_loop(self): out[:10] """ state = {} - result, is_final_answer = evaluate_python_code( - code, {"print": print, "range": range}, state=state - ) + result, is_final_answer = evaluate_python_code(code, {"print": print, "range": range}, state=state) assert result == [0, 0, 1, 0, 1, 2, 0, 1, 2, 3] def test_pandas(self): @@ -798,9 +777,7 @@ def test_pandas(self): parts_with_5_set_count[['Quantity', 'SetCount']].values[1] """ state = {} - result, _ = evaluate_python_code( - code, {}, state=state, authorized_imports=["pandas"] - ) + result, _ = evaluate_python_code(code, {}, state=state, authorized_imports=["pandas"]) assert np.array_equal(result, [-1, 5]) code = """ @@ -811,9 +788,7 @@ def test_pandas(self): # Filter the DataFrame to get only the rows with outdated atomic numbers filtered_df = df.loc[df['AtomicNumber'].isin([104])] """ - result, _ = evaluate_python_code( - code, {"print": print}, state={}, authorized_imports=["pandas"] - ) + result, _ = evaluate_python_code(code, {"print": print}, state={}, authorized_imports=["pandas"]) assert np.array_equal(result.values[0], [104, 1]) # Test groupby @@ -825,9 +800,7 @@ def test_pandas(self): ]) survival_rate_by_class = data.groupby('Pclass')['Survived'].mean() """ - result, _ = evaluate_python_code( - code, {}, state={}, authorized_imports=["pandas"] - ) + result, _ = evaluate_python_code(code, {}, state={}, authorized_imports=["pandas"]) assert result.values[1] == 0.5 # Test loc and iloc @@ -839,11 +812,9 @@ def test_pandas(self): ]) survival_rate_biased = data.loc[data['Survived']==1]['Survived'].mean() survival_rate_biased = data.loc[data['Survived']==1]['Survived'].mean() -survival_rate_sorted = data.sort_values(by='Survived', ascending=False).iloc[0] +survival_rate_sorted = data.sort_values(by='Survived', ascending=False).iloc[0] """ - result, _ = evaluate_python_code( - code, {}, state={}, authorized_imports=["pandas"] - ) + result, _ = evaluate_python_code(code, {}, state={}, 
authorized_imports=["pandas"]) def test_starred(self): code = """ @@ -864,9 +835,7 @@ def haversine(lat1, lon1, lat2, lon2): distance_geneva_barcelona = haversine(*coords_geneva, *coords_barcelona) """ - result, _ = evaluate_python_code( - code, {"print": print, "map": map}, state={}, authorized_imports=["math"] - ) + result, _ = evaluate_python_code(code, {"print": print, "map": map}, state={}, authorized_imports=["math"]) assert round(result, 1) == 622395.4 def test_for(self): @@ -884,6 +853,13 @@ def test_for(self): result, _ = evaluate_python_code(code, {"print": print, "map": map}, state={}) assert result == {"Worker A": "8:00 pm", "Worker B": "11:45 am"} + def test_syntax_error_points_error(self): + code = "a = ;" + with pytest.raises(InterpreterError) as e: + evaluate_python_code(code) + assert "SyntaxError" in str(e) + assert " ^" in str(e) + def test_fix_final_answer_code(self): test_cases = [ ( @@ -929,10 +905,163 @@ def test_dangerous_subpackage_access_blocked(self): # Import of whitelisted modules should succeed but dangerous submodules should not exist code = "import random;random._os.system('echo bad command passed')" - with pytest.raises(AttributeError) as e: + with pytest.raises(InterpreterError) as e: evaluate_python_code(code) - assert "module 'random' has no attribute '_os'" in str(e) + assert "AttributeError:module 'random' has no attribute '_os'" in str(e) code = "import doctest;doctest.inspect.os.system('echo bad command passed')" - with pytest.raises(AttributeError): + with pytest.raises(InterpreterError): evaluate_python_code(code, authorized_imports=["doctest"]) + + def test_close_matches_subscript(self): + code = 'capitals = {"Czech Republic": "Prague", "Monaco": "Monaco", "Bhutan": "Thimphu"};capitals["Butan"]' + with pytest.raises(Exception) as e: + evaluate_python_code(code) + assert "Maybe you meant one of these indexes instead" in str(e) and "['Bhutan']" in str(e).replace("\\", "") + + def 
test_dangerous_builtins_calls_are_blocked(self): + unsafe_code = "import os" + dangerous_code = f""" +exec = callable.__self__.exec +compile = callable.__self__.compile +exec(compile('{unsafe_code}', 'no filename', 'exec')) +""" + + with pytest.raises(InterpreterError): + evaluate_python_code(unsafe_code, static_tools=BASE_PYTHON_TOOLS) + + with pytest.raises(InterpreterError): + evaluate_python_code(dangerous_code, static_tools=BASE_PYTHON_TOOLS) + + def test_dangerous_builtins_are_callable_if_explicitly_added(self): + dangerous_code = """ +compile = callable.__self__.compile +eval = callable.__self__.eval +exec = callable.__self__.exec + +eval("1 + 1") +exec(compile("1 + 1", "no filename", "exec")) + +teval("1 + 1") +texec(tcompile("1 + 1", "no filename", "exec")) + """ + + evaluate_python_code( + dangerous_code, static_tools={"tcompile": compile, "teval": eval, "texec": exec} | BASE_PYTHON_TOOLS + ) + + +@pytest.mark.parametrize( + "code, expected_result", + [ + ( + dedent("""\ + x = 1 + x += 2 + """), + 3, + ), + ( + dedent("""\ + x = "a" + x += "b" + """), + "ab", + ), + ( + dedent("""\ + class Custom: + def __init__(self, value): + self.value = value + def __iadd__(self, other): + self.value += other * 10 + return self + + x = Custom(1) + x += 2 + x.value + """), + 21, + ), + ], +) +def test_evaluate_augassign(code, expected_result): + state = {} + result, _ = evaluate_python_code(code, {}, state=state) + assert result == expected_result + + +@pytest.mark.parametrize( + "operator, expected_result", + [ + ("+=", 7), + ("-=", 3), + ("*=", 10), + ("/=", 2.5), + ("//=", 2), + ("%=", 1), + ("**=", 25), + ("&=", 0), + ("|=", 7), + ("^=", 7), + (">>=", 1), + ("<<=", 20), + ], +) +def test_evaluate_augassign_number(operator, expected_result): + code = dedent("""\ + x = 5 + x {operator} 2 + """).format(operator=operator) + state = {} + result, _ = evaluate_python_code(code, {}, state=state) + assert result == expected_result + + +@pytest.mark.parametrize( + "operator, 
expected_result", + [ + ("+=", 7), + ("-=", 3), + ("*=", 10), + ("/=", 2.5), + ("//=", 2), + ("%=", 1), + ("**=", 25), + ("&=", 0), + ("|=", 7), + ("^=", 7), + (">>=", 1), + ("<<=", 20), + ], +) +def test_evaluate_augassign_custom(operator, expected_result): + operator_names = { + "+=": "iadd", + "-=": "isub", + "*=": "imul", + "/=": "itruediv", + "//=": "ifloordiv", + "%=": "imod", + "**=": "ipow", + "&=": "iand", + "|=": "ior", + "^=": "ixor", + ">>=": "irshift", + "<<=": "ilshift", + } + code = dedent("""\ + class Custom: + def __init__(self, value): + self.value = value + def __{operator_name}__(self, other): + self.value {operator} other + return self + + x = Custom(5) + x {operator} 2 + x.value + """).format(operator=operator, operator_name=operator_names[operator]) + state = {} + result, _ = evaluate_python_code(code, {}, state=state) + assert result == expected_result diff --git a/tests/test_tools.py b/tests/test_tools.py index 5b2dc0e1f..e8d5a50ab 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -15,22 +15,19 @@ import unittest from pathlib import Path from textwrap import dedent -from typing import Dict, Optional, Union -from unittest.mock import patch, MagicMock +from typing import Any, Dict, List, Optional, Tuple, Union +from unittest.mock import MagicMock, patch import mcp import numpy as np import pytest +import torch from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir from smolagents.tools import AUTHORIZED_TYPES, Tool, ToolCollection, tool -from smolagents.types import ( - AGENT_TYPE_MAPPING, - AgentAudio, - AgentImage, - AgentText, -) +from smolagents.types import _AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText + if is_torch_available(): import torch @@ -48,9 +45,7 @@ def create_inputs(tool_inputs: Dict[str, Dict[Union[str, type], str]]): if input_type == "string": inputs[input_name] = "Text input" elif input_type == "image": - inputs[input_name] = Image.open( - 
Path(get_tests_dir("fixtures")) / "000000039769.png" - ).resize((512, 512)) + inputs[input_name] = Image.open(Path(get_tests_dir("fixtures")) / "000000039769.png").resize((512, 512)) elif input_type == "audio": inputs[input_name] = np.ones(3000) else: @@ -97,7 +92,7 @@ def test_agent_type_output(self): inputs = create_inputs(self.tool.inputs) output = self.tool(**inputs, sanitize_inputs_outputs=True) if self.tool.output_type != "any": - agent_type = AGENT_TYPE_MAPPING[self.tool.output_type] + agent_type = _AGENT_TYPE_MAPPING[self.tool.output_type] self.assertTrue(isinstance(output, agent_type)) @@ -224,9 +219,7 @@ def test_saving_tool_allows_no_arg_in_init(self): class FailTool(Tool): name = "specific" description = "test description" - inputs = { - "string_input": {"type": "string", "description": "input description"} - } + inputs = {"string_input": {"type": "string", "description": "input description"}} output_type = "string" def __init__(self, url): @@ -248,9 +241,7 @@ def test_saving_tool_allows_no_imports_from_outside_methods(self): class FailTool(Tool): name = "specific" description = "test description" - inputs = { - "string_input": {"type": "string", "description": "input description"} - } + inputs = {"string_input": {"type": "string", "description": "input description"}} output_type = "string" def useless_method(self): @@ -269,9 +260,7 @@ def forward(self, string_input): class SuccessTool(Tool): name = "specific" description = "test description" - inputs = { - "string_input": {"type": "string", "description": "input description"} - } + inputs = {"string_input": {"type": "string", "description": "input description"}} output_type = "string" def useless_method(self): @@ -300,9 +289,7 @@ class GetWeatherTool(Tool): }, } - def forward( - self, location: str, celsius: Optional[bool] = False - ) -> str: + def forward(self, location: str, celsius: Optional[bool] = False) -> str: return "The weather is UNGODLY with torrential rains and temperatures below -10┬░C" 
GetWeatherTool() @@ -340,9 +327,7 @@ class GetWeatherTool(Tool): } output_type = "string" - def forward( - self, location: str, celsius: Optional[bool] = False - ) -> str: + def forward(self, location: str, celsius: Optional[bool] = False) -> str: return "The weather is UNGODLY with torrential rains and temperatures below -10┬░C" GetWeatherTool() @@ -389,6 +374,48 @@ def forward(self, location, celsius: str) -> str: GetWeatherTool3() assert "Nullable" in str(e) + def test_tool_default_parameters_is_nullable(self): + @tool + def get_weather(location: str, celsius: bool = False) -> str: + """ + Get weather in the next days at given location. + + Args: + location: The location to get the weather for. + celsius: is the temperature given in celsius? + """ + return "The weather is UNGODLY with torrential rains and temperatures below -10┬░C" + + assert get_weather.inputs["celsius"]["nullable"] + + def test_tool_supports_any_none(self): + @tool + def get_weather(location: Any) -> None: + """ + Get weather in the next days at given location. + + Args: + location: The location to get the weather for. + """ + return + + assert get_weather.inputs["location"]["type"] == "any" + + def test_tool_supports_array(self): + @tool + def get_weather(locations: List[str], months: Optional[Tuple[str, str]] = None) -> Dict[str, float]: + """ + Get weather in the next days at given locations. + + Args: + locations: The locations to get the weather for. 
+ months: The months to get the weather for + """ + return + + assert get_weather.inputs["locations"]["type"] == "array" + assert get_weather.inputs["months"]["type"] == "array" + @pytest.fixture def mock_server_parameters(): @@ -410,9 +437,7 @@ def mock_smolagents_adapter(): class TestToolCollection: - def test_from_mcp( - self, mock_server_parameters, mock_mcp_adapt, mock_smolagents_adapter - ): + def test_from_mcp(self, mock_server_parameters, mock_mcp_adapt, mock_smolagents_adapter): with ToolCollection.from_mcp(mock_server_parameters) as tool_collection: assert isinstance(tool_collection, ToolCollection) assert len(tool_collection.tools) == 2 @@ -440,9 +465,5 @@ def echo_tool(text: str) -> str: with ToolCollection.from_mcp(mcp_server_params) as tool_collection: assert len(tool_collection.tools) == 1, "Expected 1 tool" - assert tool_collection.tools[0].name == "echo_tool", ( - "Expected tool name to be 'echo_tool'" - ) - assert tool_collection.tools[0](text="Hello") == "Hello", ( - "Expected tool to echo the input text" - ) + assert tool_collection.tools[0].name == "echo_tool", "Expected tool name to be 'echo_tool'" + assert tool_collection.tools[0](text="Hello") == "Hello", "Expected tool to echo the input text" diff --git a/tests/test_types.py b/tests/test_types.py index 244875cfc..9350da17a 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -121,4 +121,3 @@ def test_from_string(self): self.assertEqual(string, agent_type.to_string()) self.assertEqual(string, agent_type.to_raw()) - self.assertEqual(string, agent_type) diff --git a/tests/test_utils.py b/tests/test_utils.py index 0a661a35c..31a8a68e0 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -12,10 +12,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import inspect +import os +import pathlib +import tempfile +import textwrap import unittest + import pytest +from IPython.core.interactiveshell import InteractiveShell -from smolagents.utils import parse_code_blobs +from smolagents import Tool +from smolagents.tools import tool +from smolagents.utils import get_source, parse_code_blobs class AgentTextTests(unittest.TestCase): @@ -57,3 +66,287 @@ def multiply(a, b): return a * b""" result = parse_code_blobs(test_input) assert result == expected_output + + +@pytest.fixture(scope="function") +def ipython_shell(): + """Reset IPython shell before and after each test.""" + shell = InteractiveShell.instance() + shell.reset() # Clean before test + yield shell + shell.reset() # Clean after test + + +@pytest.mark.parametrize( + "obj_name, code_blob", + [ + ("test_func", "def test_func():\n return 42"), + ("TestClass", "class TestClass:\n ..."), + ], +) +def test_get_source_ipython(ipython_shell, obj_name, code_blob): + ipython_shell.run_cell(code_blob, store_history=True) + obj = ipython_shell.user_ns[obj_name] + assert get_source(obj) == code_blob + + +def test_get_source_standard_class(): + class TestClass: ... + + source = get_source(TestClass) + assert source == "class TestClass: ..." + assert source == textwrap.dedent(inspect.getsource(TestClass)).strip() + + +def test_get_source_standard_function(): + def test_func(): ... + + source = get_source(test_func) + assert source == "def test_func(): ..." 
+ assert source == textwrap.dedent(inspect.getsource(test_func)).strip() + + +def test_get_source_ipython_errors_empty_cells(ipython_shell): + test_code = textwrap.dedent("""class TestClass:\n ...""").strip() + ipython_shell.user_ns["In"] = [""] + exec(test_code) + with pytest.raises(ValueError, match="No code cells found in IPython session"): + get_source(locals()["TestClass"]) + + +def test_get_source_ipython_errors_definition_not_found(ipython_shell): + test_code = textwrap.dedent("""class TestClass:\n ...""").strip() + ipython_shell.user_ns["In"] = ["", "print('No class definition here')"] + exec(test_code) + with pytest.raises(ValueError, match="Could not find source code for TestClass in IPython history"): + get_source(locals()["TestClass"]) + + +def test_get_source_ipython_errors_type_error(): + with pytest.raises(TypeError, match="Expected class or callable"): + get_source(None) + + +def test_e2e_class_tool_save(): + class TestTool(Tool): + name = "test_tool" + description = "Test tool description" + inputs = { + "task": { + "type": "string", + "description": "tool input", + } + } + output_type = "string" + + def forward(self, task: str): + import IPython # noqa: F401 + + return task + + test_tool = TestTool() + with tempfile.TemporaryDirectory() as tmp_dir: + test_tool.save(tmp_dir) + assert set(os.listdir(tmp_dir)) == {"requirements.txt", "app.py", "tool.py"} + assert ( + pathlib.Path(tmp_dir, "tool.py").read_text() + == """from smolagents.tools import Tool +import IPython + +class TestTool(Tool): + name = "test_tool" + description = "Test tool description" + inputs = {'task': {'type': 'string', 'description': 'tool input'}} + output_type = "string" + + def forward(self, task: str): + import IPython # noqa: F401 + + return task + + def __init__(self, *args, **kwargs): + self.is_initialized = False +""" + ) + requirements = set(pathlib.Path(tmp_dir, "requirements.txt").read_text().split()) + assert requirements == {"IPython", "smolagents"} + assert ( + 
pathlib.Path(tmp_dir, "app.py").read_text() + == """from smolagents import launch_gradio_demo +from typing import Optional +from tool import TestTool + +tool = TestTool() + +launch_gradio_demo(tool) +""" + ) + + +def test_e2e_ipython_class_tool_save(): + shell = InteractiveShell.instance() + with tempfile.TemporaryDirectory() as tmp_dir: + code_blob = textwrap.dedent(f""" + from smolagents.tools import Tool + class TestTool(Tool): + name = "test_tool" + description = "Test tool description" + inputs = {{"task": {{"type": "string", + "description": "tool input", + }} + }} + output_type = "string" + + def forward(self, task: str): + import IPython # noqa: F401 + + return task + TestTool().save("{tmp_dir}") + """) + assert shell.run_cell(code_blob, store_history=True).success + assert set(os.listdir(tmp_dir)) == {"requirements.txt", "app.py", "tool.py"} + assert ( + pathlib.Path(tmp_dir, "tool.py").read_text() + == """from smolagents.tools import Tool +import IPython + +class TestTool(Tool): + name = "test_tool" + description = "Test tool description" + inputs = {'task': {'type': 'string', 'description': 'tool input'}} + output_type = "string" + + def forward(self, task: str): + import IPython # noqa: F401 + + return task + + def __init__(self, *args, **kwargs): + self.is_initialized = False +""" + ) + requirements = set(pathlib.Path(tmp_dir, "requirements.txt").read_text().split()) + assert requirements == {"IPython", "smolagents"} + assert ( + pathlib.Path(tmp_dir, "app.py").read_text() + == """from smolagents import launch_gradio_demo +from typing import Optional +from tool import TestTool + +tool = TestTool() + +launch_gradio_demo(tool) +""" + ) + + +def test_e2e_function_tool_save(): + @tool + def test_tool(task: str) -> str: + """ + Test tool description + + Args: + task: tool input + """ + import IPython # noqa: F401 + + return task + + with tempfile.TemporaryDirectory() as tmp_dir: + test_tool.save(tmp_dir) + assert set(os.listdir(tmp_dir)) == 
{"requirements.txt", "app.py", "tool.py"} + assert ( + pathlib.Path(tmp_dir, "tool.py").read_text() + == """from smolagents import Tool +from typing import Optional + +class SimpleTool(Tool): + name = "test_tool" + description = "Test tool description" + inputs = {"task":{"type":"string","description":"tool input"}} + output_type = "string" + + def forward(self, task: str) -> str: + \""" + Test tool description + + Args: + task: tool input + \""" + import IPython # noqa: F401 + + return task""" + ) + requirements = set(pathlib.Path(tmp_dir, "requirements.txt").read_text().split()) + assert requirements == {"smolagents"} # FIXME: IPython should be in the requirements + assert ( + pathlib.Path(tmp_dir, "app.py").read_text() + == """from smolagents import launch_gradio_demo +from typing import Optional +from tool import SimpleTool + +tool = SimpleTool() + +launch_gradio_demo(tool) +""" + ) + + +def test_e2e_ipython_function_tool_save(): + shell = InteractiveShell.instance() + with tempfile.TemporaryDirectory() as tmp_dir: + code_blob = textwrap.dedent(f""" + from smolagents import tool + + @tool + def test_tool(task: str) -> str: + \""" + Test tool description + + Args: + task: tool input + \""" + import IPython # noqa: F401 + + return task + + test_tool.save("{tmp_dir}") + """) + assert shell.run_cell(code_blob, store_history=True).success + assert set(os.listdir(tmp_dir)) == {"requirements.txt", "app.py", "tool.py"} + assert ( + pathlib.Path(tmp_dir, "tool.py").read_text() + == """from smolagents import Tool +from typing import Optional + +class SimpleTool(Tool): + name = "test_tool" + description = "Test tool description" + inputs = {"task":{"type":"string","description":"tool input"}} + output_type = "string" + + def forward(self, task: str) -> str: + \""" + Test tool description + + Args: + task: tool input + \""" + import IPython # noqa: F401 + + return task""" + ) + requirements = set(pathlib.Path(tmp_dir, "requirements.txt").read_text().split()) + assert 
requirements == {"smolagents"} # FIXME: IPython should be in the requirements + assert ( + pathlib.Path(tmp_dir, "app.py").read_text() + == """from smolagents import launch_gradio_demo +from typing import Optional +from tool import SimpleTool + +tool = SimpleTool() + +launch_gradio_demo(tool) +""" + ) diff --git a/utils/check_tests_in_ci.py b/utils/check_tests_in_ci.py index 4c55ef098..65ebca729 100644 --- a/utils/check_tests_in_ci.py +++ b/utils/check_tests_in_ci.py @@ -16,6 +16,7 @@ from pathlib import Path + ROOT = Path(__file__).parent.parent TESTS_FOLDER = ROOT / "tests" @@ -37,11 +38,7 @@ def check_tests_in_ci(): if path.name.startswith("test_") ] ci_workflow_file_content = CI_WORKFLOW_FILE.read_text() - missing_test_files = [ - test_file - for test_file in test_files - if test_file not in ci_workflow_file_content - ] + missing_test_files = [test_file for test_file in test_files if test_file not in ci_workflow_file_content] if missing_test_files: print( "тЭМ Some test files seem to be ignored in the CI:\n"