diff --git a/.github/workflows/docs-check.yaml b/.github/workflows/docs-check.yaml index 824c1721d..7e538f951 100644 --- a/.github/workflows/docs-check.yaml +++ b/.github/workflows/docs-check.yaml @@ -45,14 +45,24 @@ jobs: fi cat changed_files.txt - - name: Enforce docs update + - name: Check for changes in stable docs + run: | + if grep -qE '^docs/stable/' changed_files.txt; then + echo "❌ Changes to 'stable' documentation are not allowed in this PR." + echo " Please make changes in the 'development' directory instead." + exit 1 + else + echo "✅ No changes found in 'stable' docs." + fi + + - name: Enforce docs update in development run: | if [ "${{ steps.skip.outputs.skip }}" = "true" ]; then echo "🟡 Skipping docs check (No Docs Needed checked)." exit 0 fi - if grep -qE '^(docs/|CONTRIBUTING\.md$|README\.md$|CODE_OF_CONDUCT\.md$)' changed_files.txt; then + if grep -qE '^(docs/development/|docs/docs.json$|docs/package.json$|docs/tasks.toml$|CONTRIBUTING\.md$|README\.md$|CODE_OF_CONDUCT\.md$)' changed_files.txt; then echo "✅ Docs updated." else echo "❌ No docs changes found and 'No Docs Needed' not checked." 
diff --git a/.vscode/settings.json b/.vscode/settings.json index 677e64ca8..21fc0b7cc 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,3 +1,6 @@ { - "python.terminal.activateEnvironment": false + "python.terminal.activateEnvironment": false, + "debug.javascript.defaultRuntimeExecutable": { + "pwa-node": "/Users/jp/.local/share/mise/shims/node" + } } \ No newline at end of file diff --git a/docs/README.md b/docs/development/README.md similarity index 100% rename from docs/README.md rename to docs/development/README.md diff --git a/docs/agent-development/agent-details.mdx b/docs/development/agent-development/agent-details.mdx similarity index 100% rename from docs/agent-development/agent-details.mdx rename to docs/development/agent-development/agent-details.mdx diff --git a/docs/agent-development/agent-settings.mdx b/docs/development/agent-development/agent-settings.mdx similarity index 100% rename from docs/agent-development/agent-settings.mdx rename to docs/development/agent-development/agent-settings.mdx diff --git a/docs/agent-development/citations.mdx b/docs/development/agent-development/citations.mdx similarity index 100% rename from docs/agent-development/citations.mdx rename to docs/development/agent-development/citations.mdx diff --git a/docs/agent-development/env-variables.mdx b/docs/development/agent-development/env-variables.mdx similarity index 100% rename from docs/agent-development/env-variables.mdx rename to docs/development/agent-development/env-variables.mdx diff --git a/docs/agent-development/error.mdx b/docs/development/agent-development/error.mdx similarity index 100% rename from docs/agent-development/error.mdx rename to docs/development/agent-development/error.mdx diff --git a/docs/agent-development/files.mdx b/docs/development/agent-development/files.mdx similarity index 100% rename from docs/agent-development/files.mdx rename to docs/development/agent-development/files.mdx diff --git a/docs/agent-development/forms.mdx 
b/docs/development/agent-development/forms.mdx similarity index 100% rename from docs/agent-development/forms.mdx rename to docs/development/agent-development/forms.mdx diff --git a/docs/agent-development/llm-proxy-service.mdx b/docs/development/agent-development/llm-proxy-service.mdx similarity index 100% rename from docs/agent-development/llm-proxy-service.mdx rename to docs/development/agent-development/llm-proxy-service.mdx diff --git a/docs/agent-development/mcp-oauth.mdx b/docs/development/agent-development/mcp-oauth.mdx similarity index 100% rename from docs/agent-development/mcp-oauth.mdx rename to docs/development/agent-development/mcp-oauth.mdx diff --git a/docs/agent-development/mcp.mdx b/docs/development/agent-development/mcp.mdx similarity index 100% rename from docs/agent-development/mcp.mdx rename to docs/development/agent-development/mcp.mdx diff --git a/docs/agent-development/messages.mdx b/docs/development/agent-development/messages.mdx similarity index 100% rename from docs/agent-development/messages.mdx rename to docs/development/agent-development/messages.mdx diff --git a/docs/agent-development/multi-turn.mdx b/docs/development/agent-development/multi-turn.mdx similarity index 100% rename from docs/agent-development/multi-turn.mdx rename to docs/development/agent-development/multi-turn.mdx diff --git a/docs/agent-development/overview.mdx b/docs/development/agent-development/overview.mdx similarity index 100% rename from docs/agent-development/overview.mdx rename to docs/development/agent-development/overview.mdx diff --git a/docs/agent-development/rag.mdx b/docs/development/agent-development/rag.mdx similarity index 100% rename from docs/agent-development/rag.mdx rename to docs/development/agent-development/rag.mdx diff --git a/docs/agent-development/secrets.mdx b/docs/development/agent-development/secrets.mdx similarity index 100% rename from docs/agent-development/secrets.mdx rename to docs/development/agent-development/secrets.mdx diff --git 
a/docs/agent-development/tool-calls.mdx b/docs/development/agent-development/tool-calls.mdx similarity index 100% rename from docs/agent-development/tool-calls.mdx rename to docs/development/agent-development/tool-calls.mdx diff --git a/docs/agent-development/trajectory.mdx b/docs/development/agent-development/trajectory.mdx similarity index 100% rename from docs/agent-development/trajectory.mdx rename to docs/development/agent-development/trajectory.mdx diff --git a/docs/community/acp-a2a-migration-guide.mdx b/docs/development/community/acp-a2a-migration-guide.mdx similarity index 100% rename from docs/community/acp-a2a-migration-guide.mdx rename to docs/development/community/acp-a2a-migration-guide.mdx diff --git a/docs/community/community-calls/15-07-2025.md b/docs/development/community/community-calls/15-07-2025.md similarity index 100% rename from docs/community/community-calls/15-07-2025.md rename to docs/development/community/community-calls/15-07-2025.md diff --git a/docs/community/community-calls/16-09-2025.md b/docs/development/community/community-calls/16-09-2025.md similarity index 100% rename from docs/community/community-calls/16-09-2025.md rename to docs/development/community/community-calls/16-09-2025.md diff --git a/docs/community/community-calls/17-06-2025.md b/docs/development/community/community-calls/17-06-2025.md similarity index 100% rename from docs/community/community-calls/17-06-2025.md rename to docs/development/community/community-calls/17-06-2025.md diff --git a/docs/community/community-calls/21-10-2025.md b/docs/development/community/community-calls/21-10-2025.md similarity index 100% rename from docs/community/community-calls/21-10-2025.md rename to docs/development/community/community-calls/21-10-2025.md diff --git a/docs/community/community-calls/26-08-2025.md b/docs/development/community/community-calls/26-08-2025.md similarity index 100% rename from docs/community/community-calls/26-08-2025.md rename to 
docs/development/community/community-calls/26-08-2025.md diff --git a/docs/community/contribute.mdx b/docs/development/community/contribute.mdx similarity index 100% rename from docs/community/contribute.mdx rename to docs/development/community/contribute.mdx diff --git a/docs/custom-ui/client-sdk.mdx b/docs/development/custom-ui/client-sdk.mdx similarity index 100% rename from docs/custom-ui/client-sdk.mdx rename to docs/development/custom-ui/client-sdk.mdx diff --git a/docs/custom-ui/permissions-and-tokens.mdx b/docs/development/custom-ui/permissions-and-tokens.mdx similarity index 100% rename from docs/custom-ui/permissions-and-tokens.mdx rename to docs/development/custom-ui/permissions-and-tokens.mdx diff --git a/docs/deploy-agent-stack/authenticate-cli-to-server.mdx b/docs/development/deploy-agent-stack/authenticate-cli-to-server.mdx similarity index 100% rename from docs/deploy-agent-stack/authenticate-cli-to-server.mdx rename to docs/development/deploy-agent-stack/authenticate-cli-to-server.mdx diff --git a/docs/deploy-agent-stack/deployment-guide.mdx b/docs/development/deploy-agent-stack/deployment-guide.mdx similarity index 100% rename from docs/deploy-agent-stack/deployment-guide.mdx rename to docs/development/deploy-agent-stack/deployment-guide.mdx diff --git a/docs/deploy-agent-stack/observability.mdx b/docs/development/deploy-agent-stack/observability.mdx similarity index 100% rename from docs/deploy-agent-stack/observability.mdx rename to docs/development/deploy-agent-stack/observability.mdx diff --git a/docs/deploy-agents/building-agents.mdx b/docs/development/deploy-agents/building-agents.mdx similarity index 100% rename from docs/deploy-agents/building-agents.mdx rename to docs/development/deploy-agents/building-agents.mdx diff --git a/docs/deploy-agents/deploy-your-agents.mdx b/docs/development/deploy-agents/deploy-your-agents.mdx similarity index 100% rename from docs/deploy-agents/deploy-your-agents.mdx rename to 
docs/development/deploy-agents/deploy-your-agents.mdx diff --git a/docs/deploy-agents/wrap-existing-agents.mdx b/docs/development/deploy-agents/wrap-existing-agents.mdx similarity index 100% rename from docs/deploy-agents/wrap-existing-agents.mdx rename to docs/development/deploy-agents/wrap-existing-agents.mdx diff --git a/docs/experimental/a2a-proxy.mdx b/docs/development/experimental/a2a-proxy.mdx similarity index 100% rename from docs/experimental/a2a-proxy.mdx rename to docs/development/experimental/a2a-proxy.mdx diff --git a/docs/experimental/connectors.mdx b/docs/development/experimental/connectors.mdx similarity index 100% rename from docs/experimental/connectors.mdx rename to docs/development/experimental/connectors.mdx diff --git a/docs/experimental/mcp.mdx b/docs/development/experimental/mcp.mdx similarity index 100% rename from docs/experimental/mcp.mdx rename to docs/development/experimental/mcp.mdx diff --git a/docs/images/architecture-src.excalidraw b/docs/development/images/architecture-src.excalidraw similarity index 100% rename from docs/images/architecture-src.excalidraw rename to docs/development/images/architecture-src.excalidraw diff --git a/docs/images/bee-banner.jpg b/docs/development/images/bee-banner.jpg similarity index 100% rename from docs/images/bee-banner.jpg rename to docs/development/images/bee-banner.jpg diff --git a/docs/images/ui-example2.png b/docs/development/images/ui-example2.png similarity index 100% rename from docs/images/ui-example2.png rename to docs/development/images/ui-example2.png diff --git a/docs/introduction/quickstart.mdx b/docs/development/introduction/quickstart.mdx similarity index 100% rename from docs/introduction/quickstart.mdx rename to docs/development/introduction/quickstart.mdx diff --git a/docs/introduction/welcome.mdx b/docs/development/introduction/welcome.mdx similarity index 100% rename from docs/introduction/welcome.mdx rename to docs/development/introduction/welcome.mdx diff --git 
a/docs/logo/beeai-lockup-white.svg b/docs/development/logo/beeai-lockup-white.svg similarity index 100% rename from docs/logo/beeai-lockup-white.svg rename to docs/development/logo/beeai-lockup-white.svg diff --git a/docs/logo/beeai-lockup.svg b/docs/development/logo/beeai-lockup.svg similarity index 100% rename from docs/logo/beeai-lockup.svg rename to docs/development/logo/beeai-lockup.svg diff --git a/docs/logo/beeai_framework_dark.svg b/docs/development/logo/beeai_framework_dark.svg similarity index 100% rename from docs/logo/beeai_framework_dark.svg rename to docs/development/logo/beeai_framework_dark.svg diff --git a/docs/logo/beeai_framework_light.svg b/docs/development/logo/beeai_framework_light.svg similarity index 100% rename from docs/logo/beeai_framework_light.svg rename to docs/development/logo/beeai_framework_light.svg diff --git a/docs/reference/cli-reference.mdx b/docs/development/reference/cli-reference.mdx similarity index 100% rename from docs/reference/cli-reference.mdx rename to docs/development/reference/cli-reference.mdx diff --git a/docs/docs.json b/docs/docs.json index 8afba5383..5dd7ad9f0 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -9,64 +9,132 @@ }, "favicon": "/favicon.svg", "navigation": { - "groups": [ + "versions": [ { - "group": "Introduction", - "pages": ["introduction/welcome", "introduction/quickstart"] - }, - { - "group": "Deploy Agents", - "pages": [ - "deploy-agents/wrap-existing-agents", - "deploy-agents/building-agents", - "deploy-agents/deploy-your-agents" + "version": "stable", + "groups": [ + { + "group": "Introduction", + "pages": ["stable/introduction/welcome", "stable/introduction/quickstart"] + }, + { + "group": "Deploy Agents", + "pages": [ + "stable/deploy-agents/wrap-existing-agents", + "stable/deploy-agents/building-agents", + "stable/deploy-agents/deploy-your-agents" + ] + }, + { + "group": "Agent Capabilities", + "pages": [ + "stable/agent-development/overview", + "stable/agent-development/messages", + 
"stable/agent-development/multi-turn", + "stable/agent-development/files", + "stable/agent-development/agent-details", + "stable/agent-development/llm-proxy-service", + "stable/agent-development/trajectory", + "stable/agent-development/citations", + "stable/agent-development/forms", + "stable/agent-development/agent-settings", + "stable/agent-development/secrets", + "stable/agent-development/rag", + "stable/agent-development/mcp-oauth", + "stable/agent-development/mcp", + "stable/agent-development/env-variables", + "stable/agent-development/error", + "stable/agent-development/tool-calls" + ] + }, + { + "group": "Reference", + "pages": ["stable/reference/cli-reference"] + }, + { + "group": "Deploy Agent Stack", + "pages": [ + "stable/deploy-agent-stack/deployment-guide", + "stable/deploy-agent-stack/authenticate-cli-to-server", + "stable/deploy-agent-stack/observability" + ] + }, + { + "group": "Custom UI Integration", + "pages": ["stable/custom-ui/client-sdk", "stable/custom-ui/permissions-and-tokens"] + }, + { + "group": "Experimental", + "pages": ["stable/experimental/connectors", "stable/experimental/a2a-proxy"] + }, + { + "group": "Community", + "pages": ["stable/community/contribute"] + } ] }, { - "group": "Agent Capabilities", - "pages": [ - "agent-development/overview", - "agent-development/messages", - "agent-development/multi-turn", - "agent-development/files", - "agent-development/agent-details", - "agent-development/llm-proxy-service", - "agent-development/trajectory", - "agent-development/citations", - "agent-development/forms", - "agent-development/agent-settings", - "agent-development/secrets", - "agent-development/rag", - "agent-development/mcp-oauth", - "agent-development/mcp", - "agent-development/env-variables", - "agent-development/error", - "agent-development/tool-calls" - ] - }, - { - "group": "Reference", - "pages": ["reference/cli-reference"] - }, - { - "group": "Deploy Agent Stack", - "pages": [ - "deploy-agent-stack/deployment-guide", - 
"deploy-agent-stack/authenticate-cli-to-server", - "deploy-agent-stack/observability" + "version": "development", + "groups": [ + { + "group": "Introduction", + "pages": ["development/introduction/welcome", "development/introduction/quickstart"] + }, + { + "group": "Deploy Agents", + "pages": [ + "development/deploy-agents/wrap-existing-agents", + "development/deploy-agents/building-agents", + "development/deploy-agents/deploy-your-agents" + ] + }, + { + "group": "Agent Capabilities", + "pages": [ + "development/agent-development/overview", + "development/agent-development/messages", + "development/agent-development/multi-turn", + "development/agent-development/files", + "development/agent-development/agent-details", + "development/agent-development/llm-proxy-service", + "development/agent-development/trajectory", + "development/agent-development/citations", + "development/agent-development/forms", + "development/agent-development/agent-settings", + "development/agent-development/secrets", + "development/agent-development/rag", + "development/agent-development/mcp-oauth", + "development/agent-development/mcp", + "development/agent-development/env-variables", + "development/agent-development/error", + "development/agent-development/tool-calls" + ] + }, + { + "group": "Reference", + "pages": ["development/reference/cli-reference"] + }, + { + "group": "Deploy Agent Stack", + "pages": [ + "development/deploy-agent-stack/deployment-guide", + "development/deploy-agent-stack/authenticate-cli-to-server", + "development/deploy-agent-stack/observability" + ] + }, + { + "group": "Custom UI Integration", + "pages": ["development/custom-ui/client-sdk", "development/custom-ui/permissions-and-tokens"] + }, + { + "group": "Experimental", + "pages": ["development/experimental/connectors", "development/experimental/a2a-proxy"] + }, + { + "group": "Community", + "pages": ["development/community/contribute"] + } ] - }, - { - "group": "Custom UI Integration", - "pages": 
["custom-ui/client-sdk", "custom-ui/permissions-and-tokens"] - }, - { - "group": "Experimental", - "pages": ["experimental/connectors", "experimental/a2a-proxy"] - }, - { - "group": "Community", - "pages": ["community/contribute"] } ] }, diff --git a/docs/stable/README.md b/docs/stable/README.md new file mode 100644 index 000000000..a0e771697 --- /dev/null +++ b/docs/stable/README.md @@ -0,0 +1,32 @@ +# Mintlify Starter Kit + +Click on `Use this template` to copy the Mintlify starter kit. The starter kit contains examples including + +- Guide pages +- Navigation +- Customizations +- API Reference pages +- Use of popular components + +### Development + +Install the [Mintlify CLI](https://www.npmjs.com/package/mintlify) to preview the documentation changes locally. To install, use the following command + +``` +npm i -g mintlify +``` + +Run the following command at the root of your documentation (where docs.json is) + +``` +mintlify dev +``` + +### Publishing Changes + +Install our Github App to auto propagate changes from your repo to your deployment. Changes will be deployed to production automatically after pushing to the default branch. Find the link to install on your dashboard. + +#### Troubleshooting + +- Mintlify dev isn't running - Run `mintlify install` it'll re-install dependencies. +- Page loads as a 404 - Make sure you are running in a folder with `docs.json` diff --git a/docs/stable/agent-development/agent-details.mdx b/docs/stable/agent-development/agent-details.mdx new file mode 100644 index 000000000..de9a909ed --- /dev/null +++ b/docs/stable/agent-development/agent-details.mdx @@ -0,0 +1,126 @@ +--- +title: "Customize Agent Details" +description: "Configure how your agent appears and behaves in the GUI" +--- + +When building your agent, you can configure certain attributes that affect how it appears and behaves in the user interface. 
The `@server.agent` decorator accepts a `detail` parameter with an `AgentDetail` object that controls the visual representation and behavior in the UI. + +You can customize various aspects of your agent's presentation, such as: +- The type of user interface the agent uses (chat, form, etc.) +- Custom user greetings +- Metadata about tools the agent provides +- Author and contributor information +- License, framework, and source code details + +## Basic Configuration + +Configuring agent details is straightforward. Import `AgentDetail` and related types, then pass them to the `detail` parameter in the `@server.agent` decorator: +```python +import os +from textwrap import dedent + +from a2a.types import AgentSkill, Message +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext +from agentstack_sdk.a2a.extensions import AgentDetail, AgentDetailContributor, AgentDetailTool + +server = Server() + +@server.agent( + name="Example Research Assistant", + detail=AgentDetail( + interaction_mode="multi-turn", # or single-turn + user_greeting="Hi there! I can help you research topics or summarize uploaded documents.", + tools=[ + AgentDetailTool( + name="Web Search", + description="Looks up recent and relevant information from the web." + ), + AgentDetailTool( + name="Document Reader", + description="Reads and extracts key insights from uploaded PDFs or text files." + ), + ], + framework="BeeAI Framework", + author=AgentDetailContributor( + name="Agent Stack Team", + email="team@example.com", + ), + source_code_url="https://github.com/example/example-research-assistant", + ), + skills=[ + AgentSkill( + id="research", + name="Research", + description=dedent( + """\ + Finds up-to-date information on a given topic, synthesizes key points, + and summarizes findings in clear, useful responses. 
+ """ + ), + tags=["Search", "Knowledge"], + examples=[ + "Find recent news about AI ethics in 2025.", + "What are the main challenges in renewable energy adoption?", + "Give me an overview of current space exploration missions.", + ], + ), + AgentSkill( + id="summarization", + name="Summarization", + description=dedent( + """\ + Reads and summarizes uploaded text or documents, highlighting the + most important ideas, statistics, and conclusions. + """ + ), + tags=["Documents", "Summaries"], + examples=[ + "Summarize this PDF report about electric vehicle trends.", + "What are the main points from this research article?", + "Condense this document into a short summary I can share.", + ], + ), + ], +) +async def example_agent(input: Message, context: RunContext): + """An example agent with detailed configuration""" + yield "Hello World!" + +def run(): + server.run(host=os.getenv("HOST", "127.0.0.1"), port=int(os.getenv("PORT", 8000))) + + +if __name__ == "__main__": + run() +``` + +## Key Fields + +### UI Configuration +- **`interaction_mode`**: Set to `"multi-turn"` for conversational agents or `"single-turn"` for one-shot agents +- **`user_greeting`**: Welcome message displayed when users first interact with your agent + +### Metadata +- **`framework`**: The framework used to build your agent (e.g., "BeeAI", "LangGraph", "CrewAI") +- **`programming_language`**: Primary language (e.g., "Python", "JavaScript") +- **`license`**: Software license (e.g., "Apache 2.0", "MIT") + +### Tools +- **`tools`**: List of `AgentDetailTool` objects describing your agent's capabilities + - Each tool has a `name` and `description` to help users understand each tool's purpose + +### Skills +- **`skills`**: List of `AgentSkill` objects that represent distinct, user-facing abilities your agent can perform + - Each skill includes an id, name, description, tags, and examples + - Examples are displayed in the user interface as starter questions — they help users understand what kinds of 
requests your agent can handle and encourage exploration + - Skills define what your agent can do — for example, “Research” for finding and summarizing information, or “Summarization” for condensing long documents + +### Links and Attribution +- **`source_code_url`**: Link to the source code repository +- **`author`**: Primary author information (name, email, url) +- **`contributors`**: List of additional contributors + + +The Agent Details extension helps users understand your agent's capabilities before they start using it. Provide clear descriptions and accurate tool information to improve the user experience. + \ No newline at end of file diff --git a/docs/stable/agent-development/agent-settings.mdx b/docs/stable/agent-development/agent-settings.mdx new file mode 100644 index 000000000..98aa2fe4f --- /dev/null +++ b/docs/stable/agent-development/agent-settings.mdx @@ -0,0 +1,184 @@ +--- +title: "Configure Agent Settings" +description: "Allow users to configure agent behavior with interactive settings" +--- + +Sometimes you need to give users control over how your agent behaves during a conversation. For example, users might want to enable or disable thinking mode, choose between different response styles, or configure other agent-specific parameters. + +The Agent Stack platform provides a Settings extension that creates an interactive UI component where users can configure these options before or during their interaction with your agent. + + +Settings extensions are a type of [Service Extension](/sdk/overview#dependency-injection-service-extensions) that allows you to easily "inject dependencies" into your agent. This follows the inversion of control principle where your agent defines what it needs, and the platform provides those dependencies. + + +## Quickstart + + + +Import the necessary components from the Agent Stack SDK settings extension. + + + +Inject the Settings extension into your agent function using the `Annotated` type hint. 
+ + + +Create a `SettingsRender` object with the fields you want users to configure. + + + +Use `parse_settings_response()` to access the user's configuration choices. + + + +## Basic Settings Example + +Here's how to add settings capabilities to your agent: + +```python +import os +from collections.abc import AsyncGenerator +from typing import Annotated + +from a2a.types import Message + +from agentstack_sdk.a2a.extensions.ui.settings import ( + CheckboxField, + CheckboxGroupField, + SettingsExtensionServer, + SettingsExtensionSpec, + SettingsRender, +) +from agentstack_sdk.a2a.types import RunYield +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext + +server = Server() + +@server.agent() +async def settings_agent( + message: Message, + context: RunContext, + settings: Annotated[ + SettingsExtensionServer, + SettingsExtensionSpec( + params=SettingsRender( + fields=[ + CheckboxGroupField( + id="thinking_group", + fields=[ + CheckboxField( + id="thinking", + label="Enable Thinking Mode", + default_value=True, + ) + ], + ) + ], + ), + ), + ], +) -> AsyncGenerator[RunYield, Message]: + """Agent that demonstrates settings configuration""" + + if not settings: + yield "Settings extension hasn't been activated, no settings are available" + return + + parsed_settings = settings.parse_settings_response() + + thinking_group = parsed_settings.values["thinking_group"] + if thinking_group.type == "checkbox_group": + if thinking_group.values["thinking"].value: + yield "Thinking mode is enabled - I'll show my reasoning process.\n" + else: + yield "Thinking mode is disabled - I'll provide direct responses.\n" + +def run(): + server.run(host=os.getenv("HOST", "127.0.0.1"), port=int(os.getenv("PORT", 8000))) + +if __name__ == "__main__": + run() +``` + +## How to work with settings + +Here's what you need to know to add settings capabilities to your agent: + +**Import the settings extension**: Import `SettingsExtensionServer`, 
`SettingsExtensionSpec`, `SettingsRender`, and field types from `agentstack_sdk.a2a.extensions.ui.settings`. + +**Inject the extension**: Add a settings parameter to your agent function using the `Annotated` type hint with `SettingsExtensionServer` and `SettingsExtensionSpec`. + +**Define your settings structure**: Create a `SettingsRender` object with the fields you want users to configure. + +**Check if the extension exists**: Always verify that the settings extension is provided before using it, as service extensions are optional. + +**Parse settings data**: Use `settings.parse_settings_response()` to access the user's configuration choices. + +**Access field values**: Use `parsed_settings.values['field_id']` to access the submitted values from your settings fields. + + +Settings are presented to users in a clean, organized interface that makes it easy to configure your agent's behavior. The platform automatically handles the UI rendering and data collection. + + + +Always check if the settings extension is available before using it to comply with plain A2A clients. + + +## Settings Field Types + +The Agent Stack supports various field types for collecting different kinds of configuration data: + +### CheckboxField +Single checkbox fields for boolean configuration options. + +```python +from agentstack_sdk.a2a.extensions.ui.settings import CheckboxField + +CheckboxField( + id="debug_mode", + label="Enable Debug Mode", + default_value=False, +) +``` + +### CheckboxGroupField +Groups multiple checkboxes together for related boolean options. 
+ +```python +from agentstack_sdk.a2a.extensions.ui.settings import CheckboxField, CheckboxGroupField + +CheckboxGroupField( + id="features", + fields=[ + CheckboxField( + id="thinking", + label="Show Thinking Process", + default_value=True, + ), + CheckboxField( + id="citations", + label="Include Citations", + default_value=False, + ), + ], +) +``` + +### SingleSelectField +Dropdown fields for choosing a single option from a list. + +```python +from agentstack_sdk.a2a.extensions.ui.settings import OptionItem, SingleSelectField + +SingleSelectField( + id="response_style", + label="Response Style", + options=[ + OptionItem(value="formal", label="Formal"), + OptionItem(value="casual", label="Casual"), + OptionItem(value="technical", label="Technical"), + ], + default_value="casual", +) +``` \ No newline at end of file diff --git a/docs/stable/agent-development/citations.mdx b/docs/stable/agent-development/citations.mdx new file mode 100644 index 000000000..0f33a0347 --- /dev/null +++ b/docs/stable/agent-development/citations.mdx @@ -0,0 +1,74 @@ +--- +title: Add Citations to Agent Responses +description: Add source references to agent responses with clickable links +--- + +## Basic Usage + +```python +from typing import Annotated +from agentstack_sdk.a2a.extensions import ( + Citation, + CitationExtensionServer, + CitationExtensionSpec, +) +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext +from a2a.types import Message + +server = Server() + +@server.agent() +async def research_agent( + input: Message, + context: RunContext, + citation: Annotated[CitationExtensionServer, CitationExtensionSpec()] +): + response_text = "Python is the most popular programming language for AI development." 
+ + citations = [ + Citation( + url="https://survey.stackoverflow.com/2023", + title="Stack Overflow Developer Survey 2023", + description="Annual survey of developer preferences and trends", + start_index=0, + end_index=47 # "Python is the most popular programming language" + ) + ] + + yield citation.message(text=response_text, citations=citations) +``` + +## Citation Format + +Each citation requires: +- `url`: Source link +- `title`: Display title +- `description`: Brief explanation +- `start_index`: Start position in text +- `end_index`: End position in text + +## Multiple Citations + +```python +response_text = "Python leads AI development while JavaScript dominates web development." + +citations = [ + { + "url": "https://ai-survey.com", + "title": "AI Language Survey", + "description": "Programming language usage in AI", + "start_index": 0, + "end_index": 31 # "Python leads AI development" + }, + { + "url": "https://web-stats.com", + "title": "Web Development Report", + "description": "Web programming language statistics", + "start_index": 38, + "end_index": 67 # "JavaScript dominates web development" + } +] +``` + +Citations appear as highlighted text with hover tooltips and clickable source links in the UI. diff --git a/docs/stable/agent-development/env-variables.mdx b/docs/stable/agent-development/env-variables.mdx new file mode 100644 index 000000000..f23ac4bdc --- /dev/null +++ b/docs/stable/agent-development/env-variables.mdx @@ -0,0 +1,123 @@ +--- +title: "Environment Variables" +description: "Declare which environment variables the agent requires so they can be provided by the Agent Stack runtime." +--- + +Sometimes you need to configure your agent with environment variables that should be provided by the runtime. + +For example, you might want to globally enable or disable thinking mode for all users of your agent, or configure API endpoints, feature flags, or other environment settings. 
+ +Normally, you would set these environment variables directly in your deployment environment. However, when your agent is deployed to Agent Stack via the `agentstack add` command, the platform manages the runtime environment and you can't directly control it. + +The Agent Stack platform allows you to declare environment variable requirements through `AgentDetail`. The platform will then provide these variables to your agent at runtime, and you can access them using standard `os.getenv()` calls. + + +Environment variables declared in `AgentDetail` are automatically provided by the Agent Stack platform when your agent is deployed via `agentstack add`. When running your agent locally or with auto-registration via SDK, you're responsible for providing these variables yourself. + + +## Quickstart + + + +Import the necessary components from the Agent Stack SDK. + + + +Add an `EnvVar` list to the `variables` field in your `AgentDetail` configuration. + + + +Use `agentstack add` command to [deploy your agent in Agent Stack](/guides/deploy-your-agents) + + + +Configure environment variables for your agent using `agentstack env add "Name or ID of your Agent" KEY=VALUE`. + + + + +Use `os.getenv()` to access the environment variables in your agent code. 
+ + + +## Basic Environment Variables Example + +Here's how to request environment variables for your agent: + +```python +import os + +from a2a.types import Message +from agentstack_sdk.a2a.extensions.ui.agent_detail import EnvVar +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext +from agentstack_sdk.a2a.extensions import AgentDetail + +server = Server() + + +@server.agent( + detail=AgentDetail( + interaction_mode="multi-turn", + variables=[ + EnvVar( + name="THINKING_ENABLED", + description="Whether to enable thinking mode for all users", + required=True + ) + ] + ) +) +async def env_var_agent(input: Message, context: RunContext): + """Agent that uses environment variables for configuration""" + thinking_enabled = os.getenv("THINKING_ENABLED", "false").lower() == "true" + + if thinking_enabled: + yield "Thinking mode is enabled. I'll show my reasoning process." + else: + yield "Thinking mode is disabled. I'll provide direct responses." + + +def run(): + server.run(host=os.getenv("HOST", "127.0.0.1"), port=int(os.getenv("PORT", 8000))) + + +if __name__ == "__main__": + run() +``` + +## Managing Environment Variables + +Once your agent is deployed to Agent Stack, you need to provide the environment variables it requires. You can manage these variables using the `agentstack env` CLI commands. + +### Adding Environment Variables + +To provide environment variables to your agent, use the `agentstack env add` command: + +```bash +agentstack env add "Name or ID of the agent" FIRST_VAR=VALUE SECOND_VAR=VALUE ... +``` + + +You can add multiple variables at once by including them all in a single command. 
+ + +### Listing Environment Variables + +To view the environment variables configured for an agent: + +```bash +agentstack env list "Name or ID of the agent" +``` + +### Removing Environment Variables + +To remove an environment variable from an agent: + +```bash +agentstack env remove "Name or ID of the agent" VARIABLE_NAME +``` + +### Checking Missing Variables + +When a required variable hasn't been provided, you can see this in the agent list. Use `agentstack list` to view all agents - the `MISSING ENV` column shows which variables still need to be configured. diff --git a/docs/stable/agent-development/error.mdx b/docs/stable/agent-development/error.mdx new file mode 100644 index 000000000..c7e7f9587 --- /dev/null +++ b/docs/stable/agent-development/error.mdx @@ -0,0 +1,110 @@ +--- +title: "Handle Errors" +description: "Learn how your agent can report formatted errors to the UI." +--- + +The Error extension provides a standardized way to report errors from your agent to the UI. + +The Agent Stack SDK automatically handles exceptions raised within your agent function and uses the Error extension to report them. **No configuration is required** for standard error reporting. + +## Quickstart + +Simply raise exceptions in your agent code. The SDK will catch them and use the extension to format the error message. + +```python +from a2a.types import Message +from agentstack_sdk.server import Server + +server = Server() + + +@server.agent() +async def my_agent(input: Message): + # ... do some work ... + raise ValueError("Something went wrong!") +``` + +## Advanced Configuration + +If you want to customize the error reporting, for example to include stack traces, you can inject the extension with custom parameters. + + + +Import `ErrorExtensionServer`, `ErrorExtensionSpec`, and `ErrorExtensionParams`. + + + +Inject the extension using `Annotated` and set `include_stacktrace=True`. 
+ + + +```python +import os +from typing import Annotated + +from a2a.types import Message + +from agentstack_sdk.a2a.extensions.ui.error import ( + ErrorExtensionParams, + ErrorExtensionServer, + ErrorExtensionSpec, +) +from agentstack_sdk.server import Server + +server = Server() + + +@server.agent() +async def error_agent( + input: Message, + # Configure to include stack traces + error_ext: Annotated[ + ErrorExtensionServer, + ErrorExtensionSpec(params=ErrorExtensionParams(include_stacktrace=True)), + ], +): + """Agent that demonstrates error handling with stack traces""" + yield "Working..." + + # This exception will be caught and formatted by the extension + # The stack trace will be included because of the configuration above + raise ValueError("Something went wrong!") +``` + + + Enable stack traces during development for easier debugging, but consider disabling them in production to avoid leaking implementation details. + + +## Handling Multiple Errors + +The Error extension supports `ExceptionGroup` to report multiple errors at once. When an `ExceptionGroup` is raised, it is formatted as a group of errors in the UI. + +```python +@server.agent() +async def group_agent(input: Message): + # ... + raise ExceptionGroup("Multiple failures", [ + ValueError("First error"), + TypeError("Second error") + ]) +``` + +## Adding Context + +You can attach arbitrary context to errors by accessing the injected `ErrorExtensionServer` instance. This context will be included in the error metadata sent to the client. + +```python +@server.agent() +async def context_agent( + input: Message, + error_ext: Annotated[ErrorExtensionServer, ErrorExtensionSpec()], +): + # Add context before an error might occur or in an except block + error_ext.context["request_id"] = "req-123" + error_ext.context["user_id"] = 42 + + # ... do work ... 
+ + # If an exception is raised, the context is included + raise ValueError("Something went wrong!") +``` diff --git a/docs/stable/agent-development/files.mdx b/docs/stable/agent-development/files.mdx new file mode 100644 index 000000000..07cc5e5ea --- /dev/null +++ b/docs/stable/agent-development/files.mdx @@ -0,0 +1,137 @@ +--- +title: "Working with Files" +description: "Upload and generate files in agents" +--- + +One of the most common use cases for AI agents is working with files. Your agent should be able to read files from user uploads and generate new files as outputs. The Agent Stack makes this seamless through the A2A protocol's `FilePart`. + +## Quickstart + + + +Add the `default_input_modes` parameter to your agent decorator only if you want users to upload files to your agent. This specifies which file types users can upload. + + + +Import and use the `PlatformApiExtensionServer` to access file creation capabilities. This extension provides your agent with the proper context and authentication needed to use the Agent Stack API for creating and managing files. If not provided, your agent will receive unauthorized responses when working with files. + + + +Iterate through message parts to find `FilePart` objects and load their content using `load_file` helper. + + + +Use the `File.create()` method to generate new files and yield them as `FilePart` objects. 
+ + + +## Example of File Processing + +Here's how to build an agent that can accept and modify files: + +```python +import os + +from typing import Annotated + +from agentstack_sdk.server import Server +from a2a.types import Message +from agentstack_sdk.a2a.extensions.services.platform import ( + PlatformApiExtensionServer, + PlatformApiExtensionSpec, +) +from agentstack_sdk.platform import File +from agentstack_sdk.util.file import load_file + + +server = Server() + + +@server.agent( + default_input_modes=["text/plain"], + default_output_modes=["text/plain"] +) +async def example_agent( + input: Message, + _: Annotated[PlatformApiExtensionServer, PlatformApiExtensionSpec()], +): + """Agent that can accept and modify files""" + + for file_part in input.parts: + file_part_root = file_part.root + + if file_part_root.kind == "file": + async with load_file(file_part_root) as loaded_content: + new_file = await File.create( + filename=f"processed_{file_part_root.file.name}", + content_type=file_part_root.file.mime_type or "application/octet-stream", + content=loaded_content.text.encode(), + ) + yield new_file.to_file_part() + + yield "File Processing Done" + + +def run(): + server.run(host=os.getenv("HOST", "127.0.0.1"), port=int(os.getenv("PORT", 8000))) + + +if __name__ == "__main__": + run() +``` + +## How to work with files + +Here's what you need to know to add file processing capabilities to your agent: + +**Enable file uploads**: Add `default_input_modes` to your agent decorator with a list of MIME types you want to accept (e.g., `["text/plain", "application/pdf", "image/jpeg"]`). + +**Enable producing of files**: Add `default_output_modes` to your agent decorator with a list of MIME types that your agent can potentially produce (e.g., `["text/plain", "application/pdf", "image/jpeg"]`). + +**Access the Platform API**: Import and use `PlatformApiExtensionServer` to get access to file manipulation capabilities. 
+ +**Process message parts**: Iterate through `input.parts` to find FilePart objects that represent uploaded files. + +**Load file content**: Use `load_file()` with an async context manager to safely access file content. + +**Create new files**: Use `File.create()` to generate new files with custom names, content types, and content. + +**Yield file outputs**: The `File` object created by the SDK can be easily converted to a `FilePart` using the `to_file_part()` method and then yielded as agent outputs. + +## File Upload Configuration + +The `default_input_modes` parameter controls which file types users can upload: + +```python +@server.agent( + default_input_modes=[ + "text/plain", # Plain text files + "application/pdf", # PDF documents + "image/jpeg", # JPEG images + "image/png", # PNG images + "application/json", # JSON files + "text/csv" # CSV files + ] +) +``` + +The `default_output_modes` parameter controls which files the agent can produce: + +```python +@server.agent( + default_output_modes=[ + "text/plain", # Plain text files + "application/pdf", # PDF documents + "image/jpeg", # JPEG images + "image/png", # PNG images + "application/json", # JSON files + "text/csv" # CSV files + ] +) +``` + +Common MIME types you might want to support: +- **Text files**: `text/plain`, `text/markdown`, `text/csv` +- **Documents**: `application/pdf`, `application/msword`, `application/vnd.openxmlformats-officedocument.wordprocessingml.document` +- **Images**: `image/jpeg`, `image/png`, `image/gif`, `image/svg+xml` +- **Data**: `application/json`, `application/xml`, `text/xml` diff --git a/docs/stable/agent-development/forms.mdx b/docs/stable/agent-development/forms.mdx new file mode 100644 index 000000000..2fa48a48d --- /dev/null +++ b/docs/stable/agent-development/forms.mdx @@ -0,0 +1,359 @@ +--- +title: "Collect Input with Forms" +description: "Collect structured input from users" +--- + +One of the most powerful features of the Agent Stack is the ability to request structured 
data from users through interactive forms. Instead of relying on free-form text input, your agent can present users with specific fields, dropdowns, and other form elements to gather precise information. + +The Agent Stack provides a Form extensions that allows you to collect structured data from users in two ways: + +1. **Initial form rendering** - Present a form as the first interaction before users start a conversation with your agent +2. **Dynamic form requests** - Request forms at any point during a multi-turn conversation when your agent needs specific structured input + +## Initial Form Rendering + +For initial form rendering, you specify the form structure when injecting the extension and then parse the response using a Pydantic model. The form is presented to users before they start a conversation with your agent. + +### Quickstart + + + +Import `FormServiceExtensionServer`, `FormServiceExtensionSpec`, `FormRender`, and field types from the Agent Stack SDK. + + + +Create a Pydantic model with fields matching your form field IDs. + + + +Inject the form extension into your agent function using `FormServiceExtensionSpec.demand(initial_form=FormRender(...))`. + + + +Call `form.parse_initial_form(model=YourModel)` to extract the submitted form data. + + + +```python +from typing import Annotated + +from a2a.types import Message +from pydantic import BaseModel +from agentstack_sdk.server import Server +from agentstack_sdk.a2a.extensions.common.form import FormRender, TextField +from agentstack_sdk.a2a.extensions.services.form import ( + FormServiceExtensionServer, + FormServiceExtensionSpec, +) + +server = Server() + + +class UserInfo(BaseModel): + first_name: str | None + last_name: str | None + + +@server.agent() +async def initial_form_agent( + _message: Message, + form: Annotated[ + FormServiceExtensionServer, + FormServiceExtensionSpec.demand( + initial_form=FormRender( + title="Welcome! 
Please tell us about yourself", + columns=2, + fields=[ + TextField(id="first_name", label="First Name", col_span=1), + TextField(id="last_name", label="Last Name", col_span=1), + ], + ) + ), + ], +): + """Agent that collects user information through an initial form""" + + # Parse the form data using a Pydantic model + user_info = form.parse_initial_form(model=UserInfo) + + if user_info is None: + yield "No form data received." + else: + yield f"Hello {user_info.first_name} {user_info.last_name}! Nice to meet you." + + +if __name__ == "__main__": + server.run() +``` + +## Dynamic Form Requests + +For dynamic form requests during conversation, you can request forms at any point when your agent needs structured input. This is useful when your agent needs to collect additional information based on the conversation flow. + +### Quickstart + + + +Import `FormRequestExtensionServer`, `FormRequestExtensionSpec`, `FormRender`, and field types from the Agent Stack SDK. + + + +Create a Pydantic model with fields matching your form field IDs. + + + +Inject the request form extension into your agent function using `FormRequestExtensionSpec()`. + + + +Call `await form_request.request_form(form=FormRender(...), model=YourModel)` when you need to collect structured input. 
+ + + +```python +from typing import Annotated + +from a2a.types import Message +from a2a.utils.message import get_message_text +from pydantic import BaseModel +from agentstack_sdk.server import Server +from agentstack_sdk.a2a.extensions.common.form import FormRender, TextField +from agentstack_sdk.a2a.extensions.ui.form_request import ( + FormRequestExtensionServer, + FormRequestExtensionSpec, +) + +server = Server() + + +class ContactInfo(BaseModel): + email: str | None + phone: str | None + company: str | None + + +@server.agent() +async def dynamic_form_agent( + message: Message, + form_request: Annotated[ + FormRequestExtensionServer, + FormRequestExtensionSpec(), + ], +): + """Agent that requests forms dynamically during conversation""" + + user_input = get_message_text(message) + + # Check if user wants to provide contact information + if "contact" in user_input.lower() or "reach" in user_input.lower(): + # Request contact form dynamically + contact_info = await form_request.request_form( + form=FormRender( + title="Please provide your contact information", + columns=2, + fields=[ + TextField(id="email", label="Email Address", col_span=2), + TextField(id="phone", label="Phone Number", col_span=1), + TextField(id="company", label="Company", col_span=1), + ], + ), + model=ContactInfo, + ) + + if contact_info is None: + yield "No contact information received." + else: + yield f"Thank you! I'll contact you at {contact_info.email} or {contact_info.phone} regarding {contact_info.company}." + else: + yield "Hello! If you'd like me to contact you, just let me know and I'll ask for your details." 
+ + +if __name__ == "__main__": + server.run() +``` + +## How to work with forms + +Here's what you need to know to add form capabilities to your agent: + +**Import the form components**: +- For form fields and `FormRender`, import from `agentstack_sdk.a2a.extensions.common.form` +- For initial forms, import `FormServiceExtensionServer` and `FormServiceExtensionSpec` from `agentstack_sdk.a2a.extensions.services.form` +- For dynamic forms, import `FormRequestExtensionServer` and `FormRequestExtensionSpec` from `agentstack_sdk.a2a.extensions.ui.form_request` + +**Inject the extension**: Add a form parameter to your agent function using the `Annotated` type hint. + +**For initial forms**: Use `FormServiceExtensionSpec.demand(initial_form=FormRender(...))` to specify the form structure and call `form.parse_initial_form(model=YourModel)` to extract data. + +**For dynamic forms**: Use `FormRequestExtensionSpec()` and call `await form_request.request_form(form=FormRender(...), model=YourModel)` when needed. + +**Access form data**: The recommended approach is to use a Pydantic model (or `TypedDict`, `dataclass`, or any class supported by `pydantic.TypeAdapter`) to load form data. Define a model with fields matching your form field IDs: + +```python +from pydantic import BaseModel + +class ContactInfo(BaseModel): + email: str | None + phone: str | None + company: str | None +``` + +Then, pass `model=ContactInfo` to `parse_initial_form(...)` or `request_form(...)` to get the form data directly as an instance of `ContactInfo`: + +```python +# For initial forms +contact_info: ContactInfo | None = form.parse_initial_form(model=ContactInfo) + +# For dynamic forms +contact_info: ContactInfo | None = await form_request.request_form( + form=FormRender(...), + model=ContactInfo +) +``` + +If you don't use a model, the methods return `FormResponse` which has a `values` dictionary. You can access values using `form_data.values['field_id'].value`. 
Different field types return different value types: + +- **TextField/DateField**: Returns `str | None` +- **FileField**: Returns `list[FileInfo] | None` where each `FileInfo` has `uri`, `name`, and `mime_type` +- **SingleSelectField**: Returns `str | None` (selected option ID) +- **MultiSelectField**: Returns `list[str] | None` (list of selected option IDs) +- **CheckboxField**: Returns `bool | None` + +## Form Field Types + +The Agent Stack supports various field types for collecting different kinds of structured data: + +### TextField +Basic text input fields for collecting strings, names, descriptions, etc. + +```python +from agentstack_sdk.a2a.extensions.common.form import TextField + +TextField( + id="username", + label="Username", + col_span=1, + required=True, + placeholder="Enter your username", + default_value="", + type="text" # Optional, defaults to "text" +) +``` + +### DateField +Date input fields for collecting dates and timestamps. + +```python +from agentstack_sdk.a2a.extensions.common.form import DateField + +DateField( + id="birth_date", + label="Birth Date", + col_span=1, + required=True, + placeholder="YYYY-MM-DD", + default_value="1990-01-01" +) +``` + +### FileField +File upload fields for collecting files from users. + +```python +from agentstack_sdk.a2a.extensions.common.form import FileField + +FileField( + id="document", + label="Upload Document", + col_span=2, + required=True, + accept=["application/pdf", "image/jpeg", "image/png"] +) +``` + +### SingleSelectField
Single-select dropdown fields for choosing a single option from a list. 
+ +```python +from agentstack_sdk.a2a.extensions.common.form import OptionItem, SingleSelectField + +SingleSelectField( + id="contact_method", + label="Preferred Contact Method", + col_span=2, + required=False, + options=[ + OptionItem(id="email", label="Email"), + OptionItem(id="phone", label="Phone"), + OptionItem(id="sms", label="SMS"), + OptionItem(id="none", label="Do not contact") + ], + default_value="email" +) +``` + +### MultiSelectField +Multi-select dropdown fields for choosing multiple options from a list. + +```python +from agentstack_sdk.a2a.extensions.common.form import OptionItem, MultiSelectField + +MultiSelectField( + id="interests", + label="Your Interests", + col_span=2, + required=False, + options=[ + OptionItem(id="tech", label="Technology"), + OptionItem(id="sports", label="Sports"), + OptionItem(id="music", label="Music"), + OptionItem(id="travel", label="Travel") + ], + default_value=["tech", "music"] +) +``` + +### CheckboxField +Single checkbox fields for boolean values. 
+ +```python +from agentstack_sdk.a2a.extensions.common.form import CheckboxField + +CheckboxField( + id="newsletter", + label="Subscribe to Newsletter", + col_span=1, + required=False, + content="I agree to receive marketing emails", + default_value=False +) +``` + +## Form Layout Configuration + +Control how your form appears using the `FormRender` configuration: + +```python +from agentstack_sdk.a2a.extensions.common.form import FormRender + +FormRender( + title="Form Title", + description="Optional description text below the title", + columns=2, # Number of columns in the form grid + submit_label="Custom Submit Button Text", + fields=[ + # Your field definitions here + ] +) +``` + +**FormRender properties**: +- **`title`**: Main heading displayed above the form (optional) +- **`description`**: Optional description text displayed below the title +- **`columns`**: Number of columns in the form grid (1-4, optional) +- **`submit_label`**: Custom text for the submit button (optional, default: "Submit") +- **`fields`**: List of form field definitions (required) + + +Use `col_span` on individual fields to control how they span across the grid. For example, with `columns=2`, a field with `col_span=2` will take the full width, while `col_span=1` will take half the width. + diff --git a/docs/stable/agent-development/llm-proxy-service.mdx b/docs/stable/agent-development/llm-proxy-service.mdx new file mode 100644 index 000000000..cd2600397 --- /dev/null +++ b/docs/stable/agent-development/llm-proxy-service.mdx @@ -0,0 +1,131 @@ +--- +title: "LLM Proxy Service" +description: "Learn how to use the LLM proxy service extension within your agent" +--- + +When building AI agents, one of the first requirements you might have is to connect your agent to a Large Language Model (LLM). Fortunately, the Agent Stack helps with this by providing built-in OpenAI-compatible LLM inference. 
+ +The platform's OpenAI endpoints are model and provider agnostic, serving as a proxy to whatever is configured. + +For you as an agent builder, the usage is extremely simple because we've wrapped the usage into a Service Extension. + + +Service Extensions are a type of [A2A Extension](https://a2a-protocol.org/latest/topics/extensions/) that allows you to easily "inject dependencies" into your agent. This follows the inversion of control principle where your agent defines what it needs, and the platform (in this case, Agent Stack) is responsible for providing those dependencies. + + + +Service extensions are optional by definition, so you should always check if they exist before using them. + + +## Quickstart + + + +Import the necessary components and add the LLM service extension to your agent function. + + + +Specify which model your agent prefer and how you want to access it. + + + +Access the optionally provided LLM configuration and use it with your preferred LLM client. + + + +## Example of LLM Access + +Here's how to add LLM inference capabilities to your agent: + +```python +import os +from typing import Annotated + +from a2a.types import Message +from a2a.utils.message import get_message_text +from agentstack_sdk.server import Server +from agentstack_sdk.a2a.types import AgentMessage +from agentstack_sdk.a2a.extensions import LLMServiceExtensionServer, LLMServiceExtensionSpec + +server = Server() + +@server.agent() +async def example_agent( + input: Message, + llm: Annotated[ + LLMServiceExtensionServer, + LLMServiceExtensionSpec.single_demand(suggested=("ibm/granite-3-3-8b-instruct",)) + ], +): + """Agent that uses LLM inference to respond to user input""" + + if llm and llm.data and llm.data.llm_fulfillments: + # Extract the user's message + user_message = get_message_text(input) + + # Get LLM configuration + # Single demand is resolved to default (unless specified otherwise) + llm_config = llm.data.llm_fulfillments.get("default") + + if llm_config: + # 
Use the LLM configuration with your preferred client + # The platform provides OpenAI-compatible endpoints + api_model = llm_config.api_model + api_key = llm_config.api_key + api_base = llm_config.api_base + + yield AgentMessage(text=f"LLM access configured for model: {api_model}") + else: + yield AgentMessage(text="LLM configuration not found.") + else: + yield AgentMessage(text="LLM service not available.") + +def run(): + server.run(host=os.getenv("HOST", "127.0.0.1"), port=int(os.getenv("PORT", 8000))) + +if __name__ == "__main__": + run() +``` + +## How to request LLM access + +Here's what you need to know to add LLM inference capabilities to your agent: + +**Import the extension**: Import `LLMServiceExtensionServer` and `LLMServiceExtensionSpec` from `agentstack_sdk.a2a.extensions`. + +**Add the LLM parameter**: Add a third parameter to your agent function with the `Annotated` type hint for LLM access. + +**Specify your model requirements**: Use `LLMServiceExtensionSpec.single_demand()` to request a single model (multiple models will be supported in the future). + +**Suggest a preferred model**: Pass a tuple of suggested model names to help the platform choose the best available option. + +**Check if the extension exists**: Always verify that the LLM extension is provided before using it, as service extensions are optional. + +**Access LLM configuration**: Use `llm.data.llm_fulfillments.get("default")` to get the LLM configuration details. + +**Use with your LLM client**: The platform provides `api_model`, `api_key`, and `api_base` that work with OpenAI-compatible clients. 
+ +## Understanding LLM Configuration + +The platform automatically provides you with: + +- **`api_model`**: The specific model identifier that was allocated to your request +- **`api_key`**: Authentication key for the LLM service +- **`api_base`**: The base URL for the OpenAI-compatible API endpoint + +These credentials work with any OpenAI-compatible client library, making it easy to integrate with popular frameworks like: +- BeeAI Framework +- LangChain +- LlamaIndex +- OpenAI Python client +- Custom implementations + +## Model Selection + +When you specify a suggested model like `"ibm/granite-3-3-8b-instruct"`, the platform will: + +1. Check if the requested model is available in your configured environment +2. Allocate the best available model that matches your requirements +3. Provide you with the exact model identifier and endpoint details + +The platform handles the complexity of model provisioning and endpoint management, so you can focus on building your agent logic. diff --git a/docs/stable/agent-development/mcp-oauth.mdx b/docs/stable/agent-development/mcp-oauth.mdx new file mode 100644 index 000000000..4c131bf15 --- /dev/null +++ b/docs/stable/agent-development/mcp-oauth.mdx @@ -0,0 +1,84 @@ +--- +title: "Authorize MCP with OAuth" +description: "Authorize the user to delegate their identity to the MCP server using OAuth" +--- + +Some MCP servers require OAuth authentication to access third-party services on behalf of users. The Agent Stack provides an OAuth extension that allows your agent to seamlessly handle OAuth flows through the user interface. + +The OAuth extension allows users to authorize your agent to access MCP servers that require authentication. The Agent Stack UI serves as a secure channel for users to perform the OAuth flow and grant access to third-party services through the MCP server. + +Once authorized, the OAuth extension automatically manages authentication headers, making it easy to use authenticated MCP clients in your agents. 
+ +## Quickstart + + + +Make sure `mcp` is installed as a dependency in your project. + + + +Import the necessary components from the Agent Stack SDK OAuth extension. + + + +Inject the OAuth extension into your agent function using the `Annotated` type hint. + + + +Use `create_httpx_auth()` to create an authentication handler for your MCP HTTP client. + + + +## Basic OAuth Example + +Here's how to build an agent that uses OAuth to authenticate with an MCP server: + +```python +import os +from typing import Annotated + +import pydantic +from mcp import ClientSession +from mcp.client.streamable_http import streamablehttp_client + +from agentstack_sdk.a2a.extensions.auth.oauth import OAuthExtensionServer, OAuthExtensionSpec +from agentstack_sdk.server import Server + +server = Server() + + +@server.agent() +async def oauth_agent( + oauth: Annotated[OAuthExtensionServer, OAuthExtensionSpec.single_demand()], +): + """Agent that uses OAuth to authenticate with an MCP server""" + mcp_url = "https://mcp.stripe.com" + + async with streamablehttp_client( + url=mcp_url, + auth=await oauth.create_httpx_auth(resource_url=pydantic.AnyUrl(mcp_url)) if oauth else None + ) as (read, write, _): + async with ClientSession(read, write) as session: + await session.initialize() + result = await session.call_tool("get_stripe_account_info") + # Extract text content from CallToolResult + if result.content: + content = result.content[0] + if hasattr(content, 'text'): + yield str(content.text) # type: ignore + else: + yield "No content returned" + + +def run(): + server.run(host=os.getenv("HOST", "127.0.0.1"), port=int(os.getenv("PORT", "8000"))) + + +if __name__ == "__main__": + run() +``` + + + +Always check if the OAuth extension is available before using it to comply with plain A2A clients that may not support OAuth authentication. 
+ \ No newline at end of file diff --git a/docs/stable/agent-development/mcp.mdx b/docs/stable/agent-development/mcp.mdx new file mode 100644 index 000000000..6dd363a8e --- /dev/null +++ b/docs/stable/agent-development/mcp.mdx @@ -0,0 +1,105 @@ +--- +title: "Dynamically select MCP Servers" +description: "Learn how to let the client dynamically select MCP servers on per-task basis" +--- + +In scenarios where it is necessary to dynamically select MCP servers for the agent to use, the Agent Stack provides an MCP extension that allows clients to specify MCP server details at runtime. + +For you as an agent builder, the usage is extremely simple because we’ve wrapped the usage into an MCP Extension. + + + Service Extensions are a type of [A2A + Extension](https://a2a-protocol.org/latest/topics/extensions/) that allows you + to easily "inject dependencies" into your agent. This follows the inversion of + control principle where your agent defines what it needs, and the client + (possibly in cooperation with the platform) is responsible for providing those + dependencies. + + + + Service extensions are optional by definition, so you should always check if + they exist before using them. + + +## Quickstart + + + +Import the necessary components and add the MCP service extension to your agent function. + + + + Specify which MCP servers your agent prefers. + + + +Access the optionally provided MCP extension and use it to create MCP client(s). 
+ + + +## Example of MCP extension + +Here's how to add MCP extension to your agent: + +```python +import os +from typing import Annotated + +from a2a.types import Message +from mcp import ClientSession +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext +from agentstack_sdk.a2a.types import AgentMessage +from agentstack_sdk.a2a.extensions import MCPServiceExtensionServer, MCPServiceExtensionSpec + +server = Server() + +@server.agent() +async def example_agent( + input: Message, + context: RunContext, + mcp: Annotated[ + MCPServiceExtensionServer, + MCPServiceExtensionSpec.single_demand(), + ], +): + """Agent that uses MCP client to list tools""" + + if not mcp: + yield "No tools available" + + async with mcp.create_client() as (read, write), ClientSession(read, write) as session: + await session.initialize() + + tools = await session.list_tools() + + yield "Available tools: \n" + yield "\n".join([t.name for t in tools.tools]) + +def run(): + server.run(host=os.getenv("HOST", "127.0.0.1"), port=int(os.getenv("PORT", 8000))) + +if __name__ == "__main__": + run() +``` + +## Usage with OAuth extension + +In the usual scenario where the specified MCP server requires OAuth authentication, the MCP extension automatically leverages the OAuth extension if activated. Technically, it does exactly what is described in [Authorize MCP with OAuth](/extensions/mcp-oauth). 
However, the agent still needs to declare the extension: + +```python +@server.agent() +async def example_agent( + input: Message, + context: RunContext, + mcp: Annotated[ + MCPServiceExtensionServer, + MCPServiceExtensionSpec.single_demand(), + ], + _: Annotated[OAuthExtensionServer, OAuthExtensionSpec.single_demand()], +): +``` + +## Usage with Connectors + +To further improve user experience, one can use [Connectors](/guides/connectors) and have the Agent Stack platform automatically manage access tokens, hence effectively skipping the aforementioned OAuth flow during agent execution. This is achieved by specifying a connector instead of the actual MCP server that backs the connector. diff --git a/docs/stable/agent-development/messages.mdx b/docs/stable/agent-development/messages.mdx new file mode 100644 index 000000000..a11407d6e --- /dev/null +++ b/docs/stable/agent-development/messages.mdx @@ -0,0 +1,106 @@ +--- +title: "Working with Messages" +description: "Learn how messages — the foundation of agent communication — are structured, sent, and received" +--- + +In the Hello World example, you used `yield` with `AgentMessage` to send textual data to the agent consumer. This concept has two important parts: + +1. **Yielding data**: You can yield data from your agent implementation, which gets sent to the client +2. **AgentMessage wrapper**: `AgentMessage` is a convenience wrapper around [A2A](https://github.com/a2aproject/A2A) `Message` that simplifies common use cases. Think of responding with text to the client. + + +## Agent Stack SDK Message Types + +The Agent Stack SDK simplifies development by allowing you to yield different types of data. You can use convenient SDK constructs or direct A2A components. + +### Convenience Wrappers + +#### AgentMessage +The most common way to respond with text. 
It's a convenience wrapper around A2A `Message` that makes it easy to create responses: + +```python +yield AgentMessage(text="This is my text", metadata={"foo": "bar"}) +``` + +#### Plain strings +Simple strings are automatically converted to textual A2A `Message` objects: + +```python +yield "Hello, world!" +``` + +#### Plain dict +Dictionaries are automatically converted to a `Message` containing a `DataPart`: + +```python +yield {"status": "processing", "progress": 50} +``` + +#### AgentArtifact + +Same as `AgentMessage` but simplifies work with Artifacts. + +### Direct A2A Components + +For more advanced use cases, you can yield direct A2A protocol components. + + +While it's perfectly fine to yield plain A2A Components, the Agent Stack forms opinions on communication to support great UX in the GUI. For the best user experience, we recommend using the convenience wrappers when possible. + + + +Feel free to check the A2A [Key Concepts](https://a2a-protocol.org/latest/topics/key-concepts/#fundamental-communication-elements) page to understand all the structures thoroughly. + + +#### Message +The basic communication unit in the A2A protocol, representing a single turn in a conversation. + +```python +import uuid +from a2a.types import Message, TextPart, Part, Role + +# @server.agent(...) +async def example_agent(): + yield Message(role=Role.agent, message_id=str(uuid.uuid4()), parts=[Part(root=TextPart(text="Hello from the agent!"))]) +``` + +#### Part +The fundamental unit of content. For example, a `TextPart` contains text data. A `Message` consists of multiple `Part` objects. Can be any of `TextPart`, `FilePart`, or `DataPart`. + +```python +from a2a.types import TextPart + +# @server.agent(...) +async def example_agent(): + yield TextPart(text="Hello from the agent!") +``` + +#### Artifact +Tangible outputs produced by the agent, such as documents, files, or other generated content.
+ +```python +import uuid +from a2a.types import Artifact, FilePart, FileWithUri, Part + +# @server.agent(...) +async def example_agent(): + yield Artifact( + artifact_id=str(uuid.uuid4()), + parts=[Part(root=FilePart(file=FileWithUri(uri="https://www.ibm.com/us-en", mime_type="text/html", name="IBM Website")))], + ) +``` + +#### TaskStatus +A stateful unit of work that can annotate a transaction spanning multiple messages. The task state can change over time as the agent progresses. + +```python +import uuid +from a2a.types import TaskStatus, TextPart, Message, TaskState, Part, Role + +# @server.agent(...) +async def example_agent(): + yield TaskStatus( + message=Message(message_id=str(uuid.uuid4()), role=Role.agent, parts=[Part(root=TextPart(text="Please provide some input."))]), + state=TaskState.input_required, + ) +``` \ No newline at end of file diff --git a/docs/stable/agent-development/multi-turn.mdx b/docs/stable/agent-development/multi-turn.mdx new file mode 100644 index 000000000..368038f0b --- /dev/null +++ b/docs/stable/agent-development/multi-turn.mdx @@ -0,0 +1,278 @@ +--- +title: "Multi-Turn Conversations" +description: "Learn how to manage conversation state and history to build context-aware agents in Agent Stack" +--- + +When building conversational AI agents, one of the key requirements is maintaining context across multiple interactions. While agent functions are essentially stateless by design, the Agent Stack provides built-in mechanisms to access and manage conversation history. + +You need to explicitly store messages in the context memory using `context.store()` to make them available for retrieval in future interactions. + + +The stateless design of agent functions ensures reliability and scalability, while the context memory system provides the conversation continuity that users expect from AI assistants. + + +## Quickstart + + + +Use `await context.store(input)` to store the current user message in the conversation history. 
+ + + +Use the `RunContext` parameter to access the conversation store and load previous messages. + + + +Retrieve and filter the conversation history to get the messages relevant to your agent's logic. + + + +Use `await context.store(response)` to store your agent's responses for future conversation context. + + + +Set up persistent context storage to maintain conversation history across agent restarts. + + + +## Basic History Access + +Here's how to access conversation history in your agent: + +```python +import os + +from a2a.types import Message +from a2a.utils.message import get_message_text + +from agentstack_sdk.a2a.types import AgentMessage +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext + +server = Server() + + +@server.agent() +async def example_agent(input: Message, context: RunContext): + """Agent that demonstrates conversation history access""" + + # Store the current message in the context store + await context.store(input) + + # Get the current user message + current_message = get_message_text(input) + print(f"Current message: {current_message}") + + # Load all messages from conversation history (including current message) + history = [message async for message in context.load_history() if isinstance(message, Message) and message.parts] + + # Process the conversation history + print(f"Found {len(history)} messages in conversation (including current)") + + # Your agent logic here - you can now reference all messages in the conversation + message = AgentMessage(text=f"Hello! 
I can see we have {len(history)} messages in our conversation.") + yield message + + # Store the message in the context store + await context.store(message) + + +def run(): + server.run(host=os.getenv("HOST", "127.0.0.1"), port=int(os.getenv("PORT", 8000))) + +if __name__ == "__main__": + run() +``` + +## Understanding Conversation History + +The `context.load_history()` method returns an async iterator containing all items in the conversation, including the current message. This can include: + +- **A2A Messages**: Both user and assistant messages from the conversation (including the current message) +- **Artifacts**: Any files, documents, or other artifacts shared during the conversation + +For multi-turn conversations, you'll primarily work with A2A messages, which include: +- **User messages**: Messages sent by the user +- **Assistant messages**: Previous responses from your agent + + +The history includes the current message, so if you want only previous messages, you may need to filter out the last message or use the current message separately. + + + +The history iterator returns all message types. Always filter messages using `isinstance(message, Message)` to ensure you're working with the correct message format. + + +## Message Storage Guidelines + +Since messages are not automatically stored, you need to explicitly call `context.store()` for any message you want to be available in future interactions. 
Here are the key guidelines: + +### What to Store + +- **User messages**: Always store incoming user messages to maintain conversation context +- **Agent responses**: Store your agent's responses so they're available for future reference +- **Important artifacts**: Store any files, documents, or other artifacts that should persist + +### When to Store + +```python +@server.agent() +async def my_agent(input: Message, context: RunContext): + # Store the incoming user message immediately + await context.store(input) + + # Process the message and generate response + response = AgentMessage(text="Your response here") + yield response + + # Store the agent's response after yielding + await context.store(response) +``` + +### Storage Best Practices + +- **Store early**: Store user messages at the beginning of your agent function +- **Store after yielding**: Store agent responses after yielding them to the user +- **Be selective**: Only store messages that are relevant for future conversation context +- **Handle errors**: Consider what happens if storage fails - your agent should still function + + +If you don't store messages, they won't be available in `context.load_history()` for future interactions. This means your agent will lose conversation context. + + +## Persistent Storage + +By default, conversation history is stored in memory, which means it's lost when the agent process restarts. For production applications, you'll want to use persistent storage. 
+ +### Using Platform Context Store + +To maintain conversation history across agent restarts, configure your server to use the platform's persistent context store: + +```python +import os +from agentstack_sdk.server import Server +from agentstack_sdk.server.store.platform_context_store import PlatformContextStore + +server = Server() + +def run(): + server.run( + host=os.getenv("HOST", "127.0.0.1"), + port=int(os.getenv("PORT", 8000)), + context_store=PlatformContextStore() + ) +``` + + +The `PlatformContextStore` automatically handles conversation persistence, ensuring that users can continue their conversations even after agent restarts or deployments. + + +## Advanced History Usage with BeeAI Framework + +Here's a sophisticated example using the BeeAI Framework to build a multi-turn chat agent that leverages conversation history and LLM capabilities: + +```python +import os +from typing import Annotated + +from a2a.types import Message, Role +from a2a.utils.message import get_message_text +from beeai_framework.adapters.agentstack.backend.chat import AgentStackChatModel +from beeai_framework.agents.requirement import RequirementAgent +from beeai_framework.agents.requirement.requirements.conditional import ConditionalRequirement +from beeai_framework.backend import AssistantMessage, UserMessage +from beeai_framework.backend.types import ChatModelParameters +from beeai_framework.tools.think import ThinkTool + +from agentstack_sdk.a2a.extensions import ( + LLMServiceExtensionServer, + LLMServiceExtensionSpec, +) +from agentstack_sdk.a2a.types import AgentMessage +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext +from agentstack_sdk.server.store.platform_context_store import PlatformContextStore + +server = Server() + +FrameworkMessage = UserMessage | AssistantMessage + + +def to_framework_message(message: Message) -> FrameworkMessage: + """Convert A2A Message to BeeAI Framework Message format""" + message_text = 
"".join(part.root.text for part in message.parts if part.root.kind == "text") + + if message.role == Role.agent: + return AssistantMessage(message_text) + elif message.role == Role.user: + return UserMessage(message_text) + else: + raise ValueError(f"Invalid message role: {message.role}") + + +@server.agent() +async def multi_turn_chat_agent( + input: Message, + context: RunContext, + llm: Annotated[LLMServiceExtensionServer, LLMServiceExtensionSpec.single_demand()], +): + """Multi-turn chat agent with conversation memory and LLM integration""" + await context.store(input) + + # Load conversation history + history = [message async for message in context.load_history() if isinstance(message, Message) and message.parts] + + # Initialize BeeAI Framework LLM client + llm_client = AgentStackChatModel() + llm_client.set_context(llm) + + # Create a RequirementAgent with conversation memory + agent = RequirementAgent( + name="Agent", + llm=llm_client, + role="helpful assistant", + instructions="You are a helpful assistant that is supposed to remember users name. 
Ask them for their name and remember it.", + tools=[ThinkTool()], + requirements=[ConditionalRequirement(ThinkTool, force_at_step=1)], + save_intermediate_steps=False, + middlewares=[], + ) + + # Load conversation history into agent memory + await agent.memory.add_many(to_framework_message(item) for item in history) + + # Process the current message and generate response + async for event, meta in agent.run(get_message_text(input)): + if meta.name == "success" and event.state.steps: + step = event.state.steps[-1] + if not step.tool: + continue + + tool_name = step.tool.name + + if tool_name == "final_answer": + response = AgentMessage(text=step.input["response"]) + + yield response + await context.store(response) + + +def run(): + server.run( + host=os.getenv("HOST", "127.0.0.1"), + port=int(os.getenv("PORT", "8000")), + context_store=PlatformContextStore(), # Enable persistent storage + ) + + +if __name__ == "__main__": + run() +``` + +This advanced example demonstrates several key concepts: + +- **LLM Integration**: Uses the platform's LLM service extension to get model access +- **Framework Integration**: Leverages the BeeAI Framework for sophisticated agent capabilities +- **Memory Management**: Converts conversation history to framework format and loads it into agent memory +- **Tool Usage**: Includes thinking tools and conditional requirements for better reasoning +- **Persistent Storage**: Uses `PlatformContextStore` for conversation persistence diff --git a/docs/stable/agent-development/overview.mdx b/docs/stable/agent-development/overview.mdx new file mode 100644 index 000000000..9882ea8b3 --- /dev/null +++ b/docs/stable/agent-development/overview.mdx @@ -0,0 +1,161 @@ +--- +title: "Agent Development SDK (Python)" +description: "Enhance your existing AI agents with platform capabilities" +--- + +The Agent Stack SDK is a Python library that enhances your existing AI agents with platform capabilities. 
Whether you've built your agent with LangGraph, CrewAI, or custom logic, the SDK connects it to the Agent Stack platform, giving you instant access to runtime-configurable services, interactive UI components, and deployment infrastructure. + +Built on top of the [Agent2Agent Protocol (A2A)](https://a2a-protocol.org/), the SDK wraps your agent implementation and adds powerful functionality through [A2A extensions](https://a2a-protocol.org/latest/topics/extensions/). + +This enables your agent to leverage platform services like [LLM providers](/extensions/llm-proxy-service), [file storage](/guides/files), [vector databases](/extensions/rag), and rich UI components. That's all without rewriting your core agent logic. + +## What the SDK Provides + +The Agent Stack SDK offers several key capabilities: + +- **Server wrapper**: Simplified server creation and agent registration +- **Extension system**: Dependency injection for services (LLM, embeddings, file storage) and UI components (forms, citations, trajectory) +- **Convenience wrappers**: Simplified message types like `AgentMessage` that reduce boilerplate +- **Context management**: Built-in conversation history and state management +- **Async generator pattern**: Natural task-based execution with pause/resume capabilities + +## Core Concepts + +### Server and Agent Registration + +The SDK uses a server-based architecture where you create a `Server` instance and register your agent function: + +```python +from a2a.types import Message +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext +from agentstack_sdk.a2a.types import AgentMessage + +server = Server() + +@server.agent() +async def my_agent(input: Message, context: RunContext): + """Your agent implementation""" + yield AgentMessage(text="Hello from my agent!") +``` + +### Asynchronous Generator Pattern + +Agent functions are asynchronous generators that yield responses. 
This pattern aligns perfectly with A2A's task model: + +- **One function execution** = **One A2A task** +- **Yielding data** = **Sending messages to the client** +- **Pausing execution** = **Waiting for user input** + +The generator pattern is particularly powerful when your agent needs to request structured input from users. + +When you await a form request, execution pauses the task, allowing the user to fill out the form. Once submitted, execution resumes with the form data: + +```python +from typing import Annotated + +from a2a.types import Message +from agentstack_sdk.a2a.extensions.common.form import FormRender, TextField +from agentstack_sdk.a2a.extensions.ui.form_request import ( + FormRequestExtensionServer, + FormRequestExtensionSpec, +) +from agentstack_sdk.a2a.types import AgentMessage +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext + +server = Server() + +@server.agent() +async def form_agent( + input: Message, + context: RunContext, + form_request: Annotated[FormRequestExtensionServer, FormRequestExtensionSpec()] +): + """Agent that pauses execution to request user input""" + yield AgentMessage(text="I need some information from you.") + + # Execution pauses here - task enters input_required state + # User fills out the form in the UI + form_data = await form_request.request_form( + form=FormRender( + title="Please provide your details", + fields=[ + TextField(id="name", label="Your Name"), + TextField(id="email", label="Email Address"), + ], + ) + ) + + # Execution resumes after user submits the form + if form_data and form_data.values: + name = form_data.values["name"].value + email = form_data.values["email"].value + yield AgentMessage(text=f"Thank you, {name}! I'll contact you at {email}.") + else: + yield AgentMessage(text="Form was not filled out.") +``` + +The whole complexity of Task management is handled via Agent Stack SDK. 
+ +The generator pattern also enables agents to: +- Stream responses incrementally +- Yield multiple messages during a single task +- Handle long-running operations gracefully + + + +### Extension System + +Agent Stack is utilizing A2A extensions to extend the protocol with Agent Stack-specific capabilities. They enable your agent to access platform services and enhance the user interface beyond what the base A2A protocol provides. + +There are two types of extensions: + +#### Dependency Injection Service Extensions + +Service extensions use a dependency injection pattern where each run of the agent declares a demand that must be fulfilled by the client (consumer). The platform provides configured access to external services based on these demands: + +- **LLM Service**: Language model access with automatic provider selection +- **Embedding Service**: Text embedding generation for RAG +- **Platform API**: File storage, vector databases, and platform services +- **MCP**: Model Context Protocol integration + +```python +from typing import Annotated +from a2a.types import Message +from agentstack_sdk.a2a.extensions import ( + LLMServiceExtensionServer, + LLMServiceExtensionSpec, +) +from agentstack_sdk.a2a.types import AgentMessage +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext + +server = Server() + +@server.agent() +async def llm_agent( + input: Message, + context: RunContext, + llm: Annotated[LLMServiceExtensionServer, LLMServiceExtensionSpec.single_demand()] +): + # The demand is fulfilled by the client - llm is provided if available + if llm: + # response = await llm.chat(messages=[...]) + # ... 
+ pass + else: + yield AgentMessage(text="LLM service not available") +``` + +#### UI Extensions + +UI extensions add extra metadata to messages, enabling the Agent Stack UI to render more advanced interactive components: + +- **Forms**: Collect structured user input through interactive forms +- **Citations**: Display source references with clickable inline links +- **Trajectory**: Visualize agent reasoning steps with execution traces + +These extensions enhance messages with metadata that the UI interprets to create rich, interactive experiences beyond standard text responses. + + diff --git a/docs/stable/agent-development/rag.mdx b/docs/stable/agent-development/rag.mdx new file mode 100644 index 000000000..73de13509 --- /dev/null +++ b/docs/stable/agent-development/rag.mdx @@ -0,0 +1,373 @@ +--- +title: "Build RAG Pipelines" +description: "Use vector stores, embedding and text-extraction services to build RAG pipelines" +--- + +Retrieval Augmented Generation (RAG) is one of the keystones for efficient data processing and search in the age of AI agents. The goal is to surface +information from a knowledge base relevant to a specific user query and provide curated context to the LLM. This is +a complex topic with many variants. We will focus on the fundamental building blocks that any RAG pipeline needs. + +The document processing pipeline: + +1. **text extraction** - process complex document formats (PDF, CSV, etc.) +2. **text splitting** - create meaningful chunks out of long pages of text +3. **embedding** - vectorize chunks (extract semantic meaning) +4. **store** - insert chunks to a specialized database + +Retrieval: + +1. **embedding** - vectorize user query +2. 
**search** - retrieve the document chunks most similar to the user query + +## Building blocks + +Let's break down how each step can be implemented with the Agent Stack API, but first, make sure you have the +Platform API extension enabled in your agent: + +```python +from typing import Annotated + +from a2a.types import Message + +from agentstack_sdk.server import Server +from agentstack_sdk.a2a.extensions import ( + PlatformApiExtensionServer, + PlatformApiExtensionSpec, + EmbeddingServiceExtensionServer, + EmbeddingServiceExtensionSpec, +) +from agentstack_sdk.server.context import RunContext + +# Fileformats supported by the text-extraction service (docling) +default_input_modes = [ + "text/plain", + "application/pdf", + "application/vnd.openxmlformats-officedocument.wordprocessingml.document", # DOCX + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", # XLSX + "application/vnd.openxmlformats-officedocument.presentationml.presentation", # PPTX + "text/markdown", # Markdown + "text/asciidoc", # AsciiDoc + "text/html", # HTML + "application/xhtml+xml", # XHTML + "text/csv", # CSV + "image/png", # PNG + "image/jpeg", # JPEG + "image/tiff", # TIFF + "image/bmp", # BMP + "image/webp", # WEBP +] + +server = Server() + +@server.agent( + default_input_modes=default_input_modes, default_output_modes=["text/plain"] +) +async def rag_agent( + input: Message, + context: RunContext, + embedding: Annotated[ + EmbeddingServiceExtensionServer, EmbeddingServiceExtensionSpec.single_demand() + ], + _: Annotated[PlatformApiExtensionServer, PlatformApiExtensionSpec()], +): ... # Agent code + +``` + +Agent Stack uses [docling](https://docling-project.github.io/docling/) for extracting text out of documents in various +[supported formats](https://docling-project.github.io/docling/usage/supported_formats/). To select which formats the +agent can accept, use the `default_input_modes` parameter in the agent decorator.
+ +First, let's build a set of functions to process the documents which we will then use in the agent. + +### Text Extraction + +To extract text from a `File` uploaded to the Platform API, simply use `file.create_extraction()` and wait for +the result. After extraction is completed, the `extraction` object will contain +`extracted_files`, which is a list of extracted files in different formats. + +```python +from agentstack_sdk.platform import File, Extraction +import asyncio + +async def extract_file(file: File): + extraction = await file.create_extraction() + while extraction.status in {"pending", "in_progress"}: + await asyncio.sleep(1) + extraction = await file.get_extraction() + if extraction.status != "completed": + raise ValueError(f"Extraction failed with status: {extraction.status}") +``` + +#### Extraction Formats + +Text extraction produces two extraction formats and you can request either subset by passing `formats` to `create_extraction` (e.g., `["markdown"]` if you only need plain text): + +- __markdown__: The extracted text formatted as Markdown (`file.load_text_content()`) +- __vendor_specific_json__: The Docling-specific JSON format containing document structure (`file.load_json_content()`) + +> __WARNING__: +> The `vendor_specific_json` format is not generated for plain text or markdown files, as Docling does not support these formats as input. + +### Text Splitting + +In this example we will use `MarkdownTextSplitter` from the +[langchain-text-splitters](https://reference.langchain.com/python/langchain_text_splitters/) package. +This will split a long document into reasonably sized chunks based on the Markdown header structure. + +```python +from langchain_text_splitters import MarkdownTextSplitter + +def chunk_markdown(markdown_text: str) -> list[str]: + return MarkdownTextSplitter().split_text(markdown_text) +``` + +### Embedding + +Now we need to embed each chunk using the embedding service. 
Similarly to LLM, Agent Stack implements +OpenAI-compatible embedding API. You can use any preferred client, in this example we will use the embedding extension +to create an `AsyncOpenAI` client: + +```python +from openai import AsyncOpenAI +from agentstack_sdk.a2a.extensions import EmbeddingServiceExtensionServer + +def get_embedding_client( + embedding: EmbeddingServiceExtensionServer, +) -> tuple[AsyncOpenAI, str]: + if not embedding or not embedding.data: + raise ValueError("Embedding extension not provided") + + embedding_config = embedding.data.embedding_fulfillments.get("default") + if not embedding_config: + raise ValueError("Default embedding configuration not found") + + embedding_client = AsyncOpenAI( + api_key=embedding_config.api_key, base_url=embedding_config.api_base + ) + embedding_model = embedding_config.api_model + return embedding_client, embedding_model + + +``` + +Now we can use this client to embed our chunks and create vector store items: + +```python +from openai import AsyncOpenAI +from agentstack_sdk.platform import VectorStoreItem, File + + +async def embed_chunks( + file: File, chunks: list[str], embedding_client: AsyncOpenAI, embedding_model: str +) -> list[VectorStoreItem]: + vector_store_items = [] + embedding_result = await embedding_client.embeddings.create( + input=chunks, + model=embedding_model, + encoding_format="float", + ) + for i, embedding_data in enumerate(embedding_result.data): + item = VectorStoreItem( + document_id=file.id, + document_type="platform_file", + model_id=embedding_model, + text=chunks[i], + embedding=embedding_data.embedding, + metadata={"chunk_index": str(i)}, # add arbitrary string metadata + ) + vector_store_items.append(item) + return vector_store_items +``` + +### Store + +Finally, to insert the prepared items, we need a function to create a vector store. For this we will need to know +the dimension of the embeddings and model_id. 
Because the model is chosen by the embedding extension and we don't know +it in advance, we will create a test embedding request to calculate the dimension: + +```python +from openai import AsyncOpenAI +from agentstack_sdk.platform import VectorStore + + +async def create_vector_store(embedding_client: AsyncOpenAI, embedding_model: str): + embedding_response = await embedding_client.embeddings.create( + input="test", model=embedding_model + ) + dimension = len(embedding_response.data[0].embedding) + return await VectorStore.create( + name="rag-example", + dimension=dimension, + model_id=embedding_model, + ) +``` + +We can then add the prepared items using `vector_store.add_documents`, this will become clear in the final example. + +### Query vector store + +Assuming we have our knowledge base of documents prepared, we can now easily search the store according to the user +query. The following function will retrieve five document chunks most similar to the query embedding: + +```python +from openai import AsyncOpenAI +from agentstack_sdk.platform import VectorStore, VectorStoreSearchResult + +async def search_vector_store( + vector_store: VectorStore, + query: str, + embedding_client: AsyncOpenAI, + embedding_model: str, +) -> list[VectorStoreSearchResult]: + embedding_response = await embedding_client.embeddings.create( + input=query, model=embedding_model + ) + query_vector = embedding_response.data[0].embedding + return await vector_store.search(query_vector=query_vector, limit=5) +``` + +## Putting all together + +Having all the pieces in place, we can now build the agent. + +### Simple agent + +This is a simplified agent that expects a message with one or more files attached as `FilePart` and a +user query as `TextPart`. A new vector store is created for each message. 
+ +```python +@server.agent( + default_input_modes=default_input_modes, + default_output_modes=["text/plain"], +) +async def rag_agent( + input: Message, + context: RunContext, + embedding: Annotated[ + EmbeddingServiceExtensionServer, EmbeddingServiceExtensionSpec.single_demand() + ], + _: Annotated[PlatformApiExtensionServer, PlatformApiExtensionSpec()], +) -> AsyncGenerator[RunYield, None]: + # Create embedding client + embedding_client, embedding_model = get_embedding_client(embedding) + + # Extract files and query from input + files = [] + query = "" + for part in input.parts: + match part.root: + case FilePart(file=FileWithUri(uri=uri)): + files.append(await File.get(PlatformFileUrl(uri).file_id)) + case TextPart(text=text): + query = text + case _: + raise NotImplementedError(f"Unsupported part: {type(part.root)}") + + if not files or not query: + raise ValueError("No files or query provided") + + # Create vector store + vector_store = await create_vector_store(embedding_client, embedding_model) + + # Process files, add to vector store + for file in files: + await extract_file(file) + async with file.load_text_content() as loaded_file: + chunks = chunk_markdown(loaded_file.text) + items = await embed_chunks(file, chunks, embedding_client, embedding_model) + await vector_store.add_documents(items=items) + + # Search vector store + results = await search_vector_store( + vector_store, query, embedding_client, embedding_model + ) + + # TODO: You can add LLM result processing here + + snippet = [res.model_dump() for res in results] + yield f"# Results:\n```\n{json.dumps(snippet, indent=2)}\n```" +``` + +Instead of simply returning the output of the vector store, you would typically plug this as a tool into your favorite +agentic framework. + +### Conversational agent + +Having a new vector store for each message is not really a good practice. Typically, you would want to search through +all documents uploaded in the conversation. 
Below is a version of the agent which will reuse the vector store across +messages so you can ask multiple queries or add additional documents later on. + +```python +@server.agent( + default_input_modes=default_input_modes, + default_output_modes=["text/plain"], +) +async def rag_agent( + input: Message, + context: RunContext, + embedding: Annotated[ + EmbeddingServiceExtensionServer, EmbeddingServiceExtensionSpec.single_demand() + ], + _: Annotated[PlatformApiExtensionServer, PlatformApiExtensionSpec()], +) -> AsyncGenerator[RunYield, None]: + # Create embedding client + embedding_client, embedding_model = get_embedding_client(embedding) + + # Extract files and query from input + files = [] + query = "" + for part in input.parts: + match part.root: + case FilePart(file=FileWithUri(uri=uri)): + files.append(await File.get(PlatformFileUrl(uri).file_id)) + case TextPart(text=text): + query = text + case _: + raise NotImplementedError(f"Unsupported part: {type(part.root)}") + + # Check if vector store exists + vector_store = None + async for message in context.load_history(): + match message: + case Message(parts=[Part(root=DataPart(data=data))]): + vector_store = await VectorStore.get(data["vector_store_id"]) + + # Create vector store if it does not exist + if not vector_store: + vector_store = await create_vector_store(embedding_client, embedding_model) + # store vector store id in context for future messages + data_part = DataPart(data={"vector_store_id": vector_store.id}) + await context.store(AgentMessage(parts=[data_part])) + + # Process files, add to vector store + for file in files: + await extract_file(file) + async with file.load_text_content() as loaded_file: + chunks = chunk_markdown(loaded_file.text) + items = await embed_chunks(file, chunks, embedding_client, embedding_model) + await vector_store.add_documents(items=items) + + # Search vector store + if query: + results = await search_vector_store( + vector_store, query, embedding_client,
embedding_model + ) + snippet = [res.model_dump() for res in results] + + # TODO: You can add LLM result processing here + + yield f"# Results:\n```\n{json.dumps(snippet, indent=2)}\n```" + elif files: + yield f"{len(files)} file(s) processed" + else: + yield "Nothing to do" +``` + +### Next steps + +To further improve the agent, learn how to use other parts of the platform such as LLMs, +file uploads and conversations: + +- [LLM extension](/extensions/llm-proxy-service) +- [Multi-turn conversations](/guides/multi-turn) +- [File handling](/guides/files) diff --git a/docs/stable/agent-development/secrets.mdx b/docs/stable/agent-development/secrets.mdx new file mode 100644 index 000000000..d40d70099 --- /dev/null +++ b/docs/stable/agent-development/secrets.mdx @@ -0,0 +1,140 @@ +--- +title: "Manage Runtime Secrets" +description: "Learn how your agent can request secrets (e.g. API keys) to be provided by the user." +--- + +Secrets are a way for agent builders to request sensitive values for agent execution. A great example is API keys that your agent needs to access external services. + +The secrets extension allows you to demand specific secrets from users. Users can provide these secrets before running the agent, which is why secret fulfillment is optional. + +We don't want to bother users unless absolutely necessary. When an agent can't continue its work without a secret and the user hasn't provided it beforehand, the agent may request the secret dynamically and, for example, reject the user request if not specified. + +Once a secret is provided by the user, it's stored in the platform so subsequent runs don't need to prompt the user again. + + +Users can revoke or update the secret at any time through the GUI. + + +## Quickstart + + + +Import the necessary components from the Agent Stack SDK secrets extension. + + + +Inject the Secrets extension into your agent function using the `Annotated` type hint. 
+ + + +Create `SecretDemand` objects for each secret your agent needs. + + + +Check if secrets are provided and request them dynamically if needed. + + + +## Basic Secrets Example + +Here's how to add secrets capabilities to your agent: + +```python +import os +from typing import Annotated + +from a2a.types import Message + +from agentstack_sdk.a2a.extensions.auth.secrets import ( + SecretDemand, + SecretsExtensionServer, + SecretsExtensionSpec, + SecretsServiceExtensionParams, +) +from agentstack_sdk.server import Server + +server = Server() + + +@server.agent() +async def secrets_agent( + input: Message, + secrets: Annotated[ + SecretsExtensionServer, + SecretsExtensionSpec.single_demand(key="SLACK_API_KEY", name="Slack", description="Access to Slack"), + ], +): + """Agent that requests a secret that can be provided during runtime""" + if secrets and secrets.data and secrets.data.secret_fulfillments: + yield f"Slack API key: {secrets.data.secret_fulfillments['SLACK_API_KEY'].secret}" + else: + runtime_provided_secrets = await secrets.request_secrets( + params=SecretsServiceExtensionParams( + secret_demands={"SLACK_API_KEY": SecretDemand(description="I really need Slack Key", name="Slack")} + ) + ) + if runtime_provided_secrets and runtime_provided_secrets.secret_fulfillments: + yield f"Slack API key: {runtime_provided_secrets.secret_fulfillments['SLACK_API_KEY'].secret}" + else: + yield "No Slack API key provided" + + +def run(): + server.run(host=os.getenv("HOST", "127.0.0.1"), port=int(os.getenv("PORT", 8000))) + + +if __name__ == "__main__": + run() +``` + +## How to work with secrets + +Here's what you need to know to add secrets capabilities to your agent: + +**Import the secrets extension**: Import `SecretsExtensionServer`, `SecretsExtensionSpec`, `SecretDemand`, and `SecretsServiceExtensionParams` from `agentstack_sdk.a2a.extensions.auth.secrets`. 
+ +**Inject the extension**: Add a secrets parameter to your agent function using the `Annotated` type hint with `SecretsExtensionServer` and `SecretsExtensionSpec`. + +**Define your secret demands**: Create `SecretDemand` objects for each secret your agent needs, specifying the name and description. + +**Check for pre-configured secrets**: Always check if secrets are already provided before requesting them dynamically. + +**Request secrets dynamically**: Use `await secrets.request_secrets()` to ask for secrets during runtime if they weren't provided beforehand. + +**Handle missing secrets**: Implement appropriate fallback behavior when secrets are not available. + + +Secrets are securely stored in the platform and automatically provided to your agent on subsequent runs, so users only need to provide them once. + + + +Always check if the secrets extension is available before using it to comply with plain A2A clients. + + +## Usage Patterns + +There are two main patterns for working with secrets in your agents: + +### Pre-configured Secrets + +When secrets are provided before the agent runs, they're available immediately in the `secrets.data.secret_fulfillments` object. This is the preferred approach as it provides a smoother user experience. + +```python +# Check if secrets are pre-configured +if secrets and secrets.data and secrets.data.secret_fulfillments: + api_key = secrets.data.secret_fulfillments['default'].secret + # Use the secret immediately +``` + +### Dynamic Secret Requests + +When secrets aren't pre-configured, you can request them during runtime using `await secrets.request_secrets()`. This is useful when your agent needs to ask for secrets based on user input or when the secret requirement is conditional. 
+ +```python +# Request secrets dynamically +runtime_secrets = await secrets.request_secrets( + params=SecretsServiceExtensionParams( + secret_demands={"default": SecretDemand(description="API key needed", name="API Key")} + ) +) +``` diff --git a/docs/stable/agent-development/tool-calls.mdx b/docs/stable/agent-development/tool-calls.mdx new file mode 100644 index 000000000..07c0d1c40 --- /dev/null +++ b/docs/stable/agent-development/tool-calls.mdx @@ -0,0 +1,82 @@ +--- +title: "Approve Tool Calls" +description: "Have tool calls approved by the user before execution" +--- + +Many agent frameworks support the ability to request user approval before executing certain actions. This is especially useful when an agent is calling external tools that may have significant effects or costs associated with their usage. + +The Tool Call extension provides a mechanism for implementing this functionality over A2A connection. + +## Usage + + + + Inject the `ToolCallExtension` into your agent function using the `Annotated` + type hint. + + + + Use `request_tool_call_approval()` method to request tool call approval from the A2A client side. 
+ + + +## Basic Example + +Here's how to use this extension with the [BeeAI Framework](https://framework.beeai.dev/modules/agents/requirement-agent#ask-permission-requirement) to request user approval before executing a tool call: + +```python +from typing import Annotated, Any + +from a2a.types import ( + Message, +) +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext +from agentstack_sdk.a2a.extensions.tools.call import ( + ToolCallExtensionParams, + ToolCallExtensionServer, + ToolCallExtensionSpec, + ToolCallRequest, +) +from agentstack_sdk.a2a.extensions.tools.exceptions import ToolCallRejectionError +from beeai_framework.agents.requirement import RequirementAgent +from beeai_framework.backend import ChatModel +from beeai_framework.agents.requirement.requirements.ask_permission import AskPermissionRequirement +from beeai_framework.tools import Tool +from beeai_framework.tools.think import ThinkTool +from beeai_framework.adapters.mcp.serve.server import _tool_factory + +server = Server() + + +@server.agent() +async def tool_call_agent( + input: Message, + context: RunContext, + mcp_tool_call: Annotated[ToolCallExtensionServer, ToolCallExtensionSpec(params=ToolCallExtensionParams())], +): + async def handler(tool: Tool, input: dict[str, Any]) -> bool: + try: + await mcp_tool_call.request_tool_call_approval( + # using MCP Tool data model as intermediary to simplify conversion + ToolCallRequest.from_mcp_tool(_tool_factory(tool), input=input), # type: ignore + context=context, + ) + return True + except ToolCallRejectionError: + return False + + think_tool = ThinkTool() + agent = RequirementAgent( + llm=ChatModel.from_name("ollama:gpt-oss:20b"), + tools=[think_tool], + requirements=[AskPermissionRequirement([think_tool], handler=handler)], + ) + + result = await agent.run("".join(part.root.text for part in input.parts if part.root.kind == "text")) + yield result.output[0].text + + +if __name__ == "__main__": + server.run() 
+``` diff --git a/docs/stable/agent-development/trajectory.mdx b/docs/stable/agent-development/trajectory.mdx new file mode 100644 index 000000000..d2efbf7dc --- /dev/null +++ b/docs/stable/agent-development/trajectory.mdx @@ -0,0 +1,148 @@ +--- +title: Visualize Agent Trajectories +description: Show users the step-by-step reasoning process of your agent. +--- + +## Basic Usage + +```python +from typing import Annotated +from a2a.types import Message +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext +from agentstack_sdk.a2a.extensions import TrajectoryExtensionServer, TrajectoryExtensionSpec + +server = Server() + +@server.agent() +async def my_agent( + input: Message, + context: RunContext, + trajectory: Annotated[TrajectoryExtensionServer, TrajectoryExtensionSpec()] +): + yield trajectory.trajectory_metadata( + title="Planning", + content="Analyzing the user request to determine the best approach..." + ) + + # Do work + + yield trajectory.trajectory_metadata( + title="Execution", + content="Processing data with temperature=0.7" + ) + + yield "Final result" +``` + +## Markdown Support + +The `content` field of `trajectory_metadata` supports Markdown, which is rendered directly in the UI. + +Supported elements include: + +- Headers +- Bold and italic text +- Ordered and unordered lists +- Tables +- Code blocks +- Links + +```python +yield trajectory.trajectory_metadata( + title="Checklist", + content=""" +- Load data +- Validate schema +- Run inference +- Generate report +""" +) +``` + +## Grouping Trajectories + +Use the `group_id` parameter to update trajectory content in the UI. 
Each update replaces the previous content (and title, if defined) for the same `group_id`: + +```python +# Initial search status +yield trajectory.trajectory_metadata( + title="Web search", + content="Querying search engines...", + group_id="websearch" +) + +# Update with results +yield trajectory.trajectory_metadata( + content="Found 8 results", + group_id="websearch" +) +yield trajectory.trajectory_metadata( + content="Found 8 results\nAnalyzed 3/8 results", + group_id="websearch" +) +yield trajectory.trajectory_metadata( + content="Found 8 results\nAnalyzed 8/8 results", + group_id="websearch" +) + +# Final update +yield trajectory.trajectory_metadata( + title="Web search finished", + content="Found 8 results\nAnalyzed 3/8 results\nExtracted key information", + group_id="websearch" +) +``` + +## Common Patterns + +**Progress Steps:** + +```python +yield trajectory.trajectory_metadata(title="Step 1", content="Loading data...") +yield trajectory.trajectory_metadata(title="Step 2", content="Processing...") +yield trajectory.trajectory_metadata(title="Step 3", content="Generating output...") +``` + +**Decision Points:** + +```python +yield trajectory.trajectory_metadata( + title="Tool Selection", + content="Choosing search tool based on query type: factual" +) +``` + +**Error Handling:** + +```python +yield trajectory.trajectory_metadata( + title="Retry Attempt", + content="First attempt failed, trying alternative approach..." +) +``` + +**Multi-step Process with Live Updates:** + +```python +# Use group_id to show progress updates in a single section +yield trajectory.trajectory_metadata( + title="Data Processing", + content="Starting...", + group_id="processing" +) +yield trajectory.trajectory_metadata( + content="Processing batch 1/10...", + group_id="processing" +) +yield trajectory.trajectory_metadata( + content="Processing batch 10/10...", + group_id="processing" +) +yield trajectory.trajectory_metadata( + content="Processing complete! 
Processed 10 items", + group_id="processing" +) +``` + +Trajectory steps appear as expandable sections in the UI, helping users understand your agent's thought process. diff --git a/docs/stable/community/acp-a2a-migration-guide.mdx b/docs/stable/community/acp-a2a-migration-guide.mdx new file mode 100644 index 000000000..24c75a68f --- /dev/null +++ b/docs/stable/community/acp-a2a-migration-guide.mdx @@ -0,0 +1,313 @@ +--- +title: ACP to A2A Migration Guide +description: "Migrate from ACP (Agent Communication Protocol) to A2A (Agent2Agent) Protocol for Agent Stack v0.3.x+." +--- + +## Quick Migration Checklist + +✅ Update dependencies: `acp_sdk` → `agentstack_sdk` + +✅ Update imports and function signature + +✅ Replace `Metadata` with `AgentDetail` + +✅ Update message processing + +✅ Update trajectory and citation handling + +✅ Use LLM service extension + +## Step-by-Step Migration + +### 1. Update Dependencies & Imports + +**Old (ACP)** + +```toml +dependencies = ["acp-sdk>=1.0.0"] +``` + +```py +from acp_sdk import Message, Metadata, Link, LinkType, Annotations +from acp_sdk.models import MessagePart +from acp_sdk.server import Context, Server +from acp_sdk.models.platform import PlatformUIAnnotation, PlatformUIType, AgentToolInfo +``` + +**New (A2A)** + +```toml +dependencies = ["agentstack-sdk>=0.4.0"] +``` + +```py +from a2a.types import AgentSkill, Message +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext +from agentstack_sdk.a2a.extensions import ( + AgentDetail, AgentDetailTool, + CitationExtensionServer, CitationExtensionSpec, + TrajectoryExtensionServer, TrajectoryExtensionSpec, + LLMServiceExtensionServer, LLMServiceExtensionSpec +) +from agentstack_sdk.a2a.extensions.services.platform import PlatformApiExtensionServer, PlatformApiExtensionSpec +from agentstack_sdk.a2a.types import AgentMessage, AgentArtifact +from agentstack_sdk.util.file import load_file +``` + +### 2. 
Update Agent Decorator & Function Signature + +**Old (ACP)** + +```py +@server.agent( + name="jennas_granite_chat", + description="This is a general-purpose chat assistant prototype built with the BeeAI Framework and powered by Granite.", + metadata=Metadata( + annotations=Annotations( + beeai_ui=PlatformUIAnnotation( + ui_type=PlatformUIType.CHAT, + user_greeting="Hi! I'm your Granite-powered AI assistant—here to help with questions, research, and more. What can I do for you today?", + display_name="Jenna's Granite Chat", + tools=[ + AgentToolInfo(name="Think", description="Advanced reasoning and analysis to provide thoughtful, well-structured responses to complex questions and topics."), + AgentToolInfo(name="DuckDuckGo", description="Search the web for current information, news, and real-time updates on any topic.") + ] + ) + ), + author={"name": "Jenna Winkler"}, + contributors=[{"name": "Tomas Weiss"}, {"name": "Tomas Dvorak"}], + recommended_models=["granite3.3:8b-beeai"], + tags=["Granite", "Chat", "Research"], framework="BeeAI", license="Apache 2.0", + links=[{"type": "source-code", "url": "https://github.com/jenna-winkler/granite_chat"}]) +async def agent_function(input: list[Message], context: Context) -> AsyncGenerator: +``` + +**New (A2A)** + +```py +@server.agent( + name="Jenna's Granite Chat", + default_input_modes=["text", "text/plain", "application/pdf", "text/csv", "application/json"], + default_output_modes=["text", "text/plain"], + detail=AgentDetail( + interaction_mode="multi-turn", + user_greeting="Hi! I'm your Granite-powered AI assistant. How can I help?", + version="0.0.10", + tools=[ + AgentDetailTool( + name="Think", + description="Advanced reasoning and analysis to provide thoughtful, well-structured responses to complex questions and topics." + ), + AgentDetailTool( + name="DuckDuckGo", + description="Search the web for current information, news, and real-time updates on any topic." 
+ ), + AgentDetailTool( + name="File Processing", + description="Read and analyze uploaded files including PDFs, text files, CSV data, and JSON documents." + ) + ], + framework="BeeAI", + author={ + "name": "Jenna Winkler" + }, + source_code_url="https://github.com/jenna-winkler/granite_chat" + ), + skills=[ + AgentSkill( + id="chat", + name="Chat", + description=dedent( + """\ + The agent is an AI-powered conversational system designed to process user messages, maintain context, + generate intelligent responses, and analyze uploaded files. + """ + ), + tags=["Chat", "Files"], + examples=[ + "What are the latest advancements in AI research from 2025?", + "What's the difference between LLM tool use and API orchestration?", + "Can you help me draft an email apologizing for missing a meeting?", + "Analyze this CSV file and tell me the key trends.", + "Summarize the main points from this PDF document.", + ] + + ) + ], +) +async def agent_function( + message: Message, + context: RunContext, + trajectory: Annotated[TrajectoryExtensionServer, TrajectoryExtensionSpec()], + citation: Annotated[CitationExtensionServer, CitationExtensionSpec()], + llm_ext: Annotated[LLMServiceExtensionServer, LLMServiceExtensionSpec.single_demand()], +): +``` + +### 3. Update Message & File Processing + +**Old (ACP)** + +```py +user_msg = input[-1].parts[0].content if input else "Hello" +``` + +**New (A2A)** + +Use `PlatformApiExtensionServer` in your agent to get access to files. 
+ +```py +from agentstack_sdk.a2a.extensions.services.platform import ( + PlatformApiExtensionServer, + PlatformApiExtensionSpec, +) + +async def example_agent( + input: Message, + _: Annotated[PlatformApiExtensionServer, PlatformApiExtensionSpec()], +): +``` + +```py +# Process message parts - A2A receives single Message with multiple parts +user_text = "" +uploaded_files = [] + +for part in message.parts: + if part.root.kind == "text": + user_text = part.root.text + elif part.root.kind == "file": + uploaded_files.append(part.root) + +# Simple file processing (if needed) +if uploaded_files: + from agentstack_sdk.util.file import load_file + + for file_part in uploaded_files: + async with load_file(file_part) as loaded_content: + # Process file content as needed + content = loaded_content.text + # Use content in your agent logic... + +if not user_text: + user_text = "Hello" +``` + +### 4. Update Context & Memory + +**Old (ACP)** + +```py +def get_memory(context: Context) -> UnconstrainedMemory: + session_id = getattr(context, "session_id", "default") + return memories.setdefault(session_id, UnconstrainedMemory()) +``` + +**New (A2A)** + +```py +def get_memory(context: RunContext) -> UnconstrainedMemory: + context_id = getattr(context, "context_id", getattr(context, "session_id", "default")) + return memories.setdefault(context_id, UnconstrainedMemory()) +``` + +### 5. Use LLM Service Extension + +**Old (ACP)** + +```py +os.environ["OPENAI_API_BASE"] = os.getenv("LLM_API_BASE", "http://localhost:11434/v1") +os.environ["OPENAI_API_KEY"] = os.getenv("LLM_API_KEY", "dummy") +llm = ChatModel.from_name(f"openai:{os.getenv('LLM_MODEL', 'llama3.1')}") +``` + +**New (A2A)** + +```py +# Add LLM extension to function signature +async def agent_function( + message: Message, + context: RunContext, + llm_ext: Annotated[ + LLMServiceExtensionServer, + LLMServiceExtensionSpec.single_demand( + suggested=("ibm/granite-3-3-8b-instruct", "llama3.1", "gpt-4o-mini") + ) + ], + # ... 
other extensions +): + if llm_ext: + # Get platform-managed LLM configuration + llm_config = llm_ext.data.llm_fulfillments.get("default") + + llm = OpenAIChatModel( + model_id=llm_config.api_model, + api_key=llm_config.api_key, + base_url=llm_config.api_base, + parameters=ChatModelParameters(temperature=0.0), + tool_choice_support=set(), + ) +``` + +### 6. Update Trajectory & Citations + +**Old (ACP)** + +```py +yield MessagePart(metadata=TrajectoryMetadata( + kind="trajectory", + key=str(uuid.uuid4()), + message="Processing..." +)) + +citations.append(CitationMetadata( + kind="citation", + url=url, + title=title, + description=description, + start_index=start, + end_index=end +)) +for citation in citations: + yield MessagePart(metadata=citation) +``` + +**New (A2A)** + +```py +yield trajectory.trajectory_metadata( + title="Processing", + content="Processing message..." +) + +citations.append({ + "url": url, + "title": title, + "description": description, + "start_index": start, + "end_index": end +}) +yield citation.citation_metadata(citations=citations) +``` + +### 7. Response Output + +**Old (ACP)** + +```py +yield MessagePart(content=response_text) +``` + +**New (A2A)** + +```py +yield AgentMessage(text=response_text) +# or simply: +yield response_text +``` + +## Complete Examples + +1. https://github.com/i-am-bee/agentstack/blob/main/agents/chat/src/chat/agent.py +2. https://github.com/jenna-winkler/granite_chat/blob/main/src/beeai_agents/agent.py diff --git a/docs/stable/community/community-calls/15-07-2025.md b/docs/stable/community/community-calls/15-07-2025.md new file mode 100644 index 000000000..fe9becdac --- /dev/null +++ b/docs/stable/community/community-calls/15-07-2025.md @@ -0,0 +1,55 @@ +# BeeAI Platform Community Update + +## July 15, 2025 + +--- + +## 🛠️ In Development: ACP + A2A Unification + +Google's A2A Protocol has joined the Linux Foundation alongside ACP (Agent Communication Protocol). 
+ +### What This Means + +Both protocols are working toward a unified agent communication standard. + +### Our Role + +Partnering with Google to improve A2A while positioning BeeAI Platform as the simple way to build A2A compatible agents. + +Our plan is to introduce a developer-friendly wrapper that adds: +- Intuitive API with ACP-style decorators +- Seamless BeeAI Platform integration +- Rich UI components (trajectories, chat interfaces) +- Enhanced developer experience + +So you get "A2A with the simplicity you expect from BeeAI" + +--- + +## 🗺️ Roadmap + +### Now + +- A2A agent support in BeeAI Platform +- Developer-friendly SDK wrapper + +### Next + +- Enhanced UI components (e.g., feedback mechanism) +- Better developer tooling + +--- + +## 💬 Community Input Needed + +- Share use cases - help us prioritize + +**Join the discussion:** [discord.gg/NradeA6ZNF](https://discord.gg/NradeA6ZNF) + +--- + +## 🙏 Thank You + +**The BeeAI Platform is community-driven** + +**Questions? Let's discuss!** diff --git a/docs/stable/community/community-calls/16-09-2025.md b/docs/stable/community/community-calls/16-09-2025.md new file mode 100644 index 000000000..7e219812d --- /dev/null +++ b/docs/stable/community/community-calls/16-09-2025.md @@ -0,0 +1,96 @@ +# BeeAI Platform Community Update + +## Major updates 💥 +1. Structure User Input via Forms +1. OAuth for MCP + +## Coming up soon +1. Session History +1. User Authentication +1. Agent Installation via GUI +1. Agent Settings +1. Agent Secrets + +--- + +## 🚀 Major Feature: Structured User Input + +We've built a new extension to support structured user input via forms in the GUI. 
+ +### Key Functionality + +- Agents can define structured input demands as the first message +- Agents can request structured data at any time during multi-turn conversations +- The extension supports basic form layout using columns and column spans +- We provide essential form components: TextField, DateField, FileField, MultiSelect, and more +- Check out [the documentation](https://agentstack.beeai.dev/build-agents/forms) + +--- + +## 🔐 Sneak Peek: MCP OAuth Integration + +**Secure authentication for Model Context Protocol servers** + +We're actively developing comprehensive MCP OAuth integration to enable secure, authenticated access to MCP servers and their tools. + +### What is MCP OAuth? + +- **Model Context Protocol (MCP)**: Open standard for AI systems to interact with external tools and data sources +- **OAuth 2.1 Integration**: Secure authorization framework for MCP server authentication + +This combination enables an excellent user experience, allowing tools to be enhanced with context provided by third parties authorized by the user via OAuth. + +### Key Benefits + +- **🔒 Enhanced Security**: Robust authentication and authorization for MCP resources +- **🚀 Seamless Integration**: Automatic client registration and token management +- **⚡ Developer Experience**: Simplified authentication flow for MCP tool access + +### Current Status + +**In Active Development** - This feature depends on broader adoption of the MCP OAuth specification in hosted MCP servers, particularly the dynamic client registration requirement. 
+ +--- + +## 🗺️ Coming Up Soon + +**Exciting features in development to enhance your BeeAI Platform experience** + +### 🧵 Session History +**Persistent conversation context across sessions** + +**Context Continuity**: Agents maintain conversation history and context between sessions + +### 🔑 User Authentication +**Multi-tenant platform with secure user management** + +**Multi-User Support**: Multiple users can build and manage their own agents independently + + +### ⚙️ Agent Installation via GUI +**One-click agent deployment from the platform interface** + +- **Streamlined Setup**: Install agents with a single click, no CLI required +- **Multi-Tenant Ready**: Integrated with user authentication for secure agent management + +### 🎛️ Agent Settings +**Runtime configuration for enhanced agent behavior** + +- **Custom Parameters**: Configure agent behavior, thinking, and other settings per run +- **Dynamic Tuning**: Adjust agent performance based on specific use cases + +### 🔐 Agent Secrets +**Secure API key management for agent tools** + +- **Dynamic Key Requests**: Agents can request API keys during conversations +- **Static Key Requests**: Agents can request API keys before a conversation starts to ensure a seamless experience. + +--- + +## 🙏 Thank You + +The BeeAI Platform is community-driven - your feedback shapes our roadmap! + +Questions? Let's discuss! +- **Discord Community**: [discord.gg/NradeA6ZNF](https://discord.gg/NradeA6ZNF) +- **GitHub Discussions**: Share ideas and ask questions diff --git a/docs/stable/community/community-calls/17-06-2025.md b/docs/stable/community/community-calls/17-06-2025.md new file mode 100644 index 000000000..be473b126 --- /dev/null +++ b/docs/stable/community/community-calls/17-06-2025.md @@ -0,0 +1,187 @@ +# BeeAI platform - What's New + +## Major updates 💥 +1. Introducing Playground +1. ACP Await supported in the CLI +1. Documentation Updates +1. File Uploads through Agent GUI +1. 
Kubernetes backed architecture refactor + +## We need you 🫵 +1. Coming Up Next +1. Call To Action + +--- +## 🚀 Introducing Playground + +### Why the change? + +Previously, we had the Compose Playground integrated. + +We wanted to streamline the experience and make it more flexible for future features. + +### What’s new? + +Playground is now a standalone GUI. + +Allows us more room for experimentation and rapid iterations. + +Cleaner separation between production tools and experimental workflows. + +### How to access it: + +CLI: + +```bash +agentstack playground +``` + +Web: + +Simply navigate to: http://localhost:8333/playground + +--- +## ⏳ ACP Await supported in the CLI + +### What is Await? + +A feature of ACP enabling flow interventions: + +The agent can pause and wait for input either from: + +- A human operator +- An external system + +Previously, when an agent raised Await, both CLI and GUI ignored it. + +### What’s new? + +You can now run agents that leverage Await in the CLI. + +When Await is raised: + +- The CLI prompts you for input. +- You can manually decide how to proceed. + +Check BeeAI framework [`RequirementAgent`](https://github.com/i-am-bee/beeai-framework/blob/3c8f314299ba80d54245999e212f86e43957007c/python/examples/serve/beeai_platform_await.py#L15) example that leverages the concept. + +### Future of Await + +First step — GUI support is coming soon! + +--- +## 📜 Documentation Updates + +Fresh, simplified, and much more practical documentation! + +### What’s included: + +✅ Quickstart Guide + +- Very quick, minimal, and non-verbose. +- Get you up and running fast. + +✅ Detailed "How Tos" + +Step-by-step guides covering: + +- Implementing your agent +- Wrapping it into the platform +- Building the agent image +- Deploying to Kubernetes cluster + +### Coming soon: + +🚀 OpenShift Integration Guide + +Smooth deployment process also for OpenShift users. + +--- +## 📂 File Uploads through Agent GUI + +### What’s new?
+ +Agents can now receive file uploads via the GUI interface. + +The uploaded file becomes part of the conversation using `ACP` [MessagePart](https://agentcommunicationprotocol.dev/core-concepts/message-structure) + +- Contains `content_url` and `content_type`. +- Files are attached to the conversation context. + +### Why is this important? + +A prerequisite for advanced RAG (Retrieval Augmented Generation) scenarios. + +Enables more sophisticated workflows that can utilize file content. + +### Examples of use: + +- 🔎 Summarize uploaded documents. +- 🧠 Extract information and feed it into your tools. +- 🛠️ Use files as part of advanced tool calling pipelines. + +Use the uploaded files however your agent requires. + +--- +## ⚙️ Kubernetes backed architecture refactor + +Complete architecture refactor — now fully backed by Kubernetes! + +### Why the change? + +- Smooth transition towards enterprise-ready deployments. +- Provides a more scalable, production-grade foundation for running your agents. +- Allows introduction of more services within the stack (MessageQueues, Databases...) + +### Architectural Requirements + +✅ Feature parity between: +- Local development experience +- Remote / production deployments + +✅ Simplified future development & maintenance: +- One consistent architecture +- Strong emphasis testing & debugging + +### Deployment made easy +- Provided Helm Chart for simple deployment to any Kubernetes cluster. +- Complete deployment guide available in the new documentation. + +--- +## 🚀 What’s Coming Next + +Heavy focus on improving the Agent Try Experience + +### UI Components +Agent builders will soon be able to easily leverage rich UI components. This simplifies building more interactive, user-friendly agents. + +Some of the examples: Citations, Chain of thought log, File attachments... + +### User Identity Support +Agents will soon have access to user identity context. + +- Enables more personalized agent behavior. 
+- Opens door to user-specific data & flows.
+
+---
+## 🫵 Call To Action
+
+🙏 Community contributions are more important than ever. We’re actively building a lot of new features in the UI:
+
+- File Uploads
+- Await Support
+- Citations & Sources
+
+...and more to come!
+
+### 🛠 Out-of-the-box agents need updates
+
+To fully leverage these new features, adjust existing agents to support the evolving platform capabilities.
+
+### 📌 How you can help
+
+- We will mark tasks with the `help wanted` label.
+- Feel free to pick them up and contribute.
+- Every PR moves the whole ecosystem forward!
+
+💬 Reach out if you have any questions — we’re happy to support contributors.
diff --git a/docs/stable/community/community-calls/21-10-2025.md b/docs/stable/community/community-calls/21-10-2025.md
new file mode 100644
index 000000000..ba7c859d1
--- /dev/null
+++ b/docs/stable/community/community-calls/21-10-2025.md
@@ -0,0 +1,98 @@
+# BeeAI Platform Community Update
+
+## Major updates 💥
+1. Session History
+1. Agent Secrets
+1. Agent Settings
+1. Server Side GH builds
+
+## Coming up soon
+1. Dynamically adding agents via GUI
+1. Box OAuth connector for MCP
+1. User Authentication with proper docs
+
+---
+
+## 🚀 Session History
+
+You can now persist and retrieve full conversation history across sessions.
+
+Previously, messages were only kept in memory, meaning once a session ended, the context was lost.
+
+With the new Session History feature, all messages can now be stored in the `PlatformContextStore`, giving you a persistent, queryable record of your conversations.
+
+### What this means
+
+- Every message in a session can now be saved to the platform store.
+- You can retrieve and replay the entire conversation at any time.
+- The UI can fetch and display past conversations, enabling a continuous experience between user sessions.
+- Check out the thorough [documentation](https://agentstack.beeai.dev/build-agents/multi-turn)
+
+---
+
+## 🔐 Agent Secrets
+
+You can now securely request and manage secrets from users through the A2A service dependency extension.
+
+Think of cases where your agent needs an API key or other credentials. Instead of manually configuring these, your agent can now ask the user to provide them directly in the UI, safely and only when needed.
+
+### How it works
+
+- Agents can declare required secrets that the user can provide before execution.
+- Users can optionally provide or refuse the secret when prompted.
+- Agents can delay secret requests until they’re absolutely necessary during the conversation.
+
+---
+
+## ⚙️ Agent Settings
+
+You can now provide custom configuration options for your agents through the Settings A2A extension.
+
+For example, if you have a toggle to enable or disable thinking, you can now expose that setting directly in the BeeAI UI.
+
+
+### How it works
+
+- Agents can define custom settings that are shown to users before or during a run.
+- Users can tweak configuration values like toggles or enumerated values.
+- These settings are automatically passed to the agent at runtime, so you can adapt behavior on the fly.
+
+---
+
+## 🏗️ Server Side GitHub builds
+
+You can now let the platform build your agent images directly from GitHub.
+
+Now, thanks to Kaniko, we have a reliable agent builder that is integrated directly in the cluster. You can simply link a GitHub repository with a Dockerfile, and the platform will automatically build and host the agent image for you in-cluster.
+
+
+### What this means
+
+- Provide a GitHub repo with a valid `Dockerfile`
+- Your agent image is built server-side and made available directly in the cluster.
+- This lays the foundation for the upcoming GitHub Agent import via GUI, arriving in the next release.
+ +--- + +## 🗺️ Coming Up Soon + +**Exciting features in development to enhance your BeeAI Platform experience** + +### 📦 Box OAuth Connector +Enable seamless Box integration for MCP clients, making it easy for users to connect their Box accounts. + +### 🔐 User Authentication +This feature is complete! We’re finalizing documentation and will announce it very soon. + +### ⚙️ Dynamically adding agents via GUI +Add and configure new agents directly through the BeeAI interface without the need for custom builds and registration through Helm. + +--- + +## 🙏 Thank You + +The BeeAI Platform is community-driven - your feedback shapes our roadmap! + +Questions? Let's discuss! +- **Discord Community**: [discord.gg/NradeA6ZNF](https://discord.gg/NradeA6ZNF) +- **GitHub Discussions**: Share ideas and ask questions. diff --git a/docs/stable/community/community-calls/26-08-2025.md b/docs/stable/community/community-calls/26-08-2025.md new file mode 100644 index 000000000..b017478d8 --- /dev/null +++ b/docs/stable/community/community-calls/26-08-2025.md @@ -0,0 +1,119 @@ +# BeeAI Platform Community Update + +## August 26, 2025 + +--- + +## 🚀 Major Achievement: Full A2A Protocol Migration + +We've successfully migrated from ACP to A2A protocol, making BeeAI Platform the simple way to build A2A compatible agents. 
+ +### Key Changes + +- All core agents migrated to A2A protocol +- Updated UI components to work with A2A message structure +- Improved error handling and streaming capabilities +- Better developer experience with new SDK patterns + +--- + +## 🛠️ Platform Improvements + +### User Feedback System +**New thumbs up/down rating system for agent responses** + +Users can now: +- Rate agent responses with ⬆️ ⬇️ buttons +- Provide detailed feedback with categories +- Help improve agents through structured comments + +### RAG Agent & Enhanced File Support +**First-class support for document-based workflows** + +- New RAG agent implementation +- Upload documents and extract content automatically +- Query documents via embeddings through platform API +- Support for multiple file formats + +### Context Scoped Resources & Token System +**Enhanced security and resource isolation** + +Major new feature for enterprise and multi-user scenarios: +- Context Management: Files and vector stores now scoped to contexts +- Context Tokens: Fine-grained permission system for secure resource access +- Isolation: Users can only access resources within their permitted contexts +- Token Permissions: Separate global and context-level permission grants + +--- + +## 🔧 Developer Experience + +### BeeAI SDK +**Comprehensive SDK for A2A agent development** + +New extensions and services: +- **LLM Service Extension**: Enhanced language model integration +- **Embedding Service Extension**: Vector operations and similarity search +- **Citation & Trajectory Extensions**: Better result tracking and sourcing +- **MCP Extension**: Model Context Protocol support +- **Platform Client**: Simplified API interactions + +### Separate LLM & Embedding Providers +**Independent configuration for maximum flexibility** + +Through the `agentstack model setup` CLI flow: +- Select your preferred LLM provider +- Choose a separate embedding provider +- Optimize costs and performance independently + +--- + +## 🗺️ What's 
Next + +**Priorities:** +1. Open Canvas functionality [#168](https://github.com/i-am-bee/agentstack/issues/168) +2. Session history [#623](https://github.com/i-am-bee/agentstack/issues/623) +3. MCP Support [#780](https://github.com/i-am-bee/agentstack/issues/780) +4. Support Structured/Form Input in new Form Interface [#982](https://github.com/i-am-bee/agentstack/issues/982) +5. User configurable agent settings at runtime [#633](https://github.com/i-am-bee/agentstack/issues/633) + +--- + +## 🙏 Community Recognition + +### Core Maintainers +Massive thanks to our 11 core maintainers who led the A2A migration: +- **[@jezekra1](https://github.com/jezekra1)** - Platform infrastructure, SDK development, and agent migration +- **[@JanPokorny](https://github.com/JanPokorny)** - SDK architecture, A2A client implementation, and developer tooling +- **[@kapetr](https://github.com/kapetr)** - UI/UX improvements, Next.js migration, and agent routing +- **[@PetrBulanek](https://github.com/PetrBulanek)** - Chat interface, message rendering, and UI components +- **[@tomkis](https://github.com/tomkis)** - Protocol strategy, LLM fulfillment, and user feedback systems +- **[@penge](https://github.com/penge)** - Navigation improvements and UI modernization +- **[@Zycon42](https://github.com/Zycon42)** - ACP-SDK integration and protocol transitions +- **[@xjacka](https://github.com/xjacka)** - Embedding services and backend infrastructure +- **[@aleskalfas](https://github.com/aleskalfas)** - RAG agent implementation and chat agent improvements +- **[@pilartomas](https://github.com/pilartomas)** - MCP extension development and service modules +- **[@Tomas2D](https://github.com/Tomas2D)** - Framework updates and dependency management + +### External Contributors +Special recognition for our external contributors: +- **[@tedhabeck](https://github.com/tedhabeck)** - OIDC configuration and UI component enhancements (2 major PRs) +- **[@flekmatik](https://github.com/flekmatik)** - IBM Cloud 
support improvements + +--- + +## 💬 Community Input Needed + +- **Share your use cases** - Help us prioritize features +- **Contribute agents** - Add to our community catalog +- **Report feedback** - Test the new A2A agents and share experiences + +--- + +## 🙏 Thank You + +The BeeAI Platform is community-driven - your feedback shapes our roadmap! + +Questions? Let's discuss! +- **Discord Community**: [discord.gg/NradeA6ZNF](https://discord.gg/NradeA6ZNF) +- **GitHub Discussions**: Share ideas and ask questions diff --git a/docs/stable/community/contribute.mdx b/docs/stable/community/contribute.mdx new file mode 100644 index 000000000..fc0eaaf94 --- /dev/null +++ b/docs/stable/community/contribute.mdx @@ -0,0 +1,48 @@ +--- +title: "Contribute" +description: "Learn how to contribute to Agent Stack" +--- + +Welcome! We’re so glad you’re here. Whether you're fixing a typo, filing a bug, or building something brand new, your contributions help shape Agent Stack into something better for everyone. + +Here’s a quick guide to help you get started. + +## What We Value + +Agent Stack is more than code - it’s a community of curious, collaborative people. We care about: +- Making real problems easier to solve +- Sharing early and learning together +- Designing tools that are easy to build on +- Keeping things kind, intentional, and inclusive + +## Ways to Contribute + +You don’t need to be an AI expert to help. Here’s how you can get involved: +- Use Agent Stack and tell us what works (and what doesn’t) +- Open an issue for bugs, questions, or new ideas +- Share feedback on discussions or pull requests +- Propose changes — small ones are just as helpful! +- Improve docs, clarify examples, or help others onboard + +## How We Work + +We’re async-friendly, open to feedback, and always learning. 
A few principles: +- Start small and share often +- Focus on the problem you’re solving, not just the code +- Give thoughtful feedback and expect the same in return +- Prioritize clarity, empathy, and user impact + +## Getting Started + +Not sure where to begin? +- Start with our [Contributing Guide](https://github.com/i-am-bee/agentstack/blob/main/CONTRIBUTING.md) +- Browse [Good first issues](https://github.com/i-am-bee/agentstack/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22good%20first%20issue%22) to find something small and approachable +- Join the conversation on [Discord](https://discord.gg/NradeA6ZNF) - questions, ideas, and curiosity welcome + +Don't hesitate to reach out - we’re here to help you get started. + +## Thanks for Being Here + +We’re grateful for your time, your ideas, and your effort to help make Agent Stack better. + +If you get stuck, feel free to [open an issue](https://github.com/i-am-bee/agentstack/issues) or reach out to the team on [Discord](https://discord.gg/NradeA6ZNF)! \ No newline at end of file diff --git a/docs/stable/custom-ui/client-sdk.mdx b/docs/stable/custom-ui/client-sdk.mdx new file mode 100644 index 000000000..8eabfe7a0 --- /dev/null +++ b/docs/stable/custom-ui/client-sdk.mdx @@ -0,0 +1,12 @@ +--- +title: Client SDK (Typescript) +description: Build custom UIs on top of Agent Stack +--- + +Agent Stack TypeScript SDK (`agentstack-sdk`) enables you to build custom user interfaces and applications on top of Agent Stack's deployment infrastructure. + + +**Documentation Coming Soon** + +Full TypeScript SDK documentation is currently in development. In the meantime, check out the [NPM package](https://www.npmjs.com/package/agentstack-sdk). 
+ diff --git a/docs/stable/custom-ui/permissions-and-tokens.mdx b/docs/stable/custom-ui/permissions-and-tokens.mdx new file mode 100644 index 000000000..378747ac2 --- /dev/null +++ b/docs/stable/custom-ui/permissions-and-tokens.mdx @@ -0,0 +1,568 @@ +--- +title: Permissions and Tokens +description: Grant user roles and generate tokens with custom scopes +--- + +Agent Stack uses two types of authentication tokens to control access to platform resources: + +1. **User Tokens** - Issued by your identity provider (OIDC/OAuth) for human users accessing the platform +2. **Context Tokens** - Generated programmatically for agents to access resources with limited permissions during a conversation + +Both token types can be used to authenticate API requests, but serve different purposes and have different permission scopes. + +## Understanding the Permission System + +Agent Stack's permission system has two independent dimensions that work together: + +1. **Privacy and Ownership** - Controls who can access resources (user roles and entity visibility) +2. **Context Scoping** - Controls where resources are associated and how agents access them + +### Privacy and Ownership + +This determines **who can access and manage resources** based on entity type and user role. 
+ +```mermaid +graph TB + Start["Privacy & Ownership"] + + Start --> Entities["Entity Types"] + Entities --> Platform["Platform Entities"] + Entities --> UserPrivate["User-Private Entities"] + Entities --> SemiPrivate["Semi-Private Entities"] + + Start --> Roles["User Roles"] + Roles --> User["USER"] + Roles --> Developer["DEVELOPER"] + Roles --> Admin["ADMIN"] + + style Platform fill:#fff3e0,stroke:#ff6f00 + style UserPrivate fill:#e8f5e9,stroke:#2e7d32 + style SemiPrivate fill:#fce4ec,stroke:#c2185b + style User fill:#e3f2fd,stroke:#1976d2 + style Developer fill:#e1bee7,stroke:#7b1fa2 + style Admin fill:#ffcdd2,stroke:#c62828 +``` + + +#### Entity Types + +**Platform Entities** - Shared across all users in the platform: + +| Entity | Description | +|--------------------------|-----------------------------------------| +| **system_configuration** | Platform-wide configuration | +| **model_providers** | Available LLM/embedding model providers | + +**User-Private Entities** - Scoped to individual users (users can access their own): + +| Entity | Description | +|-------------------|---------------------------------| +| **files** | Uploaded files and documents | +| **vector_stores** | Vector databases for embeddings | +| **variables** | User-specific variables | +| **contexts** | Conversation contexts | +| **context_data** | Context history and data | +| **feedback** | User feedback submissions | + +**Semi-Private Entities** - Visible to all users, but management is restricted by role: + +| Entity | Description | +|------------------------|-----------------------| +| **providers** | Agent providers | +| **provider_builds** | Agent provider builds | + +#### User Roles and Access + +Agent Stack uses three role-based access levels: + +**USER** - Standard user role: +- **Platform Entities**: Read-only access +- **User-Private Entities**: Full read/write access to their own resources +- **Semi-Private Entities**: Read-only access (can view all providers/builds but cannot 
manage them). + +**DEVELOPER** - Extends USER with additional management capabilities: +- All USER permissions +- **Semi-Private Entities**: Can create and manage their own providers and provider builds + +**ADMIN** - Full system access: +- Full access to all resources across the platform + +### Context Scoping and Agent Access + +This dimension controls **where resources are associated** and **how agents access them** through context tokens. + +A **context** represents a single conversation with an agent. Resources can be associated with either: +- **User level** (not tied to any specific conversation) +- **Context level** (linked to a specific conversation) + +This association, combined with token permissions, determines what an agent can access. + +#### Context-Scopeable Resources + +These resources can be **associated with** either the user level or a specific context: + +- **files**: Can be user-level (not associated with any context) or context-associated (linked to a specific context) +- **vector_stores**: Can be user-level or context-associated +- **context_data**: Conversation history (always associated with a context) + + + **Context association does NOT restrict access** + + Association with a context is just metadata. A user token or a context token with **global permissions** can access + ALL user files, including files associated with other contexts. Only a context token with **local-only permissions** + is restricted to files associated with that specific context. + + +#### Context Permissions for Agents + +When agents interact with resources, they use **context tokens** with limited permissions. You can grant specific permissions for each resource type: + + +```mermaid +graph TB + Start["Context Scoping"] + + Start --> Resources["Resource Types"] + Resources --> Scopeable["Context-Scopeable"] + Resources --> GlobalOnly["Global-Only"] + + Start --> Tokens["Token Types"] + Tokens --> UserToken["User Token (OIDC/OAuth)
Full user access"] + Tokens --> ContextToken["Context Token (Generated)
Limited permissions"] + + ContextToken --> GlobalPerm["Global Permissions
Access ALL user resources"] + ContextToken --> LocalPerm["Local Permissions
Only this context's resources"] + + style Scopeable fill:#e0f2f1,stroke:#00796b + style GlobalOnly fill:#fff9c4,stroke:#f57f17 + style UserToken fill:#e8eaf6,stroke:#3f51b5 + style ContextToken fill:#f3e5f5,stroke:#8e24aa + style GlobalPerm fill:#e1f5fe,stroke:#0277bd + style LocalPerm fill:#fce4ec,stroke:#ad1457 +``` + + +#### Available Permissions + +| Resource | Available Operations | Context Scopeable | Description | +|---------------------|---------------------------------|-------------------|---------------------------------| +| **files** | `read`, `write`, `extract`, `*` | ✔︎ | User files and documents | +| **vector_stores** | `read`, `write`, `*` | ✔︎ | Vector databases for embeddings | +| **context_data** | `read`, `write`, `*` | ✔︎ | Conversation history | +| **llm** | `*` | | LLM inference services | +| **embeddings** | `*` | | Embedding generation services | +| **a2a_proxy** | `*` | | Agent-to-agent communication | +| **model_providers** | `read`, `write`, `*` | | Model provider management | +| **variables** | `read`, `write`, `*` | | User variables | +| **providers** | `read`, `write`, `*` | | Agent providers | +| **contexts** | `read`, `write`, `*` | | Context management | +| **connectors** | `read`, `write`, `proxy`, `*` | | External service connectors | +| **feedback** | `write` | | User feedback submissions | + +### Global vs. Local Permission Grants + +When generating a context token, you specify **two independent permission sets** that control what the token can access: + +#### Global Permissions +Grants the token access to **ALL user resources**, regardless of which context they're associated with. 
+ +**What the token can access:** +- All user-level files and vector stores (not associated with any context) +- Files and vector stores associated with ANY context (including other contexts) + +**Example use cases:** +- Agent needs to search across all user files from previous conversations +- Agent needs to access a shared knowledge base (user-level vector store) +- Agent creates persistent resources that should be accessible to other contexts + +#### Context Permissions (Local) +Grants the token access **ONLY to resources associated with this specific context**. + +**What the token can access:** +- Files and vector stores associated with this specific context +- Context data (conversation history) for this context +- A restricted surface area - only resources linked to this conversation + +**Example use cases:** +- Sandboxed agent that should only see files uploaded in this conversation +- Limiting agent access to prevent data leakage across conversations +- Working with conversation-specific temporary data + +## Creating Context Tokens + +Context tokens allow agents to authenticate with limited permissions. Here's the flow: + +```mermaid +sequenceDiagram + participant User + participant Platform + participant Agent + + User->>Platform: 1. Create Context + Platform-->>User: Context created (context_id) + + User->>Platform: 2. Generate Context Token + Note over Platform: Specify global & local permissions + Platform-->>User: Context Token + + User->>Agent: 3. Start Agent Run + Note over User,Agent: Context token passed via
A2A PlatformApiExtension + + Agent->>Platform: 4. Access Platform Resources + Note over Agent,Platform: Using context token
with limited permissions + Platform-->>Agent: Resources (within token scope) +``` + +### Generating a Context Token + +Here is an example how you can create a context and generate a custom token with specific permissions: + + + ```python Python SDK expandable + from agentstack_sdk.platform.context import Context, Permissions, ContextPermissions + + # Create a context + context = await Context.create() + + # Generate a token with specific permissions + token = await context.generate_token( + grant_global_permissions=Permissions( + files={"read", "write"}, + llm={"*"}, + embeddings={"*"}, + ), + grant_context_permissions=ContextPermissions( + files={"read", "write"}, + context_data={"read", "write"}, + ), + ) + + # Use the token + print(f"Token: {token.token.get_secret_value()}") + print(f"Expires at: {token.expires_at}") + + ``` + + ```bash REST API + # 1. Create a context + curl -X POST "https://api.agentstack.example.com/api/v1/contexts" \ + -H "Authorization: Bearer YOUR_USER_TOKEN" \ + -H "Content-Type: application/json" + + # Response + { + "id": "123abc", + "created_at": "2025-01-15T10:00:00Z" + } + + # 2. Generate a context token + curl -X POST "https://api.agentstack.example.com/api/v1/contexts/{context_id}/token" \ + -H "Authorization: Bearer YOUR_USER_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "grant_global_permissions": { + "files": ["read", "write"], + "llm": ["*"], + "embeddings": ["*"] +}, + "grant_context_permissions": { + "files": ["read", "write"], + "context_data": ["read", "write"] +} +}' + + # Response + { + "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", + "expires_at": "2025-01-15T12:00:00Z" + } + ``` + + + + **Important Notes** + + - Context tokens **cannot** be used to generate other tokens + - The permissions you grant must be a subset of your own permissions + - Context token expiration is 20 minutes. If an agent runs longer it must request a new token using `AuthRequired` a2a message. 
+ + +### Using Context Tokens in Agent Runs + +When calling an agent using the Agent Stack SDK, you pass the context token through extension configurations. Here's a complete example: + +```python Full Agent Run Client Example expandable +from datetime import timedelta + +import httpx +from a2a.client import ClientConfig, ClientFactory +from a2a.types import Message, Part, Role, TextPart + +from agentstack_sdk.platform import ModelProvider, Provider, ModelCapability +from agentstack_sdk.platform.context import Permissions, ContextPermissions, Context +from agentstack_sdk.a2a.extensions import ( + LLMServiceExtensionClient, + LLMServiceExtensionSpec, + EmbeddingServiceExtensionClient, + EmbeddingServiceExtensionSpec, + PlatformApiExtensionClient, + PlatformApiExtensionSpec, + LLMFulfillment, + EmbeddingFulfillment, +) +from uuid import uuid4 + + +async def call_agent( + provider: Provider, # any agent from Provider.list() call + user_access_token: str, # Access token obtained from OIDC provider (e.g. using authorization code OAuth flow) +): + # 1. Create context and generate token + context = await Context.create(provider_id="your_provider_id") + + context_token = await context.generate_token( + grant_global_permissions=Permissions( + llm={"*"}, embeddings={"*"}, a2a_proxy={"*"} + ), + grant_context_permissions=ContextPermissions( + files={"*"}, vector_stores={"*"}, context_data={"*"} + ), + ) + + # 2. Get agent card and prepare extension specs + agent_card = provider.agent_card + llm_spec = LLMServiceExtensionSpec.from_agent_card(agent_card) + embedding_spec = EmbeddingServiceExtensionSpec.from_agent_card(agent_card) + platform_spec = PlatformApiExtensionSpec.from_agent_card(agent_card) + + # 3. 
Build extension metadata with context token + metadata = {} + + # LLM extension - token passed as api_key + if llm_spec: + metadata |= LLMServiceExtensionClient(llm_spec).fulfillment_metadata( + llm_fulfillments={ + key: LLMFulfillment( + api_base="{platform_url}/api/v1/openai/", + api_key=context_token.token.get_secret_value(), + api_model=( + await ModelProvider.match( + suggested_models=demand.suggested, + capability=ModelCapability.LLM, + ) + )[0].model_id, + ) + for key, demand in llm_spec.params.llm_demands.items() + } + ) + + # Embedding extension - token passed as api_key + if embedding_spec: + metadata |= EmbeddingServiceExtensionClient( + embedding_spec + ).fulfillment_metadata( + embedding_fulfillments={ + key: EmbeddingFulfillment( + api_base="{platform_url}/api/v1/openai/", + api_key=context_token.token.get_secret_value(), + api_model=( + await ModelProvider.match( + suggested_models=demand.suggested, + capability=ModelCapability.EMBEDDING, + ) + )[0].model_id, + ) + for key, demand in embedding_spec.params.embedding_demands.items() + } + ) + + # Platform API extension - token passed as auth_token + if platform_spec: + metadata |= PlatformApiExtensionClient(platform_spec).api_auth_metadata( + auth_token=context_token.token, expires_at=context_token.expires_at + ) + + # 4. Create message and run agent + message = Message( + message_id=str(uuid4()), + parts=[Part(root=TextPart(text="Hello, agent!"))], + role=Role.user, + context_id=context.id, # Use the Context ID we just created + metadata=metadata, + ) + + # 5. 
Execute agent run + async with httpx.AsyncClient( + # Add user token from OIDC provider + headers={"Authorization": f"Bearer {user_access_token}"}, + follow_redirects=True, + timeout=timedelta(hours=1).total_seconds(), + ) as httpx_client: + conf = ClientConfig(httpx_client=httpx_client, use_client_preference=True) + a2a_client = ClientFactory(conf).create(card=agent_card) + async for event in a2a_client.send_message(message): + # Handle agent responses + print(event) +``` + +### Inside Your Agent + +When you declare the `PlatformApiExtensionServer` in your agent function, the Agent Stack SDK automatically authenticates your platform API calls using the context token that was passed through the extension metadata. + +```python expandable +from typing import Annotated + +from a2a.types import Message + +from agentstack_sdk.a2a.extensions import ( + PlatformApiExtensionServer, + PlatformApiExtensionSpec, +) +from agentstack_sdk.platform import File +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext + +server = Server() + +@server.agent() +async def my_agent( + input: Message, + context: RunContext, + _: Annotated[PlatformApiExtensionServer, PlatformApiExtensionSpec()], +): + """Your agent function with platform access.""" + + # The platform extension automatically authenticates these calls + # using the context token passed in the extension metadata + + # Create a file - automatically scoped to the current context + file = await File.create( + filename="agent_output.txt", + content=b"Generated by the agent", + # context_id="auto" is the default - uses the context from the token + # context_id=None will attempt to upload the file globally (outside the context) + ) + yield "Done! Uploaded a file to the context!" + +``` + + + **How it works** + + 1. The client passes the context token via `PlatformApiExtensionClient.api_auth_metadata()` + 2. 
The `PlatformApiExtensionServer` receives the token and sets up the authentication context + 3. All `File`, `VectorStore`, and other platform API calls automatically use this token + 4. By default, context-scopeable resources are associated with the context (via `context_id="auto"`) + + + +## Resource Scoping with `context_id` + +When creating context-scopeable resources (files, vector stores) using the SDK, the `context_id` parameter determines whether they are scoped globally (user-level) or locally (context-level). + +### The `auto` Parameter + +Both `File.create()` and `VectorStore.create()` include a `context_id` parameter with special `"auto"` behavior. +- `context_id="auto"` (default) - Automatically scopes the resource to the current context if used inside an agent with active `PlatformApiExtension` +- `context_id=None` - Scopes the resource globally (outside the context) +- `context_id=specific_context_id` - Scopes the resource explicitly to the context ID passed in + + + **Best Practices** + + 1. **Use context tokens for agents**: When building agents, always use context tokens to limit access scope + 2. **Default to `"auto"`**: Let the SDK determine the appropriate scope based on the client type + 3. **Be explicit when needed**: Override `context_id` only when you need specific scoping behavior + 4. **Consider data lifecycle**: Use local scoping for temporary data, global scoping for persistent resources + + + +## Advanced: API Authentication + + + This is an advanced section aimed for agent application developers who want to integrate their custom a2a clients + with the Agent Stack. + + +The Agent Stack platform API accepts two types of tokens for authentication: + +1. **Access Token** - Issued by your identity provider (OIDC/OAuth) when a user logs in +2. 
**Context Token** - Generated programmatically for agents with limited permissions + +Technically, both token types are used identically: passed in the `Authorization: Bearer {token}` header for all +API endpoints. However, as you may have noticed in the example above, you would use A2A extensions to send context +token to the agent. The agent will then use the context token in the Bearer authorization header +to authenticate itself with the platform API, this is abstracted away in the SDK. + +To make this crystal clear, let's break down the full low-level interaction with an agent that wants to upload a file. +We will use the square brackets `[]` to denote the `Authorization: Bearer` header and placeholders +`ACCESS_TOKEN` and `CONTEXT_TOKEN` for the user access token and context token, respectively. + +**Part 1: Client code**: + + + Execute the Authorization code flow or similar to obtain an **ACCESS_TOKEN** from the identity + provider (in this flow, the user will log in to the system with their credentials). + + + Create a **context token** with specific permissions, these are 2 API requests: + 1. Create a context: `POST /api/v1/contexts` **[ACCESS_TOKEN]** + 2. Create context token: `POST /api/v1/contexts/{context_id}/token` **[ACCESS_TOKEN]**. + The body contains a request for `files.write` context permission grant. + + + 1. Fulfill extension demands and configure message metadata, add **CONTEXT_TOKEN** to: + - LLM and embedding fulfillments + - PlatformApiExtensionClient + 2. Set `context_id` to associate the message with the context and token created in previous step + + + Send message to the agent: `POST /api/v1/a2a/{provider_id}` **[ACCESS_TOKEN]**. + + **Proxy request authorization** + + The _request header_ contains **ACCESS_TOKEN** to authorize user. + This token is consumed by the platform API (a2a proxy endpoint) and not forwarded to the agent. 
+ + **Extension payload** + + The _request body_ contains the **CONTEXT_TOKEN** that the agent receives and can use later to call + the Platform API. + + + + +**Part 2: Agent code**: + + + The agent requests the platform API extension using dependency injection: + ```python + @server.agent() + async def my_agent( + ..., + _: Annotated[PlatformApiExtensionServer, PlatformApiExtensionSpec()], + ): ... + ``` + + + When the agent is invoked through the A2A protocol, the `PlatformApiExtensionServer` will consume the + **CONTEXT_TOKEN** from the extension metadata and set up the authentication context. + + + In your agent code you will use the `File.create()` method to upload a file to the context. The method will + call `POST /api/v1/files?context_id={context_id}` **[CONTEXT_TOKEN]**. + + Using the "auto" behavior of `File.create`, the file will be uploaded to the context by specifying + the `context_id` query parameter. This ID is taken from the user message payload. + + The SDK will automatically attach the CONTEXT_TOKEN to the request header. The token must contain the + `files.write` context permission grant for this to work. + + + + + At no point did Agent receive or use `ACCESS_TOKEN`. This is on purpose, because based on user role this token + can possess destructive permissions - to delete other agents, read all user data (if the user is ADMIN, etc.). + + To prevent API misuse by untrusted agents, avoid passing the access token to the agent. + \ No newline at end of file diff --git a/docs/stable/deploy-agent-stack/authenticate-cli-to-server.mdx b/docs/stable/deploy-agent-stack/authenticate-cli-to-server.mdx new file mode 100644 index 000000000..ec5e42d8b --- /dev/null +++ b/docs/stable/deploy-agent-stack/authenticate-cli-to-server.mdx @@ -0,0 +1,56 @@ +--- +title: "Connect CLI to Remote Server" +description: "Connect the Agent Stack CLI to your deployed server" +--- + +Use the Agent Stack CLI to configure and manage remote Agent Stack deployments. 
Agent Stack follows a client-server architecture, where the CLI acts as a client connecting to a deployed Agent Stack server. This allows you to manage agents remotely from your local machine. + +## Prerequisites + +- Agent Stack CLI installed locally ([Quickstart](/introduction/quickstart)) +- URL of your deployed Agent Stack server +- Authentication credentials for the server + +## Connect to a server + +Run this command to connect to a remote server: + +```bash +agentstack server login +``` + +The CLI will prompt you for: +- Server URL +- Authentication credentials + +Once authenticated, the CLI connects to your specified server and saves the connection for future commands. + +## View connected servers + +List all servers you've connected to: + +```bash +agentstack server list +``` + +This shows all saved server connections. The active server is marked in the output. + +## Switch between servers + +To switch to a different connected server: + +```bash +agentstack server select +``` + +Select from your saved connections. + +## Disconnect from a server + +Remove the currently active server connection: + +```bash +agentstack server logout +``` + +This removes the server from your saved connections. To disconnect from a different server, switch to it first, then logout. diff --git a/docs/stable/deploy-agent-stack/deployment-guide.mdx b/docs/stable/deploy-agent-stack/deployment-guide.mdx new file mode 100644 index 000000000..100b1f0d1 --- /dev/null +++ b/docs/stable/deploy-agent-stack/deployment-guide.mdx @@ -0,0 +1,486 @@ +--- +title: "Agent Stack Deployment Guide" +description: "Deploy Agent Stack to Kubernetes with Helm for internal team use" +--- + +Deploy Agent Stack on Kubernetes using Helm to create a centralized environment where your team can quickly test, share, and iterate on agents. + + + **Intended Use:** Agent Stack is designed for internal team deployments behind + VPNs or firewalls. 
Basic authentication protects administrative operations + (agent management, secrets, model configuration), but authenticated users can + freely use agents, upload files, and create vector stores without per-user + limits. Deploy only in trusted environments where you control who has access. + Public internet deployments are not recommended. + + +## Requirements + +- Agent Stack installed for post-deployment configuration +- Kubernetes 1.24+ with admin access +- kubectl configured to access your cluster +- Helm 3.8+ +- Persistent storage (20GB+ for PostgreSQL) +- LLM provider API access (OpenAI, Anthropic, etc.) + +## Get Started + +### Step 1: Create Configuration File + +Create a `config.yaml` file with your desired configuration, here is a minimal example, more advanced options +are explained in the [Configuration Options](#configuration-options) section. + +```yaml +# If you want to include agents from the default catalog (change release/tag accordingly): +externalRegistries: + public_github: "https://github.com/i-am-bee/agentstack@v0.4.3#path=agent-registry.yaml" + +# Your custom agents as docker images +providers: + # e.g. 
+ # - location: ghcr.io/i-am-bee/agentstack-starter/my-agent:latest + - location: + +# Generate the encryption key: +# - using UV (https://docs.astral.sh/uv/getting-started/installation/) +# $ uv run --with cryptography python3 -c 'from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())' +# - using python3 directly +# $ python3 -m pip install cryptography # (or use your preferred way to install the cryptography package) +# $ python3 -c 'from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())' +encryptionKey: "encryption-key-from-command" + +# This requires passing an admin password to certain endpoints, you can disable auth for insecure deployments +auth: + enabled: true + jwtSecretKey: "my-secret-key" + basic: + # CAUTION: this leaves most features accessible without authentication, please read the authentication section below + enabled: true + adminPassword: "my-secret-password" +``` + +### Step 2: Install the Chart + +Then install the chart using: + +```shell +helm upgrade --install agentstack -f config.yaml oci://ghcr.io/i-am-bee/agentstack/chart/agentstack:0.4.3 +``` + +It will take a few minutes for the pods to start. + +### Step 3: Port-Forwarding + +By default, ingress is not configured. You can port-forward the service to access the platform. +In a separate terminal, run: + +```shell +kubectl port-forward svc/agentstack-svc 8333:8333 & +``` + +### Step 4: Setup LLM + +After the platform becomes ready, it's time to set up your model provider: + +```shell +AGENTSTACK__ADMIN_PASSWORD="my-secret-password" agentstack model setup +``` + +### Step 5: Test the Deployment + +```shell +agentstack list +agentstack run chat hi +``` + +## Configuration Options + +### Security Settings + + + The current authentication model is basic and intended for development use. + For any deployment beyond local testing, carefully consider your security + requirements and network access controls. 
+ + +#### Disable authentication + +For local testing environments without authentication: + +```yaml +# CAUTION: INSECURE, for testing only +auth: + enabled: false +``` + +#### Admin authentication + +The admin password protects only administrative operations: deploying/deleting agents and modifying LLM provider connections. All other functionality is accessible to anyone who can reach the application on your network without authentication, including: + +- **Using agents (consuming LLM API credits)** +- Uploading files and creating vector stores +- Managing sessions + +**This means anyone on your network can incur LLM costs.** Basic authentication is only suitable for controlled environments (behind VPN/firewall) where you trust everyone with network access. + +For production deployments, multi-user environments, or cost control, use OIDC authentication which requires login for all actions. + +```yaml +auth: + enabled: true + jwtSecretKey: "my-secret-key" # fill in a strong secret + basic: + enabled: true + adminPassword: "my-admin-password" # fill in a strong admin password +``` + +#### OIDC authentication + +This is our most secure authentication method, supporting multi-user login with different roles. + +```yaml +trustProxyHeaders: true # This is important if validate_audience is enabled + +auth: + enabled: true + jwtSecretKey: "my-secret-key" # fill in a strong secret + oidc: + # Important: redirect URIs must be configured correctly in your provider: + # - UI endpoint: "https://your-public-url/api/auth/callback" + # - CLI endpoint: "http://localhost:9001/callback" + enabled: true + default_new_user_role: "user" # valid options: [user, developer]. Developers can deploy and configure their agents. 
+ admin_emails: # one or more administrators + - example.admin@ibm.com + nextauth_trust_host: true + nextauth_secret: "" + nextauth_url: "https://agentstack.localhost:8336" + validate_audience: true # audience must be set to the public URL of your application in your OIDC provider + providers: [ + { + "id": "w3id", + "name": "w3id", + "provider_type": "custom", + "client_id": "", + "client_secret": "", + "issuer": "" + } + ] +``` + +### Rate Limiting + +Rate limiting is recommended for production environments as a protection against overloading the platform. + +```yaml +rateLimit: + enabled: true + globalLimits: + - "20/second" + - "100/minute" + roleBasedLimits: + user: + openai_chat_completion_tokens: [] + openai_chat_completion_requests: [] + openai_embedding_inputs: [] + developer: + openai_chat_completion_tokens: [] + openai_chat_completion_requests: [] + openai_embedding_inputs: [] + admin: + openai_chat_completion_tokens: [] + openai_chat_completion_requests: [] + openai_embedding_inputs: [] + strategy: "sliding-window-counter" # Options: fixed-window, moving-window, sliding-window-counter +``` + + + If you are running multiple replicas (`replicaCount` > 1), Redis is + **required** for rate limiting to work correctly across all instances. Without + Redis, each replica maintains its own rate limit counters, allowing users to + exceed limits by distributing requests across replicas. + + +### Exposing the platform + +Ingress is not configured by default. You can expose the following services using your preferred way. + +- `agentstack-ui-svc`: access to the UI (which includes the API proxy) +- `agentstack-server-svc` (optional): direct access to the server API, required for CLI + +Typically, this means creating a custom [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) +or adding [routes](https://docs.redhat.com/en/documentation/openshift_container_platform/4.20/html/ingress_and_load_balancing/configuring-routes) +in OpenShift. 
+ +### Agent Configuration + +You can add specific agents directly or use a remote registry to sync agents from an external catalog. + +#### Specify agents statically + +Configure specific agents in your deployment: + +```yaml +providers: + # Official agents + - location: ghcr.io/i-am-bee/agentstack/agents/chat:0.4.3 + - location: ghcr.io/i-am-bee/agentstack/agents/rag:0.4.3 + - location: ghcr.io/i-am-bee/agentstack/agents/form:0.4.3 + + # Your custom agents + - location: your-registry.com/your-team/custom-agent:v1.0.0 + auto_stop_timeout_sec: 0 # disable agent downscaling + # Variables should be strings (or they will be converted) + variables: + MY_API_KEY: "sk-..." + MY_CONFIG_VAR: "42" +``` + +To upgrade an agent, change its version tag and redeploy using `helm upgrade`. + +#### External Agent Registry + +You can use the concept of agent registries instead of specifying individual agents: + +```yaml +externalRegistries: + public_github: "https://github.com/i-am-bee/agentstack@v0.4.3#path=agent-registry.yaml" +``` + +To upgrade an agent, change its version in the registry and wait for automatic synchronization (up to 10 minutes). + +### Agent builds + +Agents can be built from a GitHub repository directly in the cluster. 
To enable this feature, you will need to
+set up a few things:
+
+- docker image registry credentials with write permissions, see [Private image registries](#private-image-registries)
+- [optional] GitHub credentials to access private or enterprise GitHub repositories
+- [optional] external cluster for better security
+- [OpenShift only] service account with appropriate [SCC](https://www.redhat.com/en/blog/managing-sccs-in-openshift)
+  to allow elevated container permissions
+
+```yaml
+providerBuilds:
+  enabled: true
+  buildBackend: "kaniko" # valid options: [kaniko, buildkit]
+  buildRegistry:
+    registryPrefix: "ghcr.io/github-org-name"
+    imageFormat: "{registry_prefix}/{org}/{repo}/{path}:{commit_hash}"
+    # Registry credentials with write access (see section about Private image registries below)
+    secretName: "custom-registry-secret"
+    insecure: false
+  kaniko:
+    useSecurityContextCapabilities: true
+  externalClusterExecutor:
+    serviceAccountName: ""
+    namespace: "" # Kubernetes namespace for provider builds (defaults to current namespace if empty)
+    kubeconfig: "" # Kubeconfig YAML content for external cluster (optional)
+    # Example:
+    # kubeconfig: |
+    #   apiVersion: v1
+    #   kind: Config
+    #   clusters:
+    #   - cluster:
+    #       server: https://kubernetes.example.com
+    #   ...
+```
+
+### Configuring external services
+
+You may want to access or build agents from private registries or GitHub repositories; here is how to
+configure these options.
+
+#### Private image registries
+
+You can configure pull secrets to access agents in private docker registries using:
+
+```yaml
+imagePullSecrets:
+  - name: custom-registry-secret
+```
+
+where `custom-registry-secret` is the name of a kubernetes secret created according to the official
+[documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/).
+ +#### Private github repositories + +If you want to build agents from enterprise github or private github repositories, you can either create a +[Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) +or use a [GitHub App](https://docs.github.com/en/apps/creating-github-apps/about-creating-github-apps/about-creating-github-apps) +to access the repository. The configuration looks as follows: + +```yaml +github: + auths: + github.com: + type: "pat" + token: "ghp_xxxxxxxxxxxxxxxxxxxx" + github.enterprise.com: + type: "app" + app_id: 123456 + installation_id: 789012 + private_key: | + -----BEGIN RSA PRIVATE KEY----- + MIIEpAIBAAKCAQEA... + -----END RSA PRIVATE KEY----- +``` + +### Storage + +By default, Agent Stack deployment includes postgresql and seaweedfs (s3 compatible object storage). It is not +recommended to use these builtin versions of services in production. Instead, you should configure external databases +(ideally managed by a cloud provider). + +#### External PostgreSQL + +If you prefer to use an external PostgreSQL instance instead of provisioning a new one within the cluster, +you can disable the built-in PostgreSQL and provide the required connection details using the `externalDatabase` section. +Below is an example configuration: + +```yaml +postgresql: + enabled: false # disable builtin subchart +externalDatabase: + host: "" + port: 5432 + user: "" + database: "" + password: "" + # Required when initContainers.createVectorDbExtension is enabled + adminUser: "" + adminPassword: "" + ssl: true + sslRootCert: "" + # SSL certificate for the external database to ensure ssl connection, for example: + # sslRootCert: | + # -----BEGIN CERTIFICATE----- + # ... 
+ # -----END CERTIFICATE----- +``` + +If you encounter issues with installing `vector` extension during db migration, you can disable the creation by: + +```console +initContainers.createVectorDbExtension=false +``` + +Then make sure the following SQL statements were executed in your database: + +```sql +CREATE EXTENSION IF NOT EXISTS vector; +SET maintenance_work_mem = '512MB'; +SET hnsw.ef_search = 1000; +SET hnsw.iterative_scan = strict_order; +SET hnsw.max_scan_tuples = 1000000; +``` + +#### Redis + +Agent Stack supports Redis for rate limiting and caching. This feature is disabled by default, but required if replicaCount > 1. +You can use the built-in Redis subchart or connect to an external instance. + +**Enable Built-in Redis:** + +```yaml +redis: + enabled: true +``` + +**External Redis:** + +To use an external Redis instance, ensure the built-in subchart is disabled (default) and provide connection details: + +```yaml +redis: + enabled: false +externalRedis: + host: "" + port: 6379 + password: "" + # Optional: use existing secret for password + # existingSecret: "my-redis-secret" + # existingSecretPasswordKey: "redis-password" +``` + +#### External S3 support + +You may want to have Agent Stack connect to an external object storage rather than installing seaweedfs inside +your cluster. To achieve this, the chart allows you to specify credentials for an external storage streaming with the +`externalS3`. You should also disable the seaweedfs installation with the `seaweedfs.enabled` +option. Here is an example: + +```console +seaweedfs: + enabled: false +externalS3: + host: + accessKeyID: + accessKeySecret: +``` + +### Advanced Configuration + +The list of all configuration options is available in the +[values.yaml](https://github.com/i-am-bee/agentstack/blob/v0.4.3/helm/values.yaml) file. If you have specific +requirements for the helm chart configuration which are not covered by the current options, please open an issue. 
+
+## Management Commands
+
+### Upgrading
+
+To upgrade to a newer version of the Agent Stack, use:
+
+```shell
+helm upgrade --install agentstack -f config.yaml oci://ghcr.io/i-am-bee/agentstack/chart/agentstack:<version>
+```
+
+### View Current Configuration
+
+```bash
+helm get values agentstack
+```
+
+### Check Deployment Status
+
+```bash
+helm status agentstack
+kubectl get pods
+kubectl logs deployment/agentstack-server
+```
+
+### Uninstall
+
+```bash
+helm uninstall agentstack
+```
+
+## Troubleshooting
+
+### Common Issues
+
+**Platform not starting:**
+
+```bash
+# Check pod status
+kubectl get pod
+
+# Check server logs
+kubectl logs deployment/agentstack-server
+# If server is not starting, check specific init container logs (e.g. migrations)
+kubectl logs deployment/agentstack-server -c run-migrations
+
+# Check events
+kubectl get events --sort-by=.lastTimestamp
+```
+
+**Authentication issues:**
+
+Make sure you have configured your OIDC provider correctly:
+
+- redirect URI should be the public URL + `/api/auth/callback` (e.g. `https://your-public-url.com/api/auth/callback`)
+- for CLI, the redirect URI should be `http://localhost:9001/callback`
+- consider creating a separate public OIDC application for the CLI
+- `audience` claim should be the public URL of your application without a trailing slash (e.g.
`https://example.com`) +- `trustProxyHeaders` must be enabled to correctly forward the request URL through proxies +- if this is still not working, try to disable `auth.oidc.validate_audience` diff --git a/docs/stable/deploy-agent-stack/observability.mdx b/docs/stable/deploy-agent-stack/observability.mdx new file mode 100644 index 000000000..dc4dc7df7 --- /dev/null +++ b/docs/stable/deploy-agent-stack/observability.mdx @@ -0,0 +1,155 @@ +--- +title: Monitoring & Observability +description: Monitor traces, logs, and metrics from your agents and platform +--- + +Agent Stack includes built-in observability through [OpenTelemetry (OTLP)](https://opentelemetry.io/), with [Arize Phoenix](https://docs.arize.com/phoenix) available out-of-the-box for immediate use. Monitor your agents through logging, telemetry, and integration with external monitoring systems. + +```mermaid +graph LR + %% Sources + AgentStack[Agent Stack Server] + Agent[Agent Providers] + + %% Collector + OTLP[OpenTelemetry
Collector
:4318] + + %% Destinations + Phoenix[Phoenix
:6006
✓ Built-in] + Langfuse[Langfuse
Cloud
⚙ Config Required] + Custom[Custom
Backend
⚙ Config Required] + + %% Data flows + AgentStack -->|traces/metrics| OTLP + Agent -->|traces/metrics| OTLP + + OTLP --> Phoenix + OTLP --> Langfuse + OTLP --> Custom + + %% Simple styling + classDef default fill:#f9f9f9,stroke:#333,stroke-width:2px + classDef collector fill:#e3f2fd,stroke:#1976d2,stroke-width:2px + classDef builtin fill:#e8f5e9,stroke:#2e7d32,stroke-width:2px + classDef optional fill:#fff3e0,stroke:#f57c00,stroke-width:2px,stroke-dasharray: 5 5 + + class OTLP collector + class Phoenix builtin + class Langfuse,Custom optional +``` + +## View Agent Logs + +Stream real-time logs from any running agent: +```bash +agentstack logs +``` + +What you'll see: +- Agent startup and initialization +- Request processing steps +- Error messages and stack traces +- Container lifecycle events + + + Logs are only available for managed (containerized) agents that are currently running. + + +## Telemetry Collection + +Agent Stack includes OpenTelemetry instrumentation to collect traces and metrics. Telemetry data helps with performance monitoring, error tracking, usage analytics, and debugging agent interactions. + +By default, Agent Stack sends telemetry to: +- **Local Phoenix instance** (if running) for trace visualization + +The telemetry includes: +- Platform version and runtime details +- Agent execution traces + +## Quickstart: Enable Phoenix Observability + +[Arize Phoenix](https://phoenix.arize.com/) provides visualization for OpenTelemetry traces from your agents. + + + **Important License Notice**: Phoenix is disabled by default in Agent Stack. When you enable Phoenix, be aware that Arize Phoenix is licensed under the Elastic License v2 (ELv2), which has specific terms regarding commercial use and distribution. By enabling Phoenix, you acknowledge that you are responsible for ensuring compliance with the ELv2 license terms for your specific use case. 
Please review the [Phoenix license](https://github.com/Arize-ai/phoenix/blob/main/LICENSE) before enabling this feature in production environments. + + + + + +Install and start Phoenix using the `agentstack platform start` command: +```sh +agentstack platform start --set phoenix.enabled=true +``` + +You can run this even if your platform is already running without losing data. + + + + +Spinning up Phoenix can take a while, even after the `platform start` command reports success. Go to [http://localhost:6006](http://localhost:6006) and check if it's running. If not, please wait a few minutes or check your internet connection. + + + + +Execute the following command to run an example chat agent: +```sh +agentstack run chat "Hello" +``` + + + + +Open [http://localhost:6006](http://localhost:6006) in your browser and navigate to the **default** project to explore the collected traces. + + + + + + For an enhanced user experience and richer trace detail, consider instrumenting agents using the [OpenInference](https://github.com/Arize-ai/openinference/) standard for custom instrumentation. + + +## Advanced Configuration + +### Configure Langfuse Integration + +[Langfuse](https://langfuse.com/) is an LLM observability platform that can be integrated with the Agent Stack through OpenTelemetry. + + + +1. Sign up at [cloud.langfuse.com](https://cloud.langfuse.com) +2. Create a project and generate API keys +3. Encode your keys: `echo -n "public_key:secret_key" | base64` + + +```yaml +collector: + exporters: + otlphttp/langfuse: + endpoint: "https://cloud.langfuse.com/api/public/otel" # EU data region + headers: + Authorization: "Basic " + pipelines: + traces: + receivers: [ otlp ] + processors: [ memory_limiter, filter/phoenix, batch ] + exporters: [ otlphttp/langfuse ] +``` + + +```bash +agentstack platform start -f config.yaml +``` + + +Check your Langfuse project dashboard for incoming traces and metrics. 
+ + + +## Additional Resources + +- **OpenTelemetry Docs**: https://opentelemetry.io/docs/ +- **Langfuse Docs**: https://langfuse.com/docs +- **Phoenix Docs**: https://docs.arize.com/phoenix +- **Prometheus Docs**: https://prometheus.io/docs/ +- **Grafana Docs**: https://grafana.com/docs/ diff --git a/docs/stable/deploy-agents/building-agents.mdx b/docs/stable/deploy-agents/building-agents.mdx new file mode 100644 index 000000000..906ca9df3 --- /dev/null +++ b/docs/stable/deploy-agents/building-agents.mdx @@ -0,0 +1,160 @@ +--- +title: "Build New Agents" +description: "Start building your own agent with a simple Hello World example" +--- + +To help you get started quickly, we’ve created a ready-to-use starter repository. You can clone it and start coding right away—no setup headaches or boilerplate required. This guide walks you through running your first “Hello World” agent and then customizing it to make it your own. + +## Prerequisites + +- Agent Stack installed ([Quickstart](/introduction/quickstart)) +- [uv](https://docs.astral.sh/uv/) package manager (should be already installed if you followed the quickstart) + +## Start From Template + + + + + +```bash +git clone https://github.com/i-am-bee/agentstack-starter my-agent +cd my-agent +``` + + +1. Go to the [template repository](https://github.com/i-am-bee/agentstack-starter) +2. Click "Use this template" → "Create a new repository" +3. Clone your new repository locally + + + + + + +```bash +uv run server +``` + + +**Enable auto-reload during development:** Add `watchfiles` to automatically restart your server when code changes: +```bash +uv run watchfiles agentstack_agents.agent.run +``` + + + + + +In another terminal: + +```bash +agentstack run example_agent "Alice" +``` + + + +You should see: "Ciao Alice!" 🎉 + +With your first agent running, you can now modify it to do anything you want. 
+ +## Implement Your Agent Logic + +Navigate to [src/agentstack_agents/agent.py](https://github.com/i-am-bee/agentstack-starter/blob/main/src/agentstack_agents/agent.py) and replace the example with your agent logic. + +The starter example is minimal and intended for demonstration purposes only: + +```python +import os + +from a2a.types import ( + Message, +) +from a2a.utils.message import get_message_text +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext +from agentstack_sdk.a2a.types import AgentMessage + +server = Server() + +@server.agent() +async def example_agent(input: Message, context: RunContext): + """Polite agent that greets the user""" + hello_template: str = os.getenv("HELLO_TEMPLATE", "Ciao %s!") + yield AgentMessage(text=hello_template % get_message_text(input)) + +def run(): + server.run(host=os.getenv("HOST", "127.0.0.1"), port=int(os.getenv("PORT", 8000))) + + +if __name__ == "__main__": + run() +``` + + + + +An agent is essentially an HTTP server. Create a `Server` instance and run it using `run()`. + + + +Add the `@server.agent` decorator to your function so the platform recognizes it as an agent. + + + +The function name becomes the agent’s name in the platform. + + + +Write a docstring for the function; it will be extracted and shown as the agent’s description in the platform. + + + +- **First argument:** an [A2A `Message`](https://a2a-protocol.org/latest/specification/#64-message-object). +- **Second argument:** a `RunContext` object with run details (e.g., `task_id`, `context_id`). + + + +Use `get_message_text()` to quickly extract the text content from a `Message`. + + + +The agent function should be asynchronous and yield results as they’re ready. + + + +- Yield an `AgentMessage` (a handy wrapper around A2A Message) for convenience. +- Or yield a plain `str`, which will be automatically converted into an A2A Message. 
+ + + + +## Starting from Scratch + +If you prefer not to use the starter repo: +- Create an empty Python project +- Install `agentstack-sdk` +- Copy the example code above + +The starter repo mainly provides basic scaffolding, a GitHub workflow, and a Dockerfile — all optional. + +## Next Steps + +After building your agent, you can enhance it and learn more: + + + +Customize your agent's name, description, and how it appears in the UI + + + +Learn how agents and clients communicate through structured messaging + + + +Understand how to handle multi-turn conversations and maintain context + + + +Work with files to provide inputs or store outputs for your agent + + \ No newline at end of file diff --git a/docs/stable/deploy-agents/deploy-your-agents.mdx b/docs/stable/deploy-agents/deploy-your-agents.mdx new file mode 100644 index 000000000..172006657 --- /dev/null +++ b/docs/stable/deploy-agents/deploy-your-agents.mdx @@ -0,0 +1,160 @@ +--- +title: "Deploy Your Agents" +description: "Package and deploy your agent to Agent Stack as a managed service" +--- + +Once you've wrapped your agent with the Agent Stack server, you need to containerize it so Agent Stack can run it as a managed service. 
+ +## Prerequisites + +- Docker installed and running (or use GitHub Actions via template) +- Agent wrapped with Agent Stack SDK ([Wrap Existing Agents](/deploy/wrap-existing-agents) or [Build New Agent](/introduction/start-building-agents)) +- Agent Stack installed ([Quickstart](/introduction/quickstart)) + +## Containerize Your Agent + +### Option 1: Use the Starter Template (Recommended) + +The [agentstack-starter template](https://github.com/i-am-bee/agentstack-starter) includes everything you need: +- Production-ready Dockerfile +- GitHub Actions for automated builds +- Agent Stack deployment configuration + +Clone it and modify for your agent: +```bash +git clone https://github.com/i-am-bee/agentstack-starter my-agent +cd my-agent +# Replace the example agent with your code +``` + + +You can push your repository to GitHub and tag it to leverage the automated GitHub workflow to build the image directly in GitHub, without running Docker locally. + + +### Option 2: Create Your Own Dockerfile + +If you're starting from scratch, create a `Dockerfile`: +```dockerfile +FROM python:3.12-slim + +WORKDIR /app + +# Install dependencies +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy agent code +COPY . . + +# Configure server +ENV HOST=0.0.0.0 +ENV PORT=10000 +EXPOSE 10000 + +# Run agent server +CMD ["python", "server.py"] +``` + + +**Port Configuration:** The SDK defaults to port 10000. Make sure your Dockerfile `EXPOSE` and `ENV PORT` match, and that your `server.run()` call uses the same port via environment variable. + + +## Deploy to Agent Stack + +### One-Command Deployment + +From your agent directory, run: +```bash +agentstack add . +``` + +This automatically: +1. ✓ Builds your Docker image locally +2. ✓ Copies the image into Agent Stack's VM +3. ✓ Registers it as an available agent + + +**Why "copy into VM"?** Agent Stack runs in an isolated VM (Lima on Mac/Linux, WSL on Windows). 
Even though Docker Desktop builds your image, Agent Stack needs it copied into its VM to run it. The `add` command handles this automatically. + + +### Verify Deployment + +Check that your agent is registered: +```bash +agentstack list +``` + +Your agent should appear in the list with status information. + +### Test Your Agent + +Test via CLI: +```bash +agentstack run my-agent "Hello!" +``` + +Or open the web UI: +```bash +agentstack ui +``` + +Your agent will be available at `http://127.0.0.1:8333` + +## Advanced Options + + +If you need more control over the build: +```bash +# Step 1: Build and copy to Agent Stack VM +agentstack build . --import + +# Step 2: Register the agent +agentstack add agentstack.local/my-agent-abc123:latest +``` + +The `--import` flag (enabled by default) copies your Docker image from Docker Desktop into Agent Stack's VM. + + + +Test your Dockerfile builds without deploying: +```bash +agentstack build . --no-import +``` + +This creates the image in Docker Desktop but doesn't copy it to Agent Stack. Useful for testing or if you plan to push to a registry instead. + + + +```bash +# Custom image tag +agentstack build . --tag my-agent:v1.0.0 --import + +# Build for multiple architectures +agentstack build . --multi-platform --import + +# Use custom Dockerfile location +agentstack build . 
--dockerfile=./deploy/Containerfile --import +``` + + +## Next Steps + +Now that your agent is deployed, enhance it with extensions: + + + +Change your agent's LLM at runtime and manage model connections dynamically + + + +Visualize your agent’s decision-making and interactions over time in the UI + + + +Display references and link sources for transparency, directly in the UI + + + +Guide your users to provide consistent information with the form extension + + \ No newline at end of file diff --git a/docs/stable/deploy-agents/wrap-existing-agents.mdx b/docs/stable/deploy-agents/wrap-existing-agents.mdx new file mode 100644 index 000000000..432440b7d --- /dev/null +++ b/docs/stable/deploy-agents/wrap-existing-agents.mdx @@ -0,0 +1,122 @@ +--- +title: "Wrap Your Existing Agents" +description: "Deploy your existing agent to Agent Stack by wrapping it with the Agent Stack server" +--- + +If you already have an agent built with any framework (LangGraph, CrewAI, or your own custom implementation), you can deploy it to Agent Stack by wrapping it with the Agent Stack server. + +This gives you instant access to the Agent Stack UI, observability features, and deployment infrastructure without rewriting your agent logic. + +## Prerequisites + +- Agent Stack installed ([Quickstart](/introduction/quickstart)) +- An existing agent implementation +- Python 3.12+ environment + +## How It Works + +The Agent Stack server wraps your existing agent code and exposes it through the A2A protocol. Your agent logic stays exactly the same - you just add a thin server wrapper that handles: + +- Protocol translation (A2A) +- Auto-registration with Agent Stack +- Session management +- Extension support + +## Quick Start + +### 1. Install the SDK + +```bash +pip install agentstack-sdk +``` + +### 2. 
Create a Server Wrapper + +Create a new file (e.g., `server.py`) that wraps your existing agent: + +```python +# Import your existing agent logic +from my_agent import run_my_agent # Your existing agent code + +import os +from a2a.types import Message +from a2a.utils.message import get_message_text +from agentstack_sdk.server import Server +from agentstack_sdk.server.context import RunContext +from agentstack_sdk.a2a.types import AgentMessage + +server = Server() + +@server.agent() +async def my_wrapped_agent(input: Message, context: RunContext): + """Wrapper around my existing agent""" + + # Extract the user's message + user_message = get_message_text(input) + + # Call your existing agent logic + # This can be synchronous or asynchronous + result = await run_my_agent(user_message) + + # Yield the response back to Agent Stack + yield AgentMessage(text=result) + +def run(): + server.run( + host=os.getenv("HOST", "127.0.0.1"), + port=int(os.getenv("PORT", 8000)) + ) + +if __name__ == "__main__": + run() +``` + +### 3. Run Your Server + +```bash +python server.py +``` + +Your agent will automatically register with Agent Stack! 
+ + +**Enable auto-reload during development:** Add `watchfiles` to automatically restart your server when code changes: +```bash +uv run watchfiles agentstack_agents.agent.run +``` + + +## Integration Patterns + +### Streaming Responses + +If your agent generates responses incrementally, you can stream them: + +```python +@server.agent() +async def streaming_agent(input: Message, context: RunContext): + user_message = get_message_text(input) + + # Stream results as they come + async for chunk in my_streaming_agent(user_message): + yield AgentMessage(text=chunk) +``` + +### With Context History + +Access previous messages in the conversation: + +```python +@server.agent() +async def contextual_agent(input: Message, context: RunContext): + # Get conversation history + previous_messages = context.history + + # Your agent can use this context + result = await my_agent_with_context( + current_message=get_message_text(input), + history=previous_messages + ) + + yield AgentMessage(text=result) +``` \ No newline at end of file diff --git a/docs/stable/experimental/a2a-proxy.mdx b/docs/stable/experimental/a2a-proxy.mdx new file mode 100644 index 000000000..04f9d1da3 --- /dev/null +++ b/docs/stable/experimental/a2a-proxy.mdx @@ -0,0 +1,79 @@ +--- +title: "Connect A2A Agents" +description: "Instant UI for existing A2A agents" +--- + +Got an existing A2A agent? You can connect it to Agent Stack instantly using the Agent Stack A2A Proxy — a lightweight tool that bridges your agent and the Agent Stack. + +No need to modify your agent’s code — just run one command and your agent will appear live in the Agent Stack interface. + +## Quickstart + +### Prerequisites + +- A running A2A agent with a valid agent card at `/.well-known/agent-card.json` +- Node.js installed (for npx) +- Agent Stack running (default: `http://127.0.0.1:8333`) +- The proxy installed (`npx i-am-bee/agentstack-a2a-proxy`) + +### Connect Your Agent + +1. 
**Start your A2A agent** and note its URL (e.g., `http://localhost:8080`) + +2. **Run the proxy** pointing to your agent: +```bash +npx i-am-bee/agentstack-a2a-proxy start http://localhost:8080 +``` + +That's it! Your agent is now registered with Agent Stack and accessible through the interface. + +## How It Works + +The proxy creates a bridge between your A2A agent and Agent Stack by: + +1. **Intercepting agent card requests** - Captures `/.well-known/agent-card.json` requests from any A2A client +2. **Adding Agent Details extension** - Automatically injects the necessary [AgentDetail](/extensions/agent-details) extension data that enables the agent to work within the Agent Stack ecosystem +3. **Auto-registration** - Automatically registers the modified agent with the Agent Stack, making it immediately available + + +The proxy supports agents using `jsonrpc` and `http_json` transport protocols. + + +## Configuration Options + +### Custom Proxy Port + +By default, the proxy runs on port 8000. 
To use a different port:
+```bash
+npx i-am-bee/agentstack-a2a-proxy start http://localhost:8080 --port 4000
+```
+
+### Custom Platform URL
+
+If your Agent Stack is running on a different URL:
+```bash
+npx i-am-bee/agentstack-a2a-proxy start http://localhost:8080 --platform-url http://localhost:9000
+```
+
+### Disable Auto-Registration
+
+To run the proxy without automatically registering with Agent Stack:
+```bash
+npx i-am-bee/agentstack-a2a-proxy start http://localhost:8080 --auto-register false
+```
+
+### Custom Agent Details
+
+You can customize the agent details extension data by providing a JSON file:
+```bash
+npx i-am-bee/agentstack-a2a-proxy start http://localhost:8080 --custom-data ./my-agent-details.json
+```
+
+## Available Options
+
+| Option | Alias | Description | Default |
+|--------|-------|-------------|---------|
+| `--port` | `-p` | Port to run the proxy server on | 8000 |
+| `--auto-register` | `-a` | Enable/disable auto-registration with Agent Stack | true |
+| `--platform-url` | `-P` | Platform URL to register with | http://127.0.0.1:8333 |
+| `--custom-data` | `-c` | Path to custom agent detail JSON file | - |
\ No newline at end of file
diff --git a/docs/stable/experimental/connectors.mdx b/docs/stable/experimental/connectors.mdx
new file mode 100644
index 000000000..0427758df
--- /dev/null
+++ b/docs/stable/experimental/connectors.mdx
@@ -0,0 +1,207 @@
+---
+title: "MCP Connectors"
+description: "Create and manage data connections to external services"
+---
+
+Connectors are remote MCP servers registered into the Agent Stack. Effectively, they take some responsibilities away from MCP Clients to improve user experience and reduce overhead within the Agent Stack.
+
+Once a connector is created and connected, it can be used like any regular remote MCP server by both clients and agents.
+
+## API Reference
+
+The core of the connectors API consists of the following endpoints:
+
+| Endpoint                                  | Purpose                |
+| ----------------------------------------- | ---------------------- |
+| `POST /api/v1/connectors`                 | Create a connector     |
+| `GET /api/v1/connectors`                  | List all connectors    |
+| `GET /api/v1/connectors/{id}`             | Read a connector       |
+| `DELETE /api/v1/connectors/{id}`          | Delete a connector     |
+| `POST /api/v1/connectors/{id}/connect`    | Connect a connector    |
+| `POST /api/v1/connectors/{id}/disconnect` | Disconnect a connector |
+
+The connector can then be used by any MCP client at the `/api/v1/connectors/{id}/mcp` endpoint. For example, to inspect a connector using MCP Inspector:
+
+```bash
+npx -y @modelcontextprotocol/inspector --transport http --server-url http://localhost:8333/api/v1/connectors/{id}/mcp
+```
+
+Additionally, the connectors API exposes a catalog of preconfigured presets for common connectors:
+
+| Endpoint                         | Purpose                |
+| -------------------------------- | ---------------------- |
+| `GET /api/v1/connectors/presets` | List connector presets |
+
+## Lifecycle
+
+The following diagram illustrates the lifecycle of a connector:
+
+```mermaid
+stateDiagram-v2
+ [*] --> created: Create
+ created --> auth_required: Connect (auth)
+ created --> connected: Connect (no auth)
+ auth_required --> connected: oAuth redirect
+ connected --> disconnected: Disconnect, auth expired, probe failed, ...
+ disconnected --> [*]: Delete
+ disconnected --> connected: Connect (no auth)
+ disconnected --> auth_required: Connect (auth)
+```
+
+The usual flow works as follows:
+
+1. **Create**: Client creates a connector by calling `POST /api/v1/connectors` with the MCP server URL.
+2. **Connect**: Client initiates connection by calling `POST /api/v1/connectors/{id}/connect`.
+ - For OAuth-enabled servers: The response will contain an authorization URL for the user to complete authentication + - For token-based authentication: Provide an `access_token` in the connect request body (see [Authentication](#authentication)) +3. **Authorize** (OAuth only): User visits the authorization URL, authenticates and grants access. Authorization server redirects the user back to the platform with an authorization code. +4. **Complete**: Platform exchanges the authorization code for access and refresh tokens (OAuth) or stores the provided access token. Once completed, the connector is in `connected` state and ready to be used. + +## Authentication + +Connectors support two authentication methods: + +### OAuth (External MCP Servers) + +For MCP servers that support OAuth, the platform handles the full authorization code flow. No additional configuration is needed in the connect request. + +### Token-based Authentication + +For MCP servers that use simple token-based authentication, provide the token when connecting: + +```json +POST /api/v1/connectors/{id}/connect +{ + "access_token": "YOUR_API_TOKEN" +} +``` + +The authentication token is used differently depending on the connector type: +- **External HTTP/HTTPS MCP servers**: Token is sent as a Bearer token in the `Authorization` header for all requests +- **Managed stdio MCP servers**: Token is injected as an environment variable in the container (requires `access_token_env_name` in preset configuration) + +## Error handling + +The connectors API endpoints return standard HTTP status codes in responses. Apart from that, there are two additional mechanisms used by connectors API to relay errors to the client. + +### Authorization Code Flow + +An error may happen during the authorization code flow as described in [RFC6749 Section 4.1](https://www.rfc-editor.org/rfc/rfc6749#section-4.1.2.1). 
+
+When no `redirect_url` has been provided by the client during connection initiation, the server responds with an HTML page containing the error. Otherwise, the user will be redirected to `redirect_url` instead. The `error` and `error_description` query parameters will be included in the redirect.
+
+### Disconnection
+
+A connector can be asynchronously disconnected at any time. This can happen for various reasons including intentional disconnect, refresh token expiration or an arbitrary error. The client MAY check for the `disconnected` state and read `disconnect_reason` for a description of what happened.
+
+## Connector Presets
+
+Connector presets provide pre-configured settings for common MCP servers, simplifying the connector creation process. Presets can include:
+
+- **URL**: The MCP server endpoint (supports `http://`, `https://`, and `mcp+stdio://` schemes)
+- **Client credentials**: Pre-configured OAuth `client_id` and `client_secret` for public clients
+- **Metadata**: Display information such as name and description
+- **Stdio configuration**: For `mcp+stdio://` URLs, container image and runtime settings for managed MCP servers
+
+### Available Presets
+
+The platform comes with several built-in presets:
+
+| MCP Server | URL                                 | Description                                                              |
+| ---------- | ----------------------------------- | ------------------------------------------------------------------------ |
+| Stripe     | `https://mcp.stripe.com`            | Payment processing and financial infrastructure tools                    |
+| Box        | `https://mcp.box.com`               | Search, access and get insights on your Box content                      |
+| GitHub     | `https://api.githubcopilot.com/mcp` | Access and interact with your GitHub repositories and code intelligence  |
+
+### Using Presets
+
+When creating a connector with `POST /connectors`, the system automatically matches the provided URL against available presets (this behavior is controlled by the `match_preset` parameter, which defaults to `true`). If a match is found:
+
+1. 
**Client credentials**: If no `client_id` is provided in the request, the preset's credentials are used automatically +2. **Metadata**: If no metadata is provided in the request, the preset's metadata (name, description) is used + +This allows for simplified connector creation. For example, to create a GitHub connector: + +```json +POST /connectors +{ + "url": "https://api.githubcopilot.com/mcp" +} +``` + +The system will automatically apply the preset's client credentials and metadata. + +To disable preset matching and provide all credentials manually, set `match_preset: false` in the request. + +### Configuring Presets + +Connector presets are configurable via Helm values when deploying Agent Stack. The presets are defined in the `connector.presets` section of `values.yaml`: + +#### Remote MCP Servers (HTTP/HTTPS) + +For MCP servers accessible over HTTP/HTTPS with Streamable HTTP transport: + +```yaml +connector: + presets: + - url: "https://mcp.stripe.com" + metadata: + name: "Stripe" + description: "Payment processing and financial infrastructure tools" + - url: "https://mcp.box.com" + client_id: "YOUR_CLIENT_ID" + client_secret: "YOUR_CLIENT_SECRET" + metadata: + name: "Box" + description: "Search, access and get insights on your Box content" +``` + +#### Managed MCP Servers (stdio) + +For MCP servers that use stdio transport, Agent Stack can manage them as Kubernetes deployments using the `mcp+stdio://` scheme. 
The platform automatically: + +- Deploys the MCP server as a Kubernetes pod with a sidecar container running [supergateway](https://github.com/supercorp-ai/supergateway) +- Exposes the stdio MCP server over Streamable HTTP transport via supergateway +- Manages the deployment lifecycle (creates on connect, deletes on disconnect) +- Handles authentication tokens via environment variables + +```yaml +connector: + presets: + - url: "mcp+stdio://example-mcp" + metadata: + name: "Example MCP" + description: "Example stdio-based MCP server" + stdio: + image: "registry.example.com/mcp-server:latest" + command: ["node"] # Optional: override container command + args: ["dist/index.js"] # Optional: override container args + env: # Optional: additional environment variables + LOG_LEVEL: "info" + access_token_env_name: "API_TOKEN" # Optional: env var name to inject access token +``` + +The managed MCP architecture uses a sidecar pattern: +- **MCP Server Container**: Runs your stdio-based MCP server +- **Supergateway Sidecar**: Wraps the stdio interface and exposes it as Streamable HTTP at port 8080 + +When a connector using a managed preset is connected, the platform: +1. Creates a Kubernetes Deployment with both containers +2. Creates a Kubernetes Service to expose the supergateway +3. Waits for the deployment to become ready (up to 60 seconds) +4. Proxies MCP requests to the managed service +5. If an `accessToken` was provided in the connect request and the preset defines `access_token_env_name`, injects the token as an environment variable + +When disconnected, the platform cleans up the Deployment and Service resources. + +#### Configuration Details + +The presets are injected into the platform via the `CONNECTOR__PRESETS` environment variable, which is populated from a Kubernetes Secret created by Helm. 
This allows administrators to: + +- Add custom MCP server presets for their organization (both remote and managed) +- Modify or remove default presets +- Configure client credentials for remote MCP servers with OAuth +- Configure stdio container images and settings for managed MCP servers +- Customize metadata (names, descriptions) for better user experience + +After modifying preset configuration in Helm values, redeploy the platform for changes to take effect. diff --git a/docs/stable/experimental/mcp.mdx b/docs/stable/experimental/mcp.mdx new file mode 100644 index 000000000..69aa9bea5 --- /dev/null +++ b/docs/stable/experimental/mcp.mdx @@ -0,0 +1,65 @@ +--- +title: "Register MCP Servers" +description: "Learn how to register MCP servers into Agent Stack and create curated toolkits for agents to use" +--- + +## Prerequisites + +- [Agent Stack](/introduction/quickstart) installed +- Arbitrary [MCP server](https://modelcontextprotocol.io/introduction) with tools served over [Streamable HTTP transport](https://modelcontextprotocol.io/docs/concepts/transports#streamable-http) + + + You may use + [server-everything](https://www.npmjs.com/package/@modelcontextprotocol/server-everything) + to complete the tutorial. + + + + If your server uses `stdio` transport, you can register it as a [Managed MCP Connector](/experimental/connectors#managed-mcp-servers-stdio). The platform will automatically deploy and manage the server for you. + + +## Register the MCP server + +Start by registering the MCP server into the Platform: + +```bash +agentstack mcp add +``` + +## List available tools + +List available MCP tools: + + + If you have multiple MCP servers registered, tools across all the servers will + be listed. + + +```bash +agentstack mcp tools +``` + +## Create a toolkit + +Create a toolkit: + +```bash +agentstack mcp toolkit ... +``` + +Toolkit is just another MCP server containing the selected tools. It is served over Streamable HTTP. + + + Toolkits have an expiration. 
They are temporary resources meant to be used by + an agent for a single task. + + +## Use the toolkit with your agent + +Grab the URL of the toolkit and give it to an agent. The agent can now use this URL with the MCP client to connect an use the toolkit. + + + We're working on a mechanism for agents to receive arbitrary MCP server as a + dependency injection over the agent protocol. Currently, the URL delivery must + be done out of bound. + diff --git a/docs/stable/images/architecture-src.excalidraw b/docs/stable/images/architecture-src.excalidraw new file mode 100644 index 000000000..6cab728ec --- /dev/null +++ b/docs/stable/images/architecture-src.excalidraw @@ -0,0 +1,3496 @@ +{ + "type": "excalidraw", + "version": 2, + "source": "https://excalidraw.com", + "elements": [ + { + "id": "Z4zd1BeeYPtfiAa2eykzF", + "type": "rectangle", + "x": 1655.7032778287503, + "y": 283.6515325868795, + "width": 512.8263420685471, + "height": 282.56238775550014, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "a0", + "roundness": { + "type": 3 + }, + "seed": 1916592754, + "version": 475, + "versionNonce": 524461486, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "FWF7qxxSYrUsyYBFQS9P_" + }, + { + "id": "lDXKPK8r6CGZ3D3rA3UQS", + "type": "arrow" + }, + { + "id": "SeWMyQtmgAvhyvK_2npTH", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "FWF7qxxSYrUsyYBFQS9P_", + "type": "text", + "x": 1741.6164488630238, + "y": 288.6515325868795, + "width": 341, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "a1", 
+ "roundness": null, + "seed": 1964753970, + "version": 349, + "versionNonce": 796770286, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "Agent provider (NodeJS process)", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "top", + "containerId": "Z4zd1BeeYPtfiAa2eykzF", + "originalText": "Agent provider (NodeJS process)", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "g8aUVo8Q69X6RigT9AdGe", + "type": "rectangle", + "x": 1091.886494993032, + "y": 284.2307006109743, + "width": 512.8263420685471, + "height": 282.56238775550014, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "a2", + "roundness": { + "type": 3 + }, + "seed": 87584242, + "version": 541, + "versionNonce": 1296374894, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "l1nXiXPzlAe0sc8IrDr5a" + }, + { + "id": "R_L1RcA1JwbvQxrBLCVxd", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "l1nXiXPzlAe0sc8IrDr5a", + "type": "text", + "x": 1177.7996660273056, + "y": 289.2307006109743, + "width": 341, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "a3", + "roundness": null, + "seed": 1201266610, + "version": 431, + "versionNonce": 1266900654, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "Agent provider (Python process)", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "top", + "containerId": "g8aUVo8Q69X6RigT9AdGe", 
+ "originalText": "Agent provider (Python process)", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "Y8MDhb5yaeQe38gX_9TFT", + "type": "rectangle", + "x": 1102.6077887848187, + "y": 1054.4385970717456, + "width": 221.27696061180848, + "height": 63.830479415370164, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "a4", + "roundness": null, + "seed": 677845362, + "version": 1308, + "versionNonce": 1375865646, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "G8ZHgcv9WZPYknf2piZ5K" + }, + { + "id": "TvW3zJ7Z-Ttt6Ic5yBFny", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "G8ZHgcv9WZPYknf2piZ5K", + "type": "text", + "x": 1141.746269090723, + "y": 1073.8538367794306, + "width": 143, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "a5", + "roundness": null, + "seed": 1994857266, + "version": 1316, + "versionNonce": 1882486126, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "my python app", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "Y8MDhb5yaeQe38gX_9TFT", + "originalText": "my python app", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "JQvGgKdMWC4EKOJDOKI1F", + "type": "rectangle", + "x": 1053.6218043926383, + "y": 714.7987498380162, + "width": 689.2532134268122, + "height": 193.64294884437987, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + 
"strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "a6", + "roundness": { + "type": 3 + }, + "seed": 25224434, + "version": 1176, + "versionNonce": 1424781806, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "cZnnp7YXFZDBqdU9hGv80" + }, + { + "id": "TvW3zJ7Z-Ttt6Ic5yBFny", + "type": "arrow" + }, + { + "id": "WVBoG6C0rDUDf6p0u0nkq", + "type": "arrow" + }, + { + "id": "iFWDL9gVnPM0rNrKoqXs2", + "type": "arrow" + }, + { + "id": "RLesUF1XvwU021CYhYFPm", + "type": "arrow" + }, + { + "id": "lDXKPK8r6CGZ3D3rA3UQS", + "type": "arrow" + }, + { + "id": "R_L1RcA1JwbvQxrBLCVxd", + "type": "arrow" + }, + { + "id": "NK951lleKIiiDbV4dcouQ", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "cZnnp7YXFZDBqdU9hGv80", + "type": "text", + "x": 1282.7484111060444, + "y": 719.7987498380162, + "width": 231, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "a7", + "roundness": null, + "seed": 834369202, + "version": 1184, + "versionNonce": 1587807278, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "agentstack-platform server", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "top", + "containerId": "JQvGgKdMWC4EKOJDOKI1F", + "originalText": "agentstack-platform server", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "dtSDO42av2JSQrnquPm1H", + "type": "rectangle", + "x": 1132.4231367793363, + "y": 335.5877084634874, + "width": 409.7855052270803, + "height": 178.40179774883393, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "fillStyle": "hachure", + "strokeWidth": 2, + 
"strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "a8", + "roundness": null, + "seed": 675139698, + "version": 1886, + "versionNonce": 326477678, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "iclHxV4BAOc7LetwZqMJu" + }, + { + "id": "WVBoG6C0rDUDf6p0u0nkq", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "iclHxV4BAOc7LetwZqMJu", + "type": "text", + "x": 1172.3158893928764, + "y": 340.5877084634874, + "width": 330, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ebfbee", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "a9", + "roundness": null, + "seed": 1489957426, + "version": 371, + "versionNonce": 829065646, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "agents/community/crewai-agents", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "top", + "containerId": "dtSDO42av2JSQrnquPm1H", + "originalText": "agents/community/crewai-agents", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "XKrZOHcbABS7Z5KpYh6dl", + "type": "rectangle", + "x": 1214.9594712595717, + "y": 384.5906974875586, + "width": 225.28293129741698, + "height": 60.053902025178104, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ffec99", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aA", + "roundness": { + "type": 3 + }, + "seed": 643161074, + "version": 1447, + "versionNonce": 1113224750, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": 
"M_Kadg3TT1vMcz_FeXukD", + "type": "rectangle", + "x": 1378.2849927493771, + "y": 1054.1251999719177, + "width": 178.30711110518317, + "height": 62.090980513885384, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ffd43b", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aB", + "roundness": null, + "seed": 2128186802, + "version": 1675, + "versionNonce": 809992302, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "bELj5jyK2ONkmmlKv3tTj" + }, + { + "id": "RLesUF1XvwU021CYhYFPm", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "bELj5jyK2ONkmmlKv3tTj", + "type": "text", + "x": 1434.4385483019687, + "y": 1072.6706902288604, + "width": 66, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aC", + "roundness": null, + "seed": 2004944754, + "version": 1719, + "versionNonce": 1754874542, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "UI/CLI", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "M_Kadg3TT1vMcz_FeXukD", + "originalText": "UI/CLI", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "7WK-TuODoJhUEB-zG3dAm", + "type": "rectangle", + "x": 1973.6461117554245, + "y": 775.4475459474959, + "width": 178.75678359971062, + "height": 71.98838141386206, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aD", 
+ "roundness": null, + "seed": 1754482994, + "version": 1340, + "versionNonce": 99544878, + "isDeleted": false, + "boundElements": [ + { + "id": "qQK-gh6CtCZgOSCiA2Eds", + "type": "text" + }, + { + "id": "NK951lleKIiiDbV4dcouQ", + "type": "arrow" + }, + { + "id": "SeWMyQtmgAvhyvK_2npTH", + "type": "arrow" + }, + { + "id": "RvwXD_xf_4-IF95_ZtuL_", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "qQK-gh6CtCZgOSCiA2Eds", + "type": "text", + "x": 2024.5245035552798, + "y": 798.9417366544269, + "width": 77, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aE", + "roundness": null, + "seed": 158814962, + "version": 1388, + "versionNonce": 1558501742, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "observe", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "7WK-TuODoJhUEB-zG3dAm", + "originalText": "observe", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "5K-6eKkNX813dBOyoLB2z", + "type": "text", + "x": 1356.951038945339, + "y": 530.7226033297122, + "width": 110, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#343a40", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aF", + "roundness": null, + "seed": 213132466, + "version": 514, + "versionNonce": 395162222, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "A2A server", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + 
"originalText": "A2A server", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "S2USGowxL6Bk2qh_mG_CP", + "type": "text", + "x": 1072.6095969715207, + "y": 742.833352091413, + "width": 121, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#343a40", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aG", + "roundness": null, + "seed": 1005633138, + "version": 759, + "versionNonce": 742995118, + "isDeleted": false, + "boundElements": [ + { + "id": "WVBoG6C0rDUDf6p0u0nkq", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "A2A clients", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "A2A clients", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "KyzPFCda8j3pQJt0kxHlq", + "type": "rectangle", + "x": 794.1732071530532, + "y": 637.3894845940442, + "width": 404.19683587677343, + "height": 55.79387330716965, + "angle": 0, + "strokeColor": "#fff", + "backgroundColor": "#343a40", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aH", + "roundness": { + "type": 3 + }, + "seed": 334766130, + "version": 1058, + "versionNonce": 450522862, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "ONndy5P8h8CN-TRqnTsxu" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "ONndy5P8h8CN-TRqnTsxu", + "type": "text", + "x": 803.77162509144, + "y": 652.786421247629, + "width": 385, + "height": 25, + "angle": 0, + "strokeColor": "#fff", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": 
"4ftzrW38Idx-CUriHjUan", + "index": "aI", + "roundness": null, + "seed": 1621445106, + "version": 1061, + "versionNonce": 264921390, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "> uv tool install agentstack-cli", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "KyzPFCda8j3pQJt0kxHlq", + "originalText": "> uv tool install agentstack-cli", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "TI3--jStTG2IoshuU3KAb", + "type": "rectangle", + "x": 794.5561599747525, + "y": 314.91076813680934, + "width": 272.4574765184915, + "height": 52.438191341733734, + "angle": 0, + "strokeColor": "#fff", + "backgroundColor": "#343a40", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aJ", + "roundness": { + "type": 3 + }, + "seed": 1493908402, + "version": 943, + "versionNonce": 1048709998, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "Q7dNJAACuYOS2AR0AJTOS" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "Q7dNJAACuYOS2AR0AJTOS", + "type": "text", + "x": 804.2848982339983, + "y": 328.6298638076762, + "width": 253, + "height": 25, + "angle": 0, + "strokeColor": "#fff", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aK", + "roundness": null, + "seed": 994899314, + "version": 973, + "versionNonce": 88763822, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "> pip install agentstack-sdk", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "TI3--jStTG2IoshuU3KAb", + 
"originalText": "> pip install agentstack-sdk", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "WVBoG6C0rDUDf6p0u0nkq", + "type": "arrow", + "x": 1227.7456915593648, + "y": 517.6028448099687, + "width": 0.2303195297899947, + "height": 303.69574148725314, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#99e9f2", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aL", + "roundness": null, + "seed": 1756460850, + "version": 1700, + "versionNonce": 1368642350, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "points": [ + [ + 0, + 0 + ], + [ + 0.2303195297899947, + 303.69574148725314 + ] + ], + "lastCommittedPoint": null, + "startBinding": { + "elementId": "dtSDO42av2JSQrnquPm1H", + "focus": 0.5349354620723489, + "gap": 3.6133385976474983 + }, + "endBinding": { + "elementId": "RmjajsbASk_UWPYVjrnTl", + "focus": 0, + "gap": 14 + }, + "startArrowhead": "circle", + "endArrowhead": "arrow", + "elbowed": false, + "fixedSegments": [ + { + "index": 2, + "start": [ + 0, + 115.13181128331371 + ], + "end": [ + 1.2250637876531982, + 115.13181128331371 + ] + } + ], + "startIsSpecial": false, + "endIsSpecial": false + }, + { + "id": "iFWDL9gVnPM0rNrKoqXs2", + "type": "arrow", + "x": 1848.4390038150486, + "y": 514.3942128980264, + "width": 591.5659868252296, + "height": 263.85474195676784, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ebfbee", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aM", + "roundness": null, + "seed": 27246834, + "version": 928, + "versionNonce": 1536394478, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 
132.69940465890295 + ], + [ + -591.5659868252296, + 132.69940465890295 + ], + [ + -591.5659868252296, + 263.85474195676784 + ] + ], + "lastCommittedPoint": null, + "startBinding": { + "elementId": "hqEP1vCrHzRTKWXg-JFYm", + "fixedPoint": [ + 0.3732887331065876, + 1.0245063998069934 + ], + "focus": 0, + "gap": 0 + }, + "endBinding": { + "elementId": "RmjajsbASk_UWPYVjrnTl", + "fixedPoint": [ + 0.6878959459722289, + -0.06583949088088022 + ], + "focus": 0, + "gap": 0 + }, + "startArrowhead": "circle", + "endArrowhead": "arrow", + "elbowed": true, + "fixedSegments": [ + { + "index": 2, + "start": [ + 0, + 132.69940465890295 + ], + "end": [ + -591.5659868252296, + 132.69940465890295 + ] + } + ], + "startIsSpecial": false, + "endIsSpecial": false + }, + { + "id": "RmjajsbASk_UWPYVjrnTl", + "type": "rectangle", + "x": 1151.0797115145615, + "y": 783.2580888273392, + "width": 153.79259914918657, + "height": 76.08099493976555, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aN", + "roundness": { + "type": 3 + }, + "seed": 27185842, + "version": 567, + "versionNonce": 268559470, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "JMbnfnjme98qev9Lu0oiy" + }, + { + "id": "TvW3zJ7Z-Ttt6Ic5yBFny", + "type": "arrow" + }, + { + "id": "WVBoG6C0rDUDf6p0u0nkq", + "type": "arrow" + }, + { + "id": "iFWDL9gVnPM0rNrKoqXs2", + "type": "arrow" + }, + { + "id": "sEjhF4bHmgT-PUW6fGW_m", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "JMbnfnjme98qev9Lu0oiy", + "type": "text", + "x": 1178.4760110891548, + "y": 808.798586297222, + "width": 99, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ebfbee", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + 
"opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aO", + "roundness": null, + "seed": 1532341362, + "version": 484, + "versionNonce": 728180398, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "A2A-proxy", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "RmjajsbASk_UWPYVjrnTl", + "originalText": "A2A-proxy", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "sEjhF4bHmgT-PUW6fGW_m", + "type": "arrow", + "x": 1900.2634443369316, + "y": 515.0222271161072, + "width": 672.3874332477753, + "height": 445.99269996775433, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aP", + "roundness": null, + "seed": 344673842, + "version": 986, + "versionNonce": 1156626222, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 445.99269996775433 + ], + [ + -672.3874332477753, + 445.99269996775433 + ], + [ + -672.3874332477753, + 349.3168566509976 + ] + ], + "lastCommittedPoint": null, + "startBinding": { + "elementId": "hqEP1vCrHzRTKWXg-JFYm", + "fixedPoint": [ + 0.4997559698946842, + 1.0280266234034225 + ], + "focus": 0, + "gap": 0 + }, + "endBinding": { + "elementId": "RmjajsbASk_UWPYVjrnTl", + "fixedPoint": [ + 0.499349773652622, + 1.0657194349779282 + ], + "focus": 0, + "gap": 0 + }, + "startArrowhead": "arrow", + "endArrowhead": null, + "elbowed": true, + "fixedSegments": [ + { + "index": 2, + "start": [ + 0.025331262455438264, + 445.99269996775433 + ], + "end": [ + -672.3874332477753, + 445.99269996775433 + ] + } + ], + "startIsSpecial": false, + "endIsSpecial": false + }, + { + "id": "8YHZFV23RZTVCm28VX_bw", + "type": 
"text", + "x": 1191.484892987799, + "y": 1009.1835428007021, + "width": 110, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aQ", + "roundness": null, + "seed": 1646704626, + "version": 343, + "versionNonce": 1125239854, + "isDeleted": false, + "boundElements": [ + { + "id": "TvW3zJ7Z-Ttt6Ic5yBFny", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "A2A Client", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "A2A Client", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "R_L1RcA1JwbvQxrBLCVxd", + "type": "arrow", + "x": 1397.9693142287615, + "y": 714.5264206102793, + "width": 3.8065426823122834, + "height": 143.53496726742378, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aR", + "roundness": null, + "seed": 1143925170, + "version": 1099, + "versionNonce": 195303022, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "points": [ + [ + 0, + 0 + ], + [ + -3.8065426823122834, + -143.53496726742378 + ] + ], + "lastCommittedPoint": null, + "startBinding": { + "elementId": "JQvGgKdMWC4EKOJDOKI1F", + "focus": 0.005853574996093576, + "gap": 1 + }, + "endBinding": { + "elementId": "g8aUVo8Q69X6RigT9AdGe", + "focus": -0.16141740456219525, + "gap": 8.529655486161573 + }, + "startArrowhead": null, + "endArrowhead": null, + "elbowed": false, + "fixedSegments": [ + { + "index": 2, + "start": [ + 0, + -94.18687170645717 + ], + "end": [ + 1.582259508973948, + -94.18687170645717 + 
] + } + ], + "startIsSpecial": false, + "endIsSpecial": false + }, + { + "id": "RLesUF1XvwU021CYhYFPm", + "type": "arrow", + "x": 1449.697813738734, + "y": 910.6242098983112, + "width": 0.22325467698829016, + "height": 135.7246769014232, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aS", + "roundness": null, + "seed": 566944626, + "version": 905, + "versionNonce": 401807534, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "points": [ + [ + 0, + 0 + ], + [ + -0.22325467698829016, + 135.7246769014232 + ] + ], + "lastCommittedPoint": null, + "startBinding": { + "elementId": "JQvGgKdMWC4EKOJDOKI1F", + "focus": -0.14969364972650803, + "gap": 7.675019237785136 + }, + "endBinding": { + "elementId": "M_Kadg3TT1vMcz_FeXukD", + "focus": -0.20209544125864382, + "gap": 7.776313172183336 + }, + "startArrowhead": null, + "endArrowhead": "arrow", + "elbowed": false, + "fixedSegments": null, + "startIsSpecial": null, + "endIsSpecial": null + }, + { + "id": "SeWMyQtmgAvhyvK_2npTH", + "type": "arrow", + "x": 2059.604655252544, + "y": 513.7634427224518, + "width": 3.3198483027372276, + "height": 256.68410322504405, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "dotted", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aT", + "roundness": null, + "seed": 1698800946, + "version": 926, + "versionNonce": 1752015214, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "points": [ + [ + 0, + 0 + ], + [ + 3.3198483027372276, + 256.68410322504405 + ] + ], + "lastCommittedPoint": null, + "startBinding": { + "elementId": 
"hqEP1vCrHzRTKWXg-JFYm", + "focus": -0.7670073528319857, + "gap": 3.7412156063446673 + }, + "endBinding": { + "elementId": "7WK-TuODoJhUEB-zG3dAm", + "focus": 0.0047883289346902155, + "gap": 5 + }, + "startArrowhead": null, + "endArrowhead": "arrow", + "elbowed": false, + "fixedSegments": null, + "startIsSpecial": null, + "endIsSpecial": null + }, + { + "id": "RvwXD_xf_4-IF95_ZtuL_", + "type": "arrow", + "x": 2062.9245035552813, + "y": 852.4359273613579, + "width": 1.504523770781816, + "height": 198.89701754518364, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "dotted", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aU", + "roundness": null, + "seed": 402271986, + "version": 906, + "versionNonce": 1981862574, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "R4HnWCTndDc9zwo4HEFcm" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 99.44850877259182 + ], + [ + -1.504523770781816, + 99.44850877259182 + ], + [ + -1.504523770781816, + 198.89701754518364 + ] + ], + "lastCommittedPoint": null, + "startBinding": { + "elementId": "7WK-TuODoJhUEB-zG3dAm", + "fixedPoint": [ + 0.49944058067064756, + 1.0694556524511216 + ], + "focus": 0, + "gap": 0 + }, + "endBinding": { + "elementId": "6NczKbyxcFD029YiPc4dS", + "fixedPoint": [ + 0.49944058067064756, + -0.06945565245112181 + ], + "focus": 0, + "gap": 1 + }, + "startArrowhead": null, + "endArrowhead": "arrow", + "elbowed": true, + "fixedSegments": null, + "startIsSpecial": null, + "endIsSpecial": null + }, + { + "id": "R4HnWCTndDc9zwo4HEFcm", + "type": "text", + "x": 2012.6722416698904, + "y": 939.3844361339497, + "width": 99, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "dotted", + "roughness": 1, + 
"opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aV", + "roundness": null, + "seed": 2122023090, + "version": 134, + "versionNonce": 306605934, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "telemetry", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "RvwXD_xf_4-IF95_ZtuL_", + "originalText": "telemetry", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "SdG5N9TvhfRUNTHULcao9", + "type": "rectangle", + "x": 1230.7045486741627, + "y": 408.20330574133095, + "width": 225.28293129741698, + "height": 60.053902025178104, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ffec99", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aW", + "roundness": { + "type": 3 + }, + "seed": 879532658, + "version": 1502, + "versionNonce": 719475118, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "PLUIAHXLbcA_PuyIrwKO8", + "type": "rectangle", + "x": 1256.9816298342594, + "y": 427.9161778939879, + "width": 225.28293129741698, + "height": 60.053902025178104, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ffec99", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aX", + "roundness": { + "type": 3 + }, + "seed": 1217690674, + "version": 1528, + "versionNonce": 38267886, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "LiQ1CdEkK4KHlZV9xlkih" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "LiQ1CdEkK4KHlZV9xlkih", + "type": "text", + "x": 1342.123095482968, + "y": 445.44312890657693, + "width": 55, + 
"height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aY", + "roundness": null, + "seed": 1490691570, + "version": 1599, + "versionNonce": 109067822, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "agent", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "PLUIAHXLbcA_PuyIrwKO8", + "originalText": "agent", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "hqEP1vCrHzRTKWXg-JFYm", + "type": "rectangle", + "x": 1695.4706917233889, + "y": 331.6204293672731, + "width": 409.7855052270803, + "height": 178.40179774883393, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "fillStyle": "hachure", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aZ", + "roundness": null, + "seed": 776247218, + "version": 1825, + "versionNonce": 2087995502, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "D4tbEhvWB7DqZdS2VGDAD" + }, + { + "id": "iFWDL9gVnPM0rNrKoqXs2", + "type": "arrow" + }, + { + "id": "sEjhF4bHmgT-PUW6fGW_m", + "type": "arrow" + }, + { + "id": "SeWMyQtmgAvhyvK_2npTH", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "D4tbEhvWB7DqZdS2VGDAD", + "type": "text", + "x": 1729.863444336929, + "y": 336.6204293672731, + "width": 341, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ebfbee", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aa", + "roundness": null, + "seed": 
1492832626, + "version": 325, + "versionNonce": 602737326, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "agents/official/beeai-framework", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "top", + "containerId": "hqEP1vCrHzRTKWXg-JFYm", + "originalText": "agents/official/beeai-framework", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "VDPrdKAG_MyrlcMdhRDk2", + "type": "text", + "x": 1713.7216123695725, + "y": 524.729573066088, + "width": 110, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#343a40", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "ab", + "roundness": null, + "seed": 1279513394, + "version": 502, + "versionNonce": 1498629038, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "A2A server", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "A2A server", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "QyeCQDZOUlqBbFEkFsN7T", + "type": "rectangle", + "x": 1709.0684679037731, + "y": 384.9636958815581, + "width": 162.38327362609172, + "height": 57.582967281370685, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ffec99", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "ac", + "roundness": { + "type": 3 + }, + "seed": 2055642354, + "version": 1605, + "versionNonce": 1506450926, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "flFWA1ME8MoyeiTEiQwJU", + "type": "rectangle", + "x": 1727.4060190823475, + "y": 
409.6365883647079, + "width": 162.06085220206634, + "height": 59.84042469248903, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ffec99", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "ad", + "roundness": { + "type": 3 + }, + "seed": 1900687026, + "version": 1577, + "versionNonce": 2128392238, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "-dzqfmX2P0KKk5Mj4sSMi" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "-dzqfmX2P0KKk5Mj4sSMi", + "type": "text", + "x": 1780.9364451833808, + "y": 427.0568007109524, + "width": 55, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "ae", + "roundness": null, + "seed": 1966442610, + "version": 1650, + "versionNonce": 1416788590, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "agent", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "flFWA1ME8MoyeiTEiQwJU", + "originalText": "agent", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "type": "rectangle", + "version": 3410, + "versionNonce": 22346926, + "isDeleted": false, + "id": "GrVkZwEscfZS-6gtRmvKp", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 2085.003981251153, + "y": 321.4612771236293, + "strokeColor": "#000000", + "backgroundColor": "#3078c6", + "width": 63.82322736398904, + "height": 46.804509570011355, + "seed": 504343090, + "groupIds": [ + "jiWI4ts4LcuHNHlif97PN", + "nmkDxVin5q71mr4euoViJ" + ], + "strokeSharpness": "sharp", + 
"boundElements": [], + "updated": 1741870553059, + "index": "af", + "frameId": "4ftzrW38Idx-CUriHjUan", + "roundness": null, + "link": null, + "locked": false + }, + { + "type": "text", + "version": 2148, + "versionNonce": 1798125294, + "isDeleted": false, + "id": "KrcEyjUtwUIfnAdpd2bnV", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 2097.450199411822, + "y": 330.3681651406505, + "strokeColor": "#fff", + "backgroundColor": "#fff", + "width": 38.15530975287421, + "height": 33.01901805537191, + "seed": 1925157874, + "groupIds": [ + "jiWI4ts4LcuHNHlif97PN", + "nmkDxVin5q71mr4euoViJ" + ], + "strokeSharpness": "sharp", + "boundElements": [], + "updated": 1741870553059, + "fontSize": 26.415214444297533, + "fontFamily": 1, + "text": "TS", + "baseline": 32, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "TS", + "index": "ag", + "frameId": "4ftzrW38Idx-CUriHjUan", + "roundness": null, + "link": null, + "locked": false, + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "LFjTJvkBjwHkG6LbNAcFp", + "type": "line", + "x": 1572.7036967514541, + "y": 335.53517419070977, + "width": 44.730504675006344, + "height": 45.66506639666152, + "angle": 0, + "strokeColor": "#000", + "backgroundColor": "#ffda48", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 0, + "opacity": 100, + "groupIds": [ + "sIzVZ9h_H-Ux7toxQCC1Y", + "15J8pQtBN1Iy5hf1p7ilL", + "IC11IOcuLLTyTMNT4BePD" + ], + "strokeSharpness": "round", + "seed": 88884658, + "version": 1984, + "versionNonce": 1518610734, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "points": [ + [ + 0, + 0 + ], + [ + 6.500469792707247, + 0.547580508480814 + ], + [ + 9.664703528717338, + 4.565175032808508 + ], + [ + 10.926224204314332, + 14.100122971331992 + ], + [ + 9.27728793240277, + 22.714872608322384 + ], + [ + 2.9570230112503064, + 27.410127718953436 + 
], + [ + -6.690270556645404, + 28.19006749581451 + ], + [ + -16.511911622647915, + 28.226151221364653 + ], + [ + -19.971417863504193, + 28.65590219181018 + ], + [ + -19.79594828375973, + 32.14531461321165 + ], + [ + -5.547681077655756, + 32.8049029749501 + ], + [ + -3.977564917288063, + 37.378229675105324 + ], + [ + -5.826323165717811, + 41.60195901077371 + ], + [ + -11.864695257119388, + 45.36964182215237 + ], + [ + -22.693949612535274, + 45.66506639666152 + ], + [ + -30.483958676743963, + 42.64786982874864 + ], + [ + -33.08016633561649, + 38.0680465284608 + ], + [ + -33.804280470692014, + 30.878655873103106 + ], + [ + -33.499946327330726, + 22.186595593130075 + ], + [ + -29.022145225880095, + 16.906552503978464 + ], + [ + -20.86087906899885, + 16.821998020029916 + ], + [ + -12.413950516728342, + 16.905759960485025 + ], + [ + -6.1076376791814795, + 15.469339232557118 + ], + [ + -0.7988852199744912, + 9.681862152333276 + ], + [ + 0, + 0 + ] + ], + "lastCommittedPoint": null, + "startBinding": null, + "endBinding": null, + "startArrowhead": null, + "endArrowhead": null, + "index": "ah", + "frameId": "4ftzrW38Idx-CUriHjUan", + "roundness": { + "type": 2 + }, + "link": null, + "locked": false + }, + { + "type": "line", + "version": 2066, + "versionNonce": 516084590, + "isDeleted": false, + "id": "54iMrNid9NngMR2VHvS67", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 0, + "opacity": 100, + "angle": 0, + "x": 1534.9148657691148, + "y": 365.17019394011913, + "strokeColor": "#000", + "backgroundColor": "#3371a2", + "width": 47.06097395177715, + "height": 47.1777959055915, + "seed": 249347954, + "groupIds": [ + "sIzVZ9h_H-Ux7toxQCC1Y", + "15J8pQtBN1Iy5hf1p7ilL", + "IC11IOcuLLTyTMNT4BePD" + ], + "strokeSharpness": "round", + "boundElements": [], + "updated": 1741870553059, + "startBinding": null, + "endBinding": null, + "lastCommittedPoint": null, + "startArrowhead": null, + "endArrowhead": null, + "points": [ + [ + 0, + 0 + ], + [ + 
-6.953114118984614, + -0.029322683198829264 + ], + [ + -11.534590170795337, + -4.637507818077104 + ], + [ + -13.39675204490687, + -14.302968146478364 + ], + [ + -11.670627219894149, + -23.31049485740817 + ], + [ + -5.857030882063543, + -28.83713317452735 + ], + [ + 4.735294545789309, + -29.356268964886233 + ], + [ + 15.778747911113095, + -28.981660195111864 + ], + [ + 19.796434687040996, + -29.355750646317784 + ], + [ + 18.315054583023556, + -33.3353286816693 + ], + [ + 3.9862771738721183, + -34.076920276578626 + ], + [ + 2.2851200880819547, + -37.20194085139175 + ], + [ + 3.4928058845568852, + -43.32760004800533 + ], + [ + 10.342745601740738, + -46.758499413412586 + ], + [ + 21.63418671853502, + -47.1777959055915 + ], + [ + 29.81568980436157, + -44.35803010880034 + ], + [ + 33.331146958829045, + -39.64028216177995 + ], + [ + 33.66422190687028, + -31.833416902002046 + ], + [ + 32.523577922108515, + -22.997416372012225 + ], + [ + 28.27819828269684, + -17.284084989519254 + ], + [ + 19.69442344135926, + -17.19515312060478 + ], + [ + 10.810197464680257, + -17.283251416055418 + ], + [ + 4.174830134076728, + -14.983237620848616 + ], + [ + 0.12876024570309721, + -8.800699092676812 + ], + [ + 0, + 0 + ] + ], + "index": "ai", + "frameId": "4ftzrW38Idx-CUriHjUan", + "roundness": { + "type": 2 + }, + "link": null, + "locked": false + }, + { + "id": "SD4yjHrenxuuwupxnE6RY", + "type": "ellipse", + "x": 1548.6039573738199, + "y": 321.5091062191043, + "width": 5.958211666635718, + "height": 6.3516510880411765, + "angle": 0, + "strokeColor": "#000", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 0, + "opacity": 100, + "groupIds": [ + "sIzVZ9h_H-Ux7toxQCC1Y", + "15J8pQtBN1Iy5hf1p7ilL", + "IC11IOcuLLTyTMNT4BePD" + ], + "strokeSharpness": "sharp", + "seed": 196608306, + "version": 1133, + "versionNonce": 842151342, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "index": "aj", + "frameId": 
"4ftzrW38Idx-CUriHjUan", + "roundness": null, + "link": null, + "locked": false + }, + { + "type": "ellipse", + "version": 1207, + "versionNonce": 1576751086, + "isDeleted": false, + "id": "YyaEPqQ5iIV0JX8s7XB96", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 0, + "opacity": 100, + "angle": 0, + "x": 1550.6071749520124, + "y": 371.5454912630846, + "strokeColor": "#000", + "backgroundColor": "#fff", + "width": 5.958211666635718, + "height": 6.3516510880411765, + "seed": 1464142578, + "groupIds": [ + "sIzVZ9h_H-Ux7toxQCC1Y", + "15J8pQtBN1Iy5hf1p7ilL", + "IC11IOcuLLTyTMNT4BePD" + ], + "strokeSharpness": "sharp", + "boundElements": [], + "updated": 1741870553059, + "index": "ak", + "frameId": "4ftzrW38Idx-CUriHjUan", + "roundness": null, + "link": null, + "locked": false + }, + { + "id": "TvW3zJ7Z-Ttt6Ic5yBFny", + "type": "arrow", + "x": 1163.5807224359196, + "y": 1051.5703088910202, + "width": 64.29528865323664, + "height": 187.23122512391546, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "al", + "roundness": null, + "seed": 1208671410, + "version": 2995, + "versionNonce": 1579622958, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + -94.68146847159505 + ], + [ + 64.29528865323664, + -94.68146847159505 + ], + [ + 64.29528865323664, + -187.23122512391546 + ] + ], + "lastCommittedPoint": null, + "startBinding": { + "elementId": "Y8MDhb5yaeQe38gX_9TFT", + "fixedPoint": [ + 0.2766480859540257, + -0.04227074784931552 + ], + "focus": 0, + "gap": 1 + }, + "endBinding": { + "elementId": "RmjajsbASk_UWPYVjrnTl", + "fixedPoint": [ + 0.499349773652622, + 1.0657194349779282 + ], + "focus": 0, + "gap": 0 + }, + "startArrowhead": "arrow", + 
"endArrowhead": "circle", + "elbowed": true, + "fixedSegments": null, + "startIsSpecial": null, + "endIsSpecial": null + }, + { + "id": "-Nt9Tfjq9ssgnzHZ7ICnD", + "type": "rectangle", + "x": 1334.6526132612707, + "y": 781.3867954777115, + "width": 152.53898093440512, + "height": 79.80101994615279, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#e7f5ff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "am", + "roundness": { + "type": 3 + }, + "seed": 1269855858, + "version": 580, + "versionNonce": 94284910, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "9MN0nxqV6Px3wEr49u_Ii" + }, + { + "id": "R_L1RcA1JwbvQxrBLCVxd", + "type": "arrow" + }, + { + "id": "lDXKPK8r6CGZ3D3rA3UQS", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "9MN0nxqV6Px3wEr49u_Ii", + "type": "text", + "x": 1355.9221037284733, + "y": 796.2873054507879, + "width": 110, + "height": 50, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ebfbee", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "an", + "roundness": null, + "seed": 50124850, + "version": 547, + "versionNonce": 1646980782, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "provider\nmanagement", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "-Nt9Tfjq9ssgnzHZ7ICnD", + "originalText": "provider management", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "JzR7cUVw_O28IL-jOIes4", + "type": "rectangle", + "x": 1519.4997822461519, + "y": 782.127037594573, + "width": 150.0317445048422, + "height": 78.56098558797657, + "angle": 0, + "strokeColor": 
"#1e1e1e", + "backgroundColor": "#e7f5ff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "ao", + "roundness": { + "type": 3 + }, + "seed": 1574371826, + "version": 591, + "versionNonce": 1618274542, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "vmdS-QnG0-Sw622hARQwW" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "vmdS-QnG0-Sw622hARQwW", + "type": "text", + "x": 1539.515654498573, + "y": 796.4075303885613, + "width": 110, + "height": 50, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ebfbee", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "ap", + "roundness": null, + "seed": 927331250, + "version": 586, + "versionNonce": 1879415598, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "env\nmanagement", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "JzR7cUVw_O28IL-jOIes4", + "originalText": "env management", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "L_vlTqkGIrawVwEIhBZ_F", + "type": "line", + "x": 1352.0364309415781, + "y": 1046.7547850379833, + "width": 36.51748877611617, + "height": 37.28045461853377, + "angle": 0, + "strokeColor": "#000", + "backgroundColor": "#ffda48", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 0, + "opacity": 100, + "groupIds": [ + "I_PrQlBOQmLhEiT8WNp9a", + "CEW0zLxr6qnTqcOFae2PC", + "2BHV2t0MOmby9Xa9n5W9E" + ], + "strokeSharpness": "round", + "seed": 1903209842, + "version": 2005, + "versionNonce": 658900334, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "points": [ + [ + 0, + 0 + ], + [ + 
5.30691156783009, + 0.44703866450318036 + ], + [ + 7.890156956615549, + 3.7269583527578822 + ], + [ + 8.92004846905566, + 11.511184282147843 + ], + [ + 7.573875153114803, + 18.54417050628109 + ], + [ + 2.414080847256503, + 22.37732479437201 + ], + [ + -5.461862810101289, + 23.014058992979994 + ], + [ + -13.480141834598735, + 23.043517347012695 + ], + [ + -16.304444427186354, + 23.39436128123145 + ], + [ + -16.16119300502101, + 26.243078948505353 + ], + [ + -4.529065409806655, + 26.781559584296154 + ], + [ + -3.247239960261407, + 30.515172868023473 + ], + [ + -4.756545725472425, + 33.963378734003925 + ], + [ + -9.686205846141272, + 37.03927326674242 + ], + [ + -18.52708920417261, + 37.28045461853377 + ], + [ + -24.886766355927076, + 34.81724874583858 + ], + [ + -27.006281544324192, + 31.078331709691085 + ], + [ + -27.597440307060506, + 25.20899277708487 + ], + [ + -27.348985281901925, + 18.112890999977093 + ], + [ + -23.693358039330896, + 13.802322280790687 + ], + [ + -17.030590707547702, + 13.733292936252871 + ], + [ + -10.134611758923205, + 13.80167525705741 + ], + [ + -4.98620777964791, + 12.628997278326768 + ], + [ + -0.6522010486084335, + 7.9041650669612995 + ], + [ + 0, + 0 + ] + ], + "lastCommittedPoint": null, + "startBinding": null, + "endBinding": null, + "startArrowhead": null, + "endArrowhead": null, + "index": "aq", + "frameId": "4ftzrW38Idx-CUriHjUan", + "roundness": { + "type": 2 + }, + "link": null, + "locked": false + }, + { + "type": "line", + "version": 2088, + "versionNonce": 1736386478, + "isDeleted": false, + "id": "tO5ueHwz3h1toI1BYW9Pp", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 0, + "opacity": 100, + "angle": 0, + "x": 1321.1860477123628, + "y": 1070.9484870527906, + "strokeColor": "#000", + "backgroundColor": "#3371a2", + "width": 38.42005809152819, + "height": 38.51543024121027, + "seed": 295579442, + "groupIds": [ + "I_PrQlBOQmLhEiT8WNp9a", + "CEW0zLxr6qnTqcOFae2PC", + "2BHV2t0MOmby9Xa9n5W9E" + 
], + "strokeSharpness": "round", + "boundElements": [], + "updated": 1741870553059, + "startBinding": null, + "endBinding": null, + "lastCommittedPoint": null, + "startArrowhead": null, + "endArrowhead": null, + "points": [ + [ + 0, + 0 + ], + [ + -5.676445384282699, + -0.023938713912994895 + ], + [ + -9.416711708474788, + -3.7860100378077743 + ], + [ + -10.936960045292144, + -11.676784837306254 + ], + [ + -9.52777084920426, + -19.030429915912762 + ], + [ + -4.781615165111947, + -23.542316248995462 + ], + [ + 3.865842039651448, + -23.966132960553907 + ], + [ + 12.88159509783481, + -23.66030650844786 + ], + [ + 16.161590099274544, + -23.965709811013266 + ], + [ + 14.95220778367895, + -27.214593258573135 + ], + [ + 3.2543525500775603, + -27.820020426013418 + ], + [ + 1.865546739856865, + -30.371252624152127 + ], + [ + 2.851488053022743, + -35.372172971103645 + ], + [ + 8.443702997986321, + -38.17312122730004 + ], + [ + 17.661910510836236, + -38.51543024121027 + ], + [ + 24.341199047354145, + -36.21340466417417 + ], + [ + 27.21117934634493, + -32.36188747348783 + ], + [ + 27.483098046236044, + -25.988449110296592 + ], + [ + 26.551888925889223, + -18.774836106731392 + ], + [ + 23.08600799163737, + -14.11053562207808 + ], + [ + 16.078309247732932, + -14.037932628918963 + ], + [ + 8.825325523426027, + -14.109855101931238 + ], + [ + 3.40828509919594, + -12.232149304475584 + ], + [ + 0.10511843900344156, + -7.184793300988071 + ], + [ + 0, + 0 + ] + ], + "index": "ar", + "frameId": "4ftzrW38Idx-CUriHjUan", + "roundness": { + "type": 2 + }, + "link": null, + "locked": false + }, + { + "id": "mwOdJXvW7YfiFZ-dvhWwr", + "type": "ellipse", + "x": 1332.3616705375935, + "y": 1035.3040584377013, + "width": 4.864218037398285, + "height": 5.185417625009543, + "angle": 0, + "strokeColor": "#000", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 0, + "opacity": 100, + "groupIds": [ + "I_PrQlBOQmLhEiT8WNp9a", + 
"CEW0zLxr6qnTqcOFae2PC", + "2BHV2t0MOmby9Xa9n5W9E" + ], + "strokeSharpness": "sharp", + "seed": 1547008242, + "version": 1155, + "versionNonce": 14287342, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "index": "as", + "frameId": "4ftzrW38Idx-CUriHjUan", + "roundness": null, + "link": null, + "locked": false + }, + { + "type": "ellipse", + "version": 1229, + "versionNonce": 1449104430, + "isDeleted": false, + "id": "khCKVak-7ZgA5eVSlvLJd", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 0, + "opacity": 100, + "angle": 0, + "x": 1333.9970751895053, + "y": 1076.1532092021896, + "strokeColor": "#000", + "backgroundColor": "#fff", + "width": 4.864218037398285, + "height": 5.185417625009543, + "seed": 1891470002, + "groupIds": [ + "I_PrQlBOQmLhEiT8WNp9a", + "CEW0zLxr6qnTqcOFae2PC", + "2BHV2t0MOmby9Xa9n5W9E" + ], + "strokeSharpness": "sharp", + "boundElements": [], + "updated": 1741870553059, + "index": "at", + "frameId": "4ftzrW38Idx-CUriHjUan", + "roundness": null, + "link": null, + "locked": false + }, + { + "id": "F47k3YB5jCVph0rYmDvzY", + "type": "text", + "x": 1075.8445273289217, + "y": 868.1372437662139, + "width": 110, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "au", + "roundness": null, + "seed": 1709007986, + "version": 321, + "versionNonce": 33087086, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "A2A Server", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "A2A Server", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "lDXKPK8r6CGZ3D3rA3UQS", + "type": "arrow", + "x": 1398.1484111060454, + "y": 709.7987498380162, + "width": 
290.07657113995083, + "height": 140.8788496734362, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "av", + "roundness": null, + "seed": 1700529714, + "version": 1213, + "versionNonce": 2020532398, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "W3OZnD4ERbE5CqnIxCHcv" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + -92.16556689975914 + ], + [ + 290.07657113995083, + -92.16556689975914 + ], + [ + 290.07657113995083, + -140.8788496734362 + ] + ], + "lastCommittedPoint": null, + "startBinding": { + "elementId": "JQvGgKdMWC4EKOJDOKI1F", + "fixedPoint": [ + 0.49985491543883875, + -0.025820718130140766 + ], + "focus": 0, + "gap": 0 + }, + "endBinding": { + "elementId": "Z4zd1BeeYPtfiAa2eykzF", + "fixedPoint": [ + 0.063416602754971, + 1.0095765747299028 + ], + "focus": 0, + "gap": 0 + }, + "startArrowhead": null, + "endArrowhead": null, + "elbowed": true, + "fixedSegments": [ + { + "index": 2, + "start": [ + 0, + -92.16556689975914 + ], + "end": [ + 290.07657113995083, + -92.16556689975914 + ] + } + ], + "startIsSpecial": false, + "endIsSpecial": false + }, + { + "id": "W3OZnD4ERbE5CqnIxCHcv", + "type": "text", + "x": 1504.6866966760208, + "y": 605.133182938257, + "width": 77, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "aw", + "roundness": null, + "seed": 1210085362, + "version": 148, + "versionNonce": 761961198, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "manages", + "fontSize": 20, + "fontFamily": 8, 
+ "textAlign": "center", + "verticalAlign": "middle", + "containerId": "lDXKPK8r6CGZ3D3rA3UQS", + "originalText": "manages", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "0Nb4W_hXGZyUqFXQIfn5F", + "type": "rectangle", + "x": 794.4842051548658, + "y": 1059.3414114386503, + "width": 272.4574765184915, + "height": 52.438191341733734, + "angle": 0, + "strokeColor": "#fff", + "backgroundColor": "#343a40", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "ax", + "roundness": { + "type": 3 + }, + "seed": 842113458, + "version": 1032, + "versionNonce": 930417966, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "WYAxNVb8UcM947YBtbvOP" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "WYAxNVb8UcM947YBtbvOP", + "type": "text", + "x": 804.2129434141116, + "y": 1073.0605071095172, + "width": 253, + "height": 25, + "angle": 0, + "strokeColor": "#fff", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "ay", + "roundness": null, + "seed": 783719282, + "version": 1063, + "versionNonce": 1945581422, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "> pip install agentstack-sdk", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "0Nb4W_hXGZyUqFXQIfn5F", + "originalText": "> pip install agentstack-sdk", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "NK951lleKIiiDbV4dcouQ", + "type": "arrow", + "x": 1747.8750178194496, + "y": 811.5202242602063, + "width": 220.77109393597493, + "height": 0.1784876057793099, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": 
"solid", + "strokeWidth": 2, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "az", + "roundness": null, + "seed": 1274436914, + "version": 1541, + "versionNonce": 725227950, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "pZBV8R4pbZwPtkBVNUuzQ" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false, + "points": [ + [ + 0, + 0 + ], + [ + 220.77109393597493, + -0.1784876057793099 + ] + ], + "lastCommittedPoint": null, + "startBinding": { + "elementId": "JQvGgKdMWC4EKOJDOKI1F", + "fixedPoint": [ + 1.007254228058133, + 0.4994835856373981 + ], + "focus": 0, + "gap": 0 + }, + "endBinding": { + "elementId": "7WK-TuODoJhUEB-zG3dAm", + "fixedPoint": [ + -0.02797096646802776, + 0.4986108869509787 + ], + "focus": 0, + "gap": 0 + }, + "startArrowhead": null, + "endArrowhead": null, + "elbowed": true, + "fixedSegments": null, + "startIsSpecial": null, + "endIsSpecial": null + }, + { + "id": "pZBV8R4pbZwPtkBVNUuzQ", + "type": "text", + "x": 1819.760564787437, + "y": 798.9309804573167, + "width": 77, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "b00", + "roundness": null, + "seed": 928507634, + "version": 155, + "versionNonce": 879145966, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "manages", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "NK951lleKIiiDbV4dcouQ", + "originalText": "manages", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "6NczKbyxcFD029YiPc4dS", + "type": "rectangle", + "x": 1972.9786338462945, + "y": 1055.7300694055, + "width": 177.0808167398936, + "height": 63.30837511105257, + 
"angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#99e9f2", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "b01", + "roundness": null, + "seed": 1499609266, + "version": 1445, + "versionNonce": 428213806, + "isDeleted": false, + "boundElements": [ + { + "id": "zuOEsJaGGSCi9NHR0HChw", + "type": "text" + }, + { + "id": "RvwXD_xf_4-IF95_ZtuL_", + "type": "arrow" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "zuOEsJaGGSCi9NHR0HChw", + "type": "text", + "x": 1990.0190422162414, + "y": 1074.8842569610263, + "width": 143, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "b02", + "roundness": null, + "seed": 1908167282, + "version": 1511, + "versionNonce": 998314094, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "arize-phoenix", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "6NczKbyxcFD029YiPc4dS", + "originalText": "arize-phoenix", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "yinQNrtIrteceMDS1SI2Y", + "type": "image", + "x": 2107.882891747344, + "y": 1004.4512993801902, + "width": 73.82396677851678, + "height": 73.82396677851678, + "angle": 0, + "strokeColor": "transparent", + "backgroundColor": "#fff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "b03", + "roundness": null, + "seed": 858600498, + "version": 602, + "versionNonce": 1141787886, + "isDeleted": false, + "boundElements": [], + "updated": 
1741870553059, + "link": null, + "locked": false, + "status": "saved", + "fileId": "b5d5d8198c3c6cbcc25b767d2ebfc5822b8d1659", + "scale": [ + 1, + 1 + ], + "crop": null + }, + { + "id": "qHsfG6G6gmbrkU9U-fs30", + "type": "text", + "x": 1924.3585160626244, + "y": 523.1156913552277, + "width": 110, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#343a40", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "b04", + "roundness": null, + "seed": 1604050418, + "version": 549, + "versionNonce": 1657962286, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": "A2A client", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "A2A client", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "qyf9Z2ct27wdPOVoapE8J", + "type": "rectangle", + "x": 1585.8428704944672, + "y": 1051.740618635202, + "width": 364.3257185589388, + "height": 63.46110161145084, + "angle": 0, + "strokeColor": "#fff", + "backgroundColor": "#343a40", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "b05", + "roundness": { + "type": 3 + }, + "seed": 1678725042, + "version": 1086, + "versionNonce": 1993763182, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "SyuwpWagDMibmmPciOaE3" + } + ], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "SyuwpWagDMibmmPciOaE3", + "type": "text", + "x": 1590.8428704944672, + "y": 1058.4711694409275, + "width": 341, + "height": 50, + "angle": 0, + "strokeColor": "#fff", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 
100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "b06", + "roundness": null, + "seed": 1957595506, + "version": 1061, + "versionNonce": 795020206, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "text": " > brew install\n i-am-bee/beeai/arize-phoenix", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "left", + "verticalAlign": "middle", + "containerId": "qyf9Z2ct27wdPOVoapE8J", + "originalText": " > brew install\n i-am-bee/beeai/arize-phoenix", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "AKd7nyWx8o7bWpio5kxNf", + "type": "rectangle", + "x": 1909.0991667790004, + "y": 385.1876598917006, + "width": 154.29369667677358, + "height": 58.83662453022318, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#d0bfff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "b07", + "roundness": { + "type": 3 + }, + "seed": 683707186, + "version": 1645, + "versionNonce": 1788962286, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false + }, + { + "id": "0TVJkpG81-Hi-d8LTJ8O8", + "type": "rectangle", + "x": 1927.4367179575784, + "y": 411.11420962370266, + "width": 158.62694693394928, + "height": 56.88800468652768, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#d0bfff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "b08", + "roundness": { + "type": 3 + }, + "seed": 380392690, + "version": 1642, + "versionNonce": 1503295534, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "Rk1ID32Za9XegxNCezHph" + } + ], + "updated": 1741870553060, + "link": null, + "locked": false + }, + { + "id": "Rk1ID32Za9XegxNCezHph", + "type": "text", + "x": 
1984.7501914245531, + "y": 427.0582119669665, + "width": 44, + "height": 25, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": "4ftzrW38Idx-CUriHjUan", + "index": "b09", + "roundness": null, + "seed": 1878582962, + "version": 1717, + "versionNonce": 607601262, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553060, + "link": null, + "locked": false, + "text": "tool", + "fontSize": 20, + "fontFamily": 8, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "0TVJkpG81-Hi-d8LTJ8O8", + "originalText": "tool", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "4ftzrW38Idx-CUriHjUan", + "type": "frame", + "x": 780.2879589180775, + "y": 261.8980115823963, + "width": 1417.2287696638464, + "height": 894.8055393352073, + "angle": 0, + "strokeColor": "#bbb", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 0, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "b0A", + "roundness": null, + "seed": 491602034, + "version": 868, + "versionNonce": 18957166, + "isDeleted": false, + "boundElements": [], + "updated": 1741870553059, + "link": null, + "locked": false, + "name": null + } + ], + "appState": { + "gridSize": 20, + "gridStep": 5, + "gridModeEnabled": false, + "viewBackgroundColor": "#ffffff" + }, + "files": { + "b5d5d8198c3c6cbcc25b767d2ebfc5822b8d1659": { + "mimeType": "image/png", + "id": "b5d5d8198c3c6cbcc25b767d2ebfc5822b8d1659", + "dataURL": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAZAAAAGQCAYAAACAvzbMAAAAAXNSR0IArs4c6QAAIABJREFUeF7sfQmcVcWV/qn7lt43aJpNoME9Jgq4L5HGJCaaUWnXqDFCEjP5Z8Vksk3GEWecmHWik8xkzJiAC6IgtoCIGhXQaBI3IEZNNEmDKMgmW6/vvXvrz6m6S93tbf1ev9fd5/1+CrxXt5av6tZXZ6lzGNCHECAECAFCgBDIAwGWxzP0CCFACBAChAAhAEQgtAgIAUKAECAE8kKACCQv2OghQoAQIAQIASIQWgOEACFACBACeSFABJIXbPQQIUAIEAKEABEIrQFCgBAgBAiBvBAgAskLNnqIECAECAFCgAiE1gAhQAgQAoRAXggQgeQFGz1ECBAChAAhQARCa4AQIAQIAUIgLwSIQPKCjR4iBAgBQoAQIAKhNUAIEAKEACGQFwJEIHnBRg8RAoQAIUAIEIHQGiAECAFCgBDICwEikLxgo4cIAUKAECAEiEBoDRAChAAhQAjkhQARSF6w0UOEACFACBACRCC0BggBQoAQIATyQoAIJC/Y6CFCgBAgBAgBIhBaA4QAIUAIEAJ5IUAEkhds9BAhQAgQAoQAEQitAUKAECAECIG8ECACyQs2eogQIAQIAUKACITWACFACBAChEBeCBCB5AUbPUQIEAKEACFABEJrgBAgBAgBQiAvBIhA8oKNHiIECAFCgBAgAqE1QAgQAoQAIZAXAkQgecFGDxEChAAhQAgQgdAaIAQIAUKAEMgLASKQvGCjhwgBQoAQIASIQGgNEAKEACFACOSFABFIXrDRQ4QAIUAIEAJEILQGCAFCgBAgBPJCgAgkL9joIUKAECAECAEiEFoDhAAhQAgQAnkhQASSF2z0ECFACBQSgZrVr07nHBYwDo2gwfzujx+3sZD1U13FQYAIpDi4Uq2EACGQJQKCPHRYBwwaxCMc9sf1ROu+9hn7sqyCipUIASKQEgFPzRIChACAjzzA2pL4TT0XHLeAMCpvBIhAynt+qHeEwLBFoLFjQ2MiEt8sJQ/PViSkkH6SQsp89olAynyCqHuEwHBFoGbVq+s4sFm+8XH8hoEBMK/vwmMXDdfxD4dxEYEMh1mkMRACQwyB2lWvtBkQWevqtkkc4g/52dR74bHTh9jQRlR3iUBG1HTTYAmB8kCgZtVr6ziAlD78xGF2kgHX+dS+9mM3l0evqRdeBIhAaE0QAoTAoCJQ2fF6qxblnZI48MOcv5r/Vjp0fe+Fx9w6qB2kxrJGgAgka6ioICFACBQCgZqVr8/lwBdmIA6rqRW9Fx4zpxDtUh2FR4AIpPCYUo2EACGQBoHqVa8v4hyudYqEbUPy+94Lj6Z9qkxXFE1MmU4MdYsQGK4IVK18fR1Y9g+v+65fhQWcabP7LjgSn6FPmSFABFJmE0LdIQSGOwKSQALcdwPIhON3nN/Ud9HRdKmwDBcGEUgZTgp1iRAYzghUrfyzIoFYI3VvRYI4nM/6vguPahvOmAzVsRGBDNWZo34TAkMUATeBpCUOe4R9Fx5Fe1UZzjdNShlOCnWJEBjOCEgCcauwPBKHMnxUYQGAAbP72skOUm7rggik3GaE+kMIDHMEvAQSTB4mcZhYMAbX9154JN0HKbO1QQRSZhNC3SEEhjsCVaveuBU4/2oocSAASjwTiQe7s++iI+YOd2yG2viIQIbajFF/CYEhjkDlir8sAMZu9A/DLXWYxCG5hMOW/jlHtA7xoQ+77hOBDLsppQERAuWNQOWqN9uAcyWQYhriUIbSb0SaoH0qJZkqo+klAimjyaCuEAIjAYHGjs7Gvkhyr8gB4lNVCXVVYGwsZhhkSC+zBUIEUmYTQt0hBEYCApUr39wMHKa4x5o2qCLyyvX9Fx5OhvQyWiBEIGU0GdQVQmCkIFC54o1FAMyMh5WBOExQOLA7ExdNI0N6GS0SIpAymgzqCiEwUhCoWvnmfM7ZT
6V9XP0EbUn2d5v6L5pGCabKaJEQgZTRZFBXCIGRgkBlx5ttXGNuQ7pv8N5b6gCJi6bRnlVGi4Qmo4wmg7pCCIwkBCpW/JULQ3rgx/leSijy39yAGcn2qRtHEk7lPFYikHKeHeobITCMEahY8TckghPcQwwmDlvPxaE90T71oWEMy5AaGhHIkJou6iwhMHwQiK/4+yIG3DakWyNzbCLm9qQaSTjclGifSqHdy2QZEIGUyURQNwiBkYZAxcq/zQfTkO4mjwDisFRdnK1ItE+hFLdlsliIQMpkIqgbhMBIQ6Cyo7ONa/JGumrncLtlebYoBpsSF7WSJ1aZLBYikDKZCOoGITASEYiv+LvbkO7y6fVuT/K+SHLOFNq3ymSx0ESUyURQNwiBkYhAfEWnNKRnQRy2mosbs1PtUylHehksGCKQMpgE6gIhMFIRiHd0LgIGPkO6g4f3lrrQd12fbJ9CIU3KYNEQgZTBJFAXCIGRikBFR+d8zuCn/vsgAcThGNJvS7ZPmj9SMSuncROBlNNsUF8IgRGGABrSDea+kR4YoFe9cMhgffKiyW0jDKqyHC4RSFlOC3WKEBg5CMQf2iwM6RmJw4aEQXLOJNq7ymCJ0CSUwSRQFwiBkYxA/KEtyCC+0O5+TJTtirMZyfbDKKRJiRcOEUiJJ4CaJwRGOgLRji0PMQYXSRzSx8Yy09sCh0i73j6BQpqUePEQgZR4Aqh5QmCkIxDv2LKAM7gxmDzMIIoIkmAPa8viN6XaJ1FIkxIvHiKQEk8ANU8IjHQEoh1b2xhTc6Q7RGHbRVzkIRBbn2o/jAzpJV48RCAlngBqnhAY6QhUdnS26izS6eAQJnWoJdiWZPvE1pGOXanHTwTimYH2Tc+2VKTiX00YRv1zu9/7ybvnnbe51JNE7RMCwx2B2ENv2SFNLDtHJntIqn0i7V8lXhg0AcoEXPPy87NGV1StiQBU4dd9utFzIKWff/fMmetLPE/UPCEwrBGIrdi6jnOY5bZzeIfs3a602an28RTSpIQrgwhEAf+rf3plV5RBs/xKQqMD792b7D35zumnvFrCeaKmCYFhjUC0YyuGJvlqOkO6BQC3Delsnt4+ftGwBqbMB0cEYk7QtRte+lxzPH67Sh74dxSn+7mxeZvOT+044YSdZT6f1D1CYEgiEOt4ez4HDGmifvzbk0Me4urhTan2ieSJVcIZJwIxwf/0xg23N8Vin3NOOe5Z6U7pv7v9hBPOKOFcUdOEwLBFAD2xANwhTdTBqsThfM/W6+3jyROrhKuCCMQE/0t/3PRKRSTy/sBwCuaXB1LG0jumH39FCeeLmiYEhicCHZ2NUYjt9aqwgokDIRBb15ZDKizyxCrhiiACMcH/wPqnnv1Ic4tbwrDZxIHpvUT/dxbNnPn9Es4ZNU0IDEsEoh3vKNc+0t9ItwDQ28fTHlbC1UDgm+C3PP7oxmNqa084qaHRyq8ZGFYBV/i7if62JeSZVcJlS00PRwSiHW+vO6TGmhWirvIMWW5dGmgzku0tFBOrRAuCCMQhEI4W89ObRsHh1dUB5MHAMOOFGhx6dyX7zyMSKdGqpWaHJQKRjm3oUWUml7KGGLRFMXHIw8Ocxji58pZwNRCBWATy2KNCfI4xDc4d0wJNsZj5i0Mc6jwlDL55O9fJM6uEi5eaHl4IRDveWcCB3eiMyrs9OcRhl+HsJuOSseSJVaKlQARiE8hjtv61NhqFj7eMhShLkxVNXDTU//S/J3zgAyWaO2qWEBhWCEQ7trdxwJhYWRCHGDmWY7cZF7dQdsISrQQiEBP4sY89to4DzJL/ZNBSUQEfbjbvFNqL1T9LXanUo3dMP/68Es0fNUsIDBsEYh1vTzcgssE9oA2AQyEYBny9fvE4cuUt0SogAnEkkEUA7FrVjfeY2lo4EY3qIR+r7L5k4od3zpj+rRLNITVLCAwbBCId283XKj1xOAPWthgXjyFX3hKtACIQE/gxjz8+Fzgs9J5+Tm9qgmnCq
O58XHdFuITwvWTyO3fPPJ7ce0u0kKnZ4YFApOPdzYfewynu+1ghhnRzyMbFLbSPlWj6CXgT+Ma1axtjiSRG3m3w6mDPb5FGdd8lQ5M8sAqd8949qdR5S2YeT4EXS7SYqdniIYA3xTUAoSrSILWor31qUaJURx58V1ElY2sB9hDPYY4zfSq0jy9Kf4qH6PComQhEmccxjz0u1FjeqUXPLCSRmmhE/qQQh1oWSeQdnmylmFnD4+WgUUgEYh1vLQJmqnftxE7oPjup4JFwtQffDQmqGBQXSxIMZzAb2scUvC80/5kRIAJRMBq3Zk2rrkXxUlKD/FrCg+8MSiAfGd0MMU1Liyq6974LSXLvzbz2qMQQQCDW8dZczthCb5h1BrAl2X5YwW0P2vIdCw5xguLK65dCpCbA2bo44/OgvYWi8pZgPRGBeEBveew3ti+6E1dBwjS2Iu7xzHI/jDdoOefQbxh/+uX048i9twQLmposHAKYKTDFohuB+9W65hY+7xCJFHTjjnTsmMM5dKgHOGtEXuKwvmccbjIuGUN3QQo39VnXRAQSANWYx57YyAFOkOKHGyK8pX5ak9szywq9gORhfboN/dFfT38/ufdmvRSpYLkhEO3Y+hAAu8jfL+ud4AXPS453QQweWau26bxVwcZ0BnCbcXEz3QUpwQIiAgkAfdyata0pZiiqLPusI/5yYkM9HFNbI/nFlDpc1TAJ695k6gf3zHz/t0swr9QkITAgBPBOBgfw3Mkw5Q6l5hRoBTdgaw/uVIIq+tu0pROHWdbzS5rpLsiAZjy/h4lAQnAb/dgTcxhnpijtX8QohUytEplvnY9JHOpXe5Kpb9878/0/yG966ClCoDQIxDreXsRdcancW4UtdQNcb7RPQMN3wT5IIGHqKtv24XaJJAIpGPq5VUQEkgav5jVPLgCmxuaREof1Ob+lGRqjUYAA4rAN8BxgR19/27JTyL03t6VJpUuJQLTj7X1BLu0BkXLX6+0TCnr6Zw/uQulfqpDdJzQlUrY81Nk8cslo2stKsGAI9Aygj1nz1CLOZIRQ78sT1zT40OjR0BSPqmKI+LtiDgGD855d/YnziURKsMKpyZwRkNkBNZcdImj9y4oZ6NDXBO1TkXAK8mEP7kKXXDOskGzDfQkrIEYdEUhBsM+1EiKQLBAb/djadcDVBW0uagCIawwuGtvicu9VycOqPmXwXTu6+9/fcSblVc8CcipSQgQwKu6hM5DtSps5Pwdv19snPFSoLmsP7r6VA/+qnzg8UofdIMO47lOhvYkuExZqErKshwgkC6DwlrrWD+uYEKv9kDXiHZHmURBl6e+I9Bt88+7u/lOJRLIAnYqUDIFoxztIBhdlSCerbN/stlT7uIJ5QWnLdy3gqDp22TnSR8YGg82Gy5roMuEgrxoikCwBb1yztjXCmMczy7xoyAEaY1H4cPMoQLWW96O+B7268dyvZxx7ZpbNUjFCYNARwMyAHDRFhWR1ISCsiFzchY2Iu3zPfAb8p7LVIOJwNAA2OEQgg75OAmahJH0YMo2OeXztdMNgmHZT3FT3qqow6OJpTfX2eIJiZ+F33br+6J0zj6U7IkNm5kdWRyMd2/w2CB8E7mi5xsXjCncYXbarjWnaWt/7YxKKfzZEyZvgktF0mXCQl2rhJn2QO16q5prXrG3jTFvrJg8HxmnVVXCqQiKin+ZlROuFwD8PpPTbF5947OdLNQ5qlxAIQ8AhkJAouGY6Wed5BhozCpdadtmuNtC8RvygvigUY8BNcBkRyGCvaiKQPBAfvebpuRz4wiB7CFY3tbpKSiIBxKG+dPsSyW8tOfl9P8yjC/QIIVA0BCSBMI8KK0N+Ds6vNy4ZV5j7IB17G8Ew9soBhm1RJnk4pzIikKKtiPCKiUDyBH3UI0/PBwamntaqxIFzZkMdHFUj84j4RXGnHJFInhNAjxUNgUjHdkWF5bjQps/Rwe40Lm6ZW7BOLd/Dg8nDQxz2q8fXA91GLxj82
VZEBJItUgHlRq15GgPJXetd6Jb3CkohrVWVypN+uDmHnj2J1PnLTjmW8ogMYC7o0cIhEO3YdisHZrvRZiAOq+FNxsUt0wvWi+Xvec5dIcRhHdEYEIEUDPzsKyICyR6rwJKSRJwcIl7Xx1Mb64VKK+hj2VE4QM97RCIDnAl6vFAIRDu2L+Cc3ZhLVkBsu6CZAZe/p0hBYeSh9JAIpFDTn1M9RCA5wRVcePSaZ9YZPp0xlpXwntpYZ5IILngM+e6vxwDo2UskUoDZoCoGioCMiMuUm+gB7rtKI1bcKs74DGhvQVf3gX8EgXBph/G7MwbVvwUubS54fpKBD2R410AEUoD5bexY28gqY3hiUuL3mNCaix89s9zqLH/DfQbv3N+rndZx5hE7C9AtqoIQyA+Bju2tGmed2aSTlQ2Y96EYa4f25sLcSF++ex1wryE/kE1kF/A9u6yZ9rP8ZjzvpwjwvKFzP+iQCDPziFi/my8XAHxwVANMrIz7WlTVXv2CRBiRSIHmharJDwHtwR1mMEWHINSagqLlFjSx0wN7zNS2VqvBt0KkdGL+dtkY2s/ym+68nyLA84bO/6AgkXh8HTBLEnHIA0tj3KzZo5ugKSZzq4eFiug3DJJECjgvVFXuCEQefHcdF2pZ9xYRRByKFLKCX9w8J/fWAp5YtsdMbRtCHDZvKL8TgRQE+lwqIQLJBa0syjaufnY6Y3hbHRqCjJCSRBpF6BPvR305E4JESBLJAnIqUgQEZG5yJ5VBOHGYEooowDbxS0YXxhNr2S5fKgV7mKrUoY49lZoKV46ngIpFWA9hVRKBFAFsJBEwSSTIlx3jZc0e3WCTiPfl5KZInkyxzkUnHT6tCF2kKgmBtAjI3OQyoVpaqcN1SmLACxVWPYhAwojDGclsuGwMBVQcxLVNBFIksCWJaEIScZpw4EZJpE2QSEw5WFnuik65Xt14trufXUyG9SJNFFUbjEDH9lbGIwGGdFXiUB81gx5qmOK2AGHVMZwJMz3BMhKHxWIYkZcIZDCXNBFIEdF2SEQGX1Q/uOQtSaTBtIlYoU+8ZRM6dB7o52RYL+JcUdV+BNiDuxRDuvV7huROhlGYTdwikLTk4bWPEIEM9jomAiky4o2rn58OzLAlEa86QNhERjVAQ4BNRA1lndB550EikSLPFlXvkilcmQGzyAooHtauh0uaBh4TCwkEwJcVUfYvwLAurlgRgQz2CiYCGQTEkUS4IBG/JIJSB5LImU11MKbCUmcF50Do13lnF5HIIMwYNSGoIOvETqZaS27uhQmrHkogHvKw/ylCBLfDFWMLcw+FlkBWCBCBZAXTwAtJEuGOTcSM1KvWfEpjHUyprvA1pt5cTxhEIgOfDaohKwQe2D2HMRCGdPnJkBVQFloPl4xC6WFgHx+BpCEOqyUON8EVYyknyMCQz+lpIpCc4BpY4cY1z8/lBiwMrkW+nKc01kKrSSL+kCdyutDFlySRgc0FPZ0FAh07pzND25AlcZgVGoUJarhkeytEo52h6irRmo9UiECymNZCFiECKSSaWdTVuPr5uRxUEvGf6oQkUuW9sY7l5AuD/xcBGFOp81aectTTWTRLRQiB/BAQYdW9n6Btw/IgBIBLRxdmX1m20922qq4KGg03boIrxpMEkt9M5/VUYSY6r6ZH7kOSRNhC/5vpTMfJjTXQWoXqLDdxSNSkQVMEYNSTRCIjdykVf+QYk8oOFJqBOKzeFJpAMhGHdazinAik+CvC1QIRyCADbjVX98iLcxjnmE/EdPH1RzxFEkFJxCEad4BGSxIhEinRJI6EZh947yFgcFHwUBWpQy1g6DPg8gJE5V2qSiABgpAtj1vaLCKQwV6SRCCDjbjSHhrWDQi/bIhF0ah+UgNmNvRmhnNP3cFU6pvLTz3iRyUcDjU9HBFYvmcBgBPSRA4xhDjs73lh7oIIAsmGOCzgiUAGewkSgQw24p723CTiTIcqdbRWxeGkhprQkBKmJAJIIh1EIiWe0
WHW/AN75wDjpidWGHEopCI0rOx6uGT0wO+CLN0Rwh6hBEYqrEFefkQggwx4UHONqzdMN0BeNnSpq5TXp7U6Dic21Loet4hD/bInxZc8cOrUq8pgWNSF4YDAsr1toBnOhT7flh50qY/fBJeNGbgx20cgGQmMCGSQ1xwRyCADHtZczeoN0zWTRFR1lSwvp6kxFoFZo2shwrQQwV6W6zX01ctOnvYPZTI06sZQRwA9sbIhDluTBHfCZc1zBzxsm0DSEYdL+iECGTDouVVABJIbXkUtLUiEK5cNPbkYLBL54Og6iDJ16qy/O295P4dNif7UuRSEsahTNjIqf2CPEhMrzCZhG7IRk/WHghoO/DLh0nedxgKb9X1JBDLIK5IIJAPglz3/5qwxFdoDlVGtRtfhd9v6Uv+27JQj1xdrniSJgCeKr2zNSkCFwRfRsO7Ez/K6y8tpTRr87/t7k6etOfvIXcXqL9U7AhCw08umM0kov3HYBJePGXheECSQ7IhDToIBN8GVdA9kMFckEUgGtL+w6W+7okxrtoolDL55R7dxajFP9o0dGxr1mCARkR7Xn7lQxs/64KhaaIhp9giscuoNdoNDz/5+/WOrz5r2zGAuLGprGCEgCARm+UYkNvcQUilEdsD7FQnEPEIFomr1A++BXDlx4LaXYTR1xR4KEUgahK966S+fa47Fb/cW6Urpv/v1zCPOKObkIImkYiKzocyxbn/cOUVmNlTDhMqYvJ3uepedcgbnPQeT/Curzmj9VTH7THUPUwRyTu7EAS5rGfje4iKQIGO9h8CIQAZ9AQ58kge9y4PX4LyX//poXTTyUatF1UNqfzK19K4Tj7iimL1xk0jQVMnvZtZXw+RqJzGVZXT3vnIHUsY3Vp3W+uNi9pnqHoYIqASSTupQpREOA79MKAgkC+KwX1CSQAZ79RGBpEH8cxv+9kplRHu/c85xw7U7kfzOkpOO/H4xJ02qs7RbOcC1QZKIJXUcUVMBH6ivEkW8r5wqmfQaxuqOU1vJQ6uYk1bKujv2NkIqJe0P0ehGaG9CA/jAPg/sngMco/KmMaD7fitAbo77t/sbTEdgJIEMbJ7zeJoIJA1oH3n2NX5MHW7KfpisTXlHf7KtmEZ1q3t1D29aJElE9iVIXTW5Kg4zGyWJeMvIN1E+mzD4pp7+/o+QcT2PN6acH1m+ey4Awwt8DeZc7weNtUF708YBdTuX5E7Wls/02XDZ+IHlJ1cJJBvJB8O5kw1kQFOd68NEIGkQa1j9PL9g7ChoikddpdTNW+fQuy+RPHnJKUe+miv4uZavXf3KrdzgX3U/555C9Mw6a1S17earEodNLMDB4Gzn/gS/9LGzJpNxPdeJKMfykjwWBhx2Bp6fI5BAwlRL4uhyKPoJvx4uGzew2+hIIGmJw2zLWdhEIIO8NolA0gBev/oFXhuNwAVjm4TXkz8/h3wYPbN29ehF9cyyulmz8o9zgWE4eH/wRatMdUSDk5uqoCEacUlP8m1UwqVw6DmQ0r+85ozWXw/yuqPmCokAqpgY6wiSlMVmrmkzCiCFuPx03ScqZwe3vx9ocqcl29uAYUrbMLVZYHeIQAq5rrKoiwgkA4Hgz2Mr4vDRMWbQ3IDyBgD06Py5RTOnnpkF5gMuIkgEVRXMVlX43ucY0+DUpioYHY+aeUQ8cbbwkGiSSVfK+O+Hz5j8pQF3jCoYfAQ69raCwVFF5Vmg5gYr/7gTLh09sJvhy3b5Ddr2Hh4ojQwsuZMgEB6QE93Tluufxk1w5SRy4x3EVUgEkpFAJETT66vh+HqMiut8kDjkR5Y5mNDvv/vkaZ8YjPmrWf3qdNBhHTBubxxB6qoZDVUwqUp6aNnqafEXdyKrPoM/nUhEL11z9ni6dDgYE1ioNpa/h3YGzx0NF3lgS/vh0tGNA2pSTe7k2rRDJASG4UzG5k9agQQSJgRZ36MXFhHIgOY5x4eJQNISyIvKimUwe3QdTDIzBXrJw6pmbzL1nSUnHV5Uz
yyrLUEiBkfj+gnB6gs5vUgg0xsqgQUQh0osOoe/7U/oc5/44JTf5riOqHgpEBCBDtVTuo84zF6h8DDAEOvLdm0G4FMcjVIG1RKH9XDFuPzDmbgIJBNxWOATgQz2MiQCSYN4w+qX1nHgs6zNGe0g545pEEENgzdsPOYz2JXQ25adMrVo4U7ULqObbyIa91w49EwrB6HKOrmxCmKaR5WlVIY2Hg6s+2BK//JjZ04Oyd0+2EuU2gtFwE72FEYc6vEAboNLm+fnjebSnaakE0YcSluySAEIJMcowEAEkvf85vkgEUhaCeQl9CIRXk/Wa1MTQaN6o2sjFlVw983vPUnj/MEiEWy+etVrmN3QfVfEftelukoY1xsroV4QoPORzgHMxYk9urF49emTPpnnuqLHio0A3vcwjL12M7593fsF2wSXjs4/PtXSd5XUtt7BBRLYAAnknQWHtKw3ynfLtVpDkMUcz3weXD0Z3wP6DBICRCBpgMY8HToYG5wiMivg2IoYnNtSL79WiENd50nOd+/pSR5XzJhZ3q5Xr3rVyR4XqK5igviOq6uASVVRx6vMFdkX30NZc5LDxmRCO5fsIoP0NubSDLrWatranMKsX9qc//seSCDpJJ+BSiDvoDFcEoifRTxImfZ9BrPhykkDu3uSyxxQ2YAbcgSKC4G61S9tPuRfr+h+8WcGh9dUwBlNMsFTkFCP36F7797e5KC491qdrln5+lwOgDfXFa8c/74xtToGx9VXusZqEYdVGsfAOXR363D1Y2dOXEFLo4wQWLbHOaFn2mDFAhX/yz/VrItAwmwSrs1+PVwxfgA2EItAMqjM1J+JQAZ9geZ/Ihn0rpamwbrVG+YC56Y9wA3XSY3VcEytc/M76KzUZ/BXFs2ccvxg9h6N67qurWPOjWRP83Ico+MROKmxEiKmBKISh3zAycPeY/Cfrzlj4pcHcxzUVhoEXAQSssk6xGFVdD1cNia/y302gWRlb0HC2gRXTMhfZbZk2yIAwxO+R3nDgoYdU2ZwAAAgAElEQVRMBDLorwwRSBaQ1z38spRCPB8Mn37GqBo4vLoi7RmwWzcevfvE1vOyaKpgRRo7Ohv7I/3pjesAUBXR4MTGSmiIqlkOHeJwOsRQotqwL8U++gy5+hZsnvKuSBAIV1Q8Sk1+4jDPAyx/Q7ogEJiVU36OK8bnv78seTvYPTmdQEIEkvdyyvfB/Cc43xaH4HN1qzbMAYbB5OTHm5/j42ProSnmDndin+DNuFVdKf3RxScPPokkoolbOed2DK2gMaBd5H21cTgM74sEvKBClWU+aHDoPqDzq9aeNXHlEJzK4dPl5XvmA+c/9Q0ojDzkyl0Pl7Xkp1a6f8ciAFxH/mNUaB8+MSH//cVHIMGBea03Uvx51eT82xs+K2NQR0KAZwl33cMbHgKAi8KSO31kTB002d5NElZv6JOuFH908cmTB1USwX5UrfzzfAAmNht//y0AGBxWGYXj6uKuOFoqn6jP9uj8nsfPnHBNlvBRsUIjgEZ0xpyb2umJw2p9P1zWkt+Fwvu3Ow4a5jEqI3kNjEA2A8AUcXQJlTo8PxCBFHqVZayPCCQjRLKADKvONnKxqPHjhg7viMwZ1yhiZvnJw/wOALpSxpp7T558fpbNFqxY9aq/zDE4QxdHT8gLt7qqPipVWpUR730RZbzmO53i8NeelDF37dkTny1YR6mi7BBAN15d35tTsEGsOd9ET/e9i6Fzvhq6mwcR2IAIZGtIOlsLngBWIQLJbu0UsBQRSA5gYr5yxrXAfOVIKCiBfKS5Trkj4hCH2kypSKSy4/VWFok8ZN9cDzzZMYhqDI6vj0NLhefCZID9FFVa3Zz/9KkzJ9yQA5RUtBAILNud5njumVznn/klerofbSB4qdbzSSf5DIRA7t2azjPA3QksyWATXDU5f6N9IeZjBNZBBJLjpNeu2tAGTFOCvLklDkEiY+ogrslc5YFvAQfo0ksjiaBxvU9L3grAfAmq3
H1l0FodhWPr4vYgvGOR4Vzk+JMGrO/W+WVkYM9xQQ2k+LJd4XGw1HrticMImkZ+eTru3+5uK63KzFz5n5iY3/6yZGvboRzsnkCKmTzN+Hq4ujU/+85A5mCEP5vfBI9w0OpW/3Eu5xhSXX68yZ0kidRCzCQRGy68LKtglzB458H+/tMG87Kh1XzVyjfncw4/9QZV9KrmUKU1o6ECqhSVlkocFkOahvbu7hT/0rqzx9Nt4MF4R3wEEiZ1KN8PlEBUMgoco9JWwQgkxLPDdTwjAhmMJedtgwgkT9SRRAzDIRHvxoteWeePrTMZRv5hk4eCekIvHYnEOt6YrmmqSi5oOcjb68fUxmB8ZTqVlqWu49DP+d17E/xrL82esDtPeOmxbBDAXOXitnYWxGGfdiC/MOsogXCM+htm0fb1YRNcOTE/ldK9b88HQA+zbIjDBuomuHoKhXLPZt0UsAwRyADArFn1ih0ry12NhHVadRxOb6p2SR2W7Z0rIVCShlEySQQ6OhsrNP0hAObRb7uXBr7KEytjcGxdDKxIWvL1Vo3tqJKXcbc4wI4eg1+3/oPjVg0AYno0HQLLdrg9o1z7bZjKx7gJrhif+0a7ZNu6Qwme/DYQKYM7vXQklPVw5WH5qZQWb1kAjPnvuKRVmxk3wdVTcx8XrbABIUAEMiD4AGpW/XGRY0/wb7rTqivgtKYqe5+1Axd62jU49OzrN87vOGPSoETx9Q67csXfFhza/s2XNsQDi+PFQyZUWrVRaeOR24dlXXdyjFj7SMKAu/al9K+TNDLAhRb0+AM75gDHTITqHp5BQuC8gAQSRBx2Z/InkCVvPQQcLlIWWJg10fme8Xa4aiq62tNnEBEgAikA2DWr/uSLhKuezlESOW1UFahSh9Mss4MXcuA9+0tJIh2dbVzj+BI2uKQL356EscCiMK3GvDxpSh0OobglE4PDjj5ukDRSgLXmqgLvgnArH0gYcSjsIk/w+RHIfdsw6+EJ1pHBvbmrvbL7kT+B3PuWcus9i3Fh8wafDddMpUCKhV5jGeojAikQ4BaJeNU6snoGGLzwxAZ3Pg78XgYwlE+Ji3ooiej8podOnfjDAnUtt2qESgvQ1VeqK1zvr3u5NMUjcFxdDCqtuy92S8HLqi8Fdx0wUiSN5DYj4aUFgSg5M3wlLcnQ9UN+Ydbv25Ympa1voeAX+RPI4rfS3R4MVpkleRPMm7qvUNBSPdkhQASSHU5ZlZI5OdRwD54NNxaBDzXXCO8slTj8nlAAB5KpWx48bfI/Z9VwEQrFOzqVaK/eZeLcb4kygKPrYjDee2fE0yfDDMyoc/5uwoBvPz2r5c4idHtkVblsextw1aXcGn4gcVg/5kkg7zhHiWxsLfkSyMJOvI3r5DlxzWiYygwAPtlKe1kJVj+BXmDQHRLxQCvWPhPZDM9prjYvG7rzktuvv2lg79H5I8tOnfDxAncx6+oqOzrbDCZuryuBJIP6zGBMhWaGQfFXb5GHek5NGrDugJ68bETaRpbulN5Jl7egWmhgn6U7gnfVQM2P+DK/MOv3veO5GZ5BtWTATXB1HvnJ7+6UeU7CiMMn7Ih+bIJPTs3P42tg6I/4p4lAirAEXNkBTeJQXze8J3LOmBo75pTaBWEnMWcFDe79HP64N8E+8viZ43YWoauZq+zobIyDtggYxgHzftzLB6WR99XFzBvs6LbshEkJ2m44Z919nP/kmbPHBEeVzdy7oVVCxq9aBNyM8YSbeTQ6B9qb8le92ASSTupwoZ87geDFPlA39TSGeusnnieB3LNlPjC8n2QfpwL/6tGtroBPTp0ztBbD8OgtEUiR5lGQCMcUs94Tu4QcJZEPjq4SaWbFoUohDvlvp2MJzjsPJFJzHzlz8tNF6m7GamMd4sVGN0kzllbwnRGsaExcE+6+UTPPSBDxqONLcXipK6l/5YUPjX0uY0eGaoHlu+eCuDfkRYPfCZe1zM17WEvftQxoAVUEbvQDIJA0xOGVDPIlk
MVbTNf4MHVVQB+wrWvIhTfvNTSAB4lABgBepkerV72+iAsSwY8fagy82NZcI8jEPm8Fvh+CUHr2J/nVq86YUDJXxcqOzlZdi6CLpemNY/XasYlINgRBHiiNNFc47r6CTD37gjrcfgNu6zUSNw87tRaqrBhe2OSeQJYmfqnUVLhyPEafzf1zv0kgvidDF9J6uHJCbvczlryNBGdHXnA3FWLv5nnmJ1+82QmZ4htCCIFxaIdryIU398Uz8CeIQAaOYdoaJIl44045hCJIZHQ1NCgkolYoXhnlvTmo67c8dNphJTOuY3fiHVsWcPuil0IeprpO7b+QRuqlNKKShxrSxRmi8Ep7t5/zb/1uVvNdRZ6awakeo+amUkgOweQhJpflnynQRyAhxGExO0fvqFwJZKsnlLs5Y+nMIPkmd1q8OUCiSteQOJvNgKunDtyeNDgrYli1QgQyCNNZveJ134brJgkGpzRWQmt1zJFEPMQhXyGpDuvV+eounX26ZHYRAIh1vD0dGEd3XzNffNhSYoC2EVRpNcdl1kN1O7DGZQ3cIhmDw0vdunH1i+eM+csgTFHxmli2U7lo6jsayC84bILLW/IzAtsEEno697q95k4g975thnLPgjis2WVsNlw5Kbd7GYs7pwNnGxyUMhCHtViumUb7WPFWcNqaCfhBAr5y5RtzGVi51a19ww3/9IYKOLLGiX7r5hD3Le+kAX/v6ucXrDl7/GuDNAR/Mx2djVGIoV3kq/4fvbfymbCNHF0XFYSiSh3i79ZegXYTZd/o1/lt/RC7+aXZ9UMvrhYazSFDVFlrrLFYU17GdEEgYVJH4Pe5E4jIDmiGcg/d0z0/5JObY3HnXOAswE7kWV3u6KXr4ZppuankSvbCDL+GiUAGcU4tEgnOCiinorUqBic3VihbgtsIb5/YOQcdoKcrpX9l9RkTfzWIw/A1Fe3Y2saALZLJtvzEoT6A5NFaHYGJVfIWexhxOD6jQvXV1Wewf/nD7KbbSjnOnNt2SR8ZVEv5R8l1V+zVeXo7zY3cL/gt2bpZeo4FfUIYJR8CuWfLIgAjIG2u2a43xadcQbfBNYfPz3lu6IGCIEAEUhAYs6+kZvUb03UdPEmp3KfuMfEInDG6yuXmqxKHqzXG4GBKv3fV6ROuzr4XRShpSyPMlkbSEWVjTIPDayJQg15ouArNfUglDhfBAEPCfClh8AXPzx71cBFGUPgql+1C99yGrKLloifRFWNzDwZ4/3ZnB09LHhbA7E64ckJuXl+Dldzpnk4zjW3AVASSB64bYx586ghKH1D41ZtVjUQgWcFU2EIOibAGv/ZBShwNMU3YRepj6MWkXAyxuyLvi+B7heWTHP6+v0//hydmTXi9sL3NrTaURjhEPJcPsY5gt98pVRGYWKmJLIiSPEyjvL0teiUaMdankkbkCy+eU1++9hGhvrLiVFknaAvLwFP7Crh8bG53GZa+PR14ZEP6tLaetpCorpyYPVEtFrYuxS4haN2/KFzklUduDryBHgP/DfQw4rD6oGlkQM/tFS1oaSKQgsKZfWWYGbBfS62T6WWtj1tdhVkNT2ysgAkiTIhTxiIO61W2XmfDgJ4ug395zRnjfp19T4pQUkgjFRjd15RGvMtMSlxWvzGW1uG1URgdd3tquUPFy/K2VCMuWfJbDYj8R1naR5btMnNaePfbMCMC3wKXj2vNaTaWbG8D5iEpVwVKW85fcyOQe9+aA2BG/A3LBeKXfHLPzSHsH4qrcChxuAhsP1xzeGNOmFHhgiJABFJQOHOrzCSRRRyY55a3e1owrez7amMA1sU8c/O19wTXPsGgJ8UX79eN60udXjbasb3NAH4rA2aSpJs4XKQIILy0ptVEoMIMzqgSpEocnnN8V4KzO16Y3XB9bugXubSVqyNoksI2+cvH5fY+hhJIIHGYTMZvgitzCDGyeOsCYDw4UkCYyozD9fDJKXghMPvPPZ0yonV2xCHr5WwFfGpablJb9j2iklkgk
NuCzaJCKpI7AhUr/mrevg13hZ1QGRHSiLhP4dlBnX866p8U53/r0dm8x85qeSb3HhX2iWjH9gUc2I2q1CFb8EsmaGSfUKUJqQtTjtjpc32aGNVuIiST7QkO33/pnIb/Kmzv86zt/h1mUqQwicOiR0VCYdqMnOJj+QgkjDiUtjCcey4E4s3NYVeVZlyGkXtodbR/cJ6dod5p+nr41OG5EVWe00mPBSNABFImK6Ny5V/ncuHCqH6c6cF3Bm+sn9hQAfUxt9FdNVaLA5z4WT7bneL/sebMln8p+TA7trdGuPDUmhVEHN7+VUQwBH4ERgkbkPPxHnrlXmJnQcSLiNsTAJ/deE7DIyUdsyAQK0GXtyeWQdvzPea0uHJ89ncn7tuG3kf+1K+uvd230c+DKw/L3uh879bNAObGHiZx2MMw29L4VLhyatY3689csumDz6bqAsL0hNlaTBbTjRkw7yi6QFjChU4EUkLwvU3HOjqna5qxDoC5bi1bmySWjzEGx9fHYXJVxLEHWO6wYjaV+yLm+5cEvnFff+qqp0tsYMfeRTp2zuWc46kxMKaWOlYs3xBlMLUmAtUR1+URUwrzZ0C0wqUYjL+Q5LCgZERy/445wEBmC/RusKGH9xyTPS15x01S6YnD7AWSVJYX/NCwXRGRhu205OGRfHIMrf7lB56+72e9h10RiJX1pV/s3nLI+yo3m1EZvevDpStEIOU2kyKhE8dT6AmuzdS1OTCYXBWFDzTEIIqShr23ej2YXP4y3d06v/WxspBG9jZqPIkkYvv8e4nDPS0MWuIatFZrIHkkiDjMgJTyZ/tjQImIxJWrI51qyd6dcQC3wScmZH+nwSKQrIjDbid7AsEovAYa6cMYL0CviKFSrmnN6WLfvKXPvrWwf/wkOWlBusqA7xncCdcckZs7crm968OgP0QgZTqJ8RWWUdHbQWfK8C4F3l6Xrr7m6xeiHZG/MkgYfOOBJLvq6VnNJXX3xd5EO3a1GZzfKj3Rgt181f0E7T/jKjU4rEqGRHE+iveWXQ2X9ljL4YDzF1IGW7Dx3EFUbYlQ6+kmxLcx5xYpV9wQZzJzZNDmq35vNcXZDLj6sOzUPou3oLtvSKj9EFJk/Da4emrWJPjdpWvPeijV/MyrerX/TfRLHeqUt8M1R5QssGiZbhuD3i0ikEGHPPsGYx2dcxkDU93jNzhb79f762MwzRtHy7PB4j+tLIiYh6Nbh58+8cHmG7LvTRFLLt81/5CDmTtUvG9vdaQODPA7qUqD5oqIcpPdvYmK+zG+OjjoHF5IGsaCV84dVXwbydJ39wFPF0TRgynnm+ATE7KPiWUTSAYJQf35qknZv/MiMq5KUNjfMGnK/J7BPLh6atY2lhseeOqhf++dcpELiXTEYRGlHqUUtkV8JbOtOvvFlG2NVK6gCKBdhDGGJ620WQFHxyNwSlPck6TKUWnJdxL/70x5wuAbulLs6nKQRqBDqrU4aNe6xYvgDIg4mvooE9JIHToVmONTpQ5nIpzRW98Zg0EkS7avA2bmllelAXWF2LG/zF3zExOyfyeXvB1+c9vv8iZbzYlAtgSzRQAx20PKMTLuhfc9v3NlcvQYe3m6/xLwLonGV8CnjiT33YLuNPlVlv1iza9+eqoQCGSZFRA1WdPr4zCuEi8eyql1cq8rHl02j6Dqh0NPyvj5E2e3fLkQXR1wHR07pzNDuxVPvsHnav+SrY+CIJLaiJds/MQhaFQ54SKRpAx+2ysfHbV4wH33VMDufWc9MDjbIjdf/ap2SzPVkLkRSABEIfk5ZOPr4apJ2dknMDIuaBvCbRICST9kn5ya9Z5y8/In59/eN/6nW/UKs54wScrbFp8Hnzoqaymn0PNK9TkIZD3ZBFrpEXBnBQy/MzKpKiKSOcmotx7iML/yqndSnO3oTRmfWztrzMrSjxQAHtg9BxhD9Z0peQWN121faI4zmFilAeZYsTY3dUtSicNNJKL0tpTOb3nlY6N+Xqjxa4vf+
jcOPFhNKDrm2TDFHZ/IOXD1YZ6c4AE9WrK9FUDvdH5JSxxWW+vhqsnZEYhILcud1LKuroZs9MKAPjW7+gFg3tLfPbWwr2V2KMGKHwKEIIPUV4VaowOthwhkoAgO8vOo0gItgulyw7MCchCpck9oiMFozMFhvYNm7Cy1yyJvuXL+S3C+vk9PXlbqW+x2p5bvwWRGaJT1JGRyk4e6kTbFGIyt1KDWihvvkTq85KnKKQaHbSnOfqFHIr/484fr9wxkerV7/v514PBjVx1BxKEU0IB/KPWpI57K2K7IU85MoklHHupmz26DqyZlZ+C2MgOmU1d5O8mNm+Caw7OKs/X15U9OeTVRv/nRZFOaoQZq0FbAtaS+yrg+BqkAEcggAV3QZjo6Gw858GKIia9640XJA5szrVOro3BkbcRjG8Eb3m7icFiEgcF4d1+K/Xrt2aO+UtB+51sZZvXjxgLgGOk3nDi81ddFGUywicT0ylIKeRVcimYLm+lKcX67rum3v35uy5v5dD266G8fMkB/wjlIZ1bRaIx9ODX36Ccztnfv21JCCK0ySLvFs49RhZkBs5E6hJBgn1CyvoF+wzI0nk92G89dRxnlVKOCoRntcM3R5H2VcYEMTgEikMHBuSitiDwcDDAroBnVN9gVFm0jH6iPwTgzP7lFHu79wZ93ROdsR0+KX/fbtlGrijKAXCvt2NsKOl5C5J6NJ1SlIlQgSCTjFYnEKe2hEIubFBEFv+IAD+jAf/zaR5v/kEuXY4teP9HQ+Yu5XMJjWuR7qc8c+92M7YgYVVm42FoV4UAYtMNVkzNvvnd3ovrQvAQZhq0NltNVXcvaM+rC+/6wc2X/KGk8z4Y45LloP1x7FAVPzLg4Bq8AEcjgYV2clkTk2yimTQ04zbmnd2xFBI6pi0KldatbnB69xCF3GtWJJ2nAyz26cfXvZjf/uTiDyLFWDJWusQXAvR5O6qnVv/HFNRBEMipueqepm6s4SLufUf+Fx3GDw/OGDne9/vHm/862x9HbN6VjN//eybTvp/7x+O9kqv+yZS/+aVmi5Ti7nAi0GSR1iEmWxbKNUbW481bgZiTl0Dwc3rb4evjUEVnZP/512VP/dWvvhC8f4FaUaU9dIWozjfPbjHnHZKeCywQg/V4QBIhACgJj6SuJdGybw8BAz5SGdFkBURo5oiYKmIfDfaPbTxxyVCbBcIAE5ysO6NHPlk34dCQShh5b3LEHic0nTK8jv0ciaanQAG0lETRce57x0oj6s4nStqTB74V4xffT2knueHVUNNkXYEdRWvCoicZr+q1bP39axsjCF97/0s6VqWbzBJ8dR0GWIUZcrrXepR1KKOwmuDY7+8c1S57dfXdizGhZdRgWAfPI+VSYd2zWMbZK/1YO/x4QgQynORbSSBxJxJZGwrIC4h0KlEaaYuat7gA7rHjWVm9LVbcBrLvP4L/qNSL/XjZEsnz33ENhexfYQf98cxp8MkdBDKWRMRVMxBgLJA5zH/Odt83SnLNlB1P6HVsvGPu4t9noz1/4Z8b1/8i8xJzamab9MPHFU7+V6ZnTl2ziv9M9fgXWQ8Ekuh+umZpR/XPDsic+sah/wpKthuVaa1aaMcw6OweuPSKj99i/3v/kf93ah9KHEiQzROJwYcBhJXz6mBCbSSa06PdiIUAEUixkS1gvSiOH0qub0ojaEf90Y5j4o2uiInS6s/+Y5SyhxHdIZBhmvbsvxf/l2dlN5RNOe+nOuYdypqAXkHLpMkw94v5+VFyDpjiDGnGXxPzN/Yd5Xg7+zTDgD9fsfe7FD+tbbzz12i/t+fmSu0799YGJq/7UWzEmXCIKOGVHtB8nvnzWN9Itn39/YN31P+qb8J8Hucwrr0xcuPTF4Gl+zeFK2JPgFr619Knf/LB30ofd9aaX6ICx/TxL28Qnlzy9e3GfKX0EVhvcFufaOfCZYzISVAlfuxHZNBHIcJ32js7GCFSYAQuDjeuWnQO9XadUR+Dwmqhj77U4x
LOJWhKNdSA1AN7t0/kPfleWRKLkl7D3pbDNUG7mVREGeJ8E44xlUmWZjOJs2noSjuna2hfbt2Pbuyk27feJ2oAN3iUm+FafoWk/SX5t9j+lW5Y3PLD2Zzf3Tv5SVsRhdpIB3GV86kg7eGVY/Rcsfm7nw5ZqLFM6WbMSBvwuY+6xGev+l/t/s+K/esddeBBtH9lIHWJKRME/8s+83+O2Plxf3KE1LiKQoTVfOfcWswJyKY04F/J86iq5DHDzPKomAi2Vyt0RK4+dN1e5604JA53zd/sM/v0/zG66LedOFusBSyKxExVlOEkrP6N6C+/QNMUxX3uA1BGsJrL3Rf3AHji4Zxcc7OmFlMyKZX7SExhn0Z/2f/MjX0sHyVX3PXfnkkTLp3wE5nvIaet7h+1c+Z0PnZVWBXTDfU98YlGiZcnbejxN836JjjP2GZh3TNo0ynjv4/XeitfW9Dd5oiamwcMkMA7wGfjsB0qbprlYa3SI10sEMsQnMNvui6yAnN3of129S4AJ4zKmlm2MoTrHHwLFOZi6c5hj3TqHdzEz4B9mN5QXkQD3qLbE8TbDvi5/R2mkMc5kTpIMxGHfgMdy+F9/D/Tu2Qnd+9+D7kQqZLoURo9Gb+v99vlpPY3mLn/u9TsPNh+TyVlA5ayfTN39ta+1neXcLA/oyTeW/OaJH/dO/FBwJ8NUgQC8Xx8FXzhe5g0J+Xzl3iee/VnPuDOyIlK35PMWv+74kEyF2a5+KlcsBIhAioVsOdabY1ZAJJBj62JQifYRl0rLTxzqdoxbjcHh3WRZE0mAgSODlIAxG0eb6i0rbbuzrcq/WcEcrarMbwEMHfQDe6F7xzvQ1dNtSiV+Oj+6ji3c+LWLP51u+cx64KXuZw7WBMQ/DybFs+q6YWzUmLGs/ZS0YdzPu+uZ7seSozJLCIo6kANbxT/7/gvT9XfBfWu+tqyn6Sev2yHbQ6SOAJUZM4zP6p+f+atyfJ2oT8FJGAiXYY5ApGPHHM5FmPjAOFPy9XbOFnh3ApM5VWLAP+WwbG0D3j+trIBYC9pIkpz9rw78v8vGawuTPRnC2B5gVA7Y3AKkDpRKGmIMqk07tr33uXjJSy9yYfGeg9C36x3ofW8P9PYnXKuNR2MbehZcOjN0Cf76jWkaT/3N/3uYNMXhgqaDvSsuOT2EcGRNN9/3yA9+1DX+m45hPgwHN0kxDtfp/3jCHWH93bL4f5q+kzzqr/f1N48KlZjCPby2Gp+fOXmYv45DengkgQzp6RtY57XlmLfbiTPlJQ537QzGYVbAGg0qzON3OuJQT+CmV1NXQoc7DI39R9kQyZLtbRBh84Gj23N2xOHGhAPeq0EPrvqIeadEUoRdzFsrXla090s9Bcb+XdC74x3o3b8XkviDFtnZ/R+fHBs2s9odr94MwJWb6unUcPK3jzV1v/LIpacfn261zLvzN2/c2d9ypElx3mGa//ZjZOiRtOqr6+9e8+TCnuZznEuDnl6kMdQbEe1S+NzM5QNb5fR0MREgAikmukOh7o7trRqPLOAivWy4t5YzFCbUOBj1VtpIzC2Hu5+1VDfewzv+O2Xwjn6ufXvTh+vfKAuIMLKtJhJaSU8iRUUT3L/gTRuDN6JUgn8GEYeoOoyn+rshtesd6NnxDtx4WPfzX7ruulO9bX97ydrWHx0cvQkY1Ps2ele97kb+3/j9a35+wazzw7D+1v2Pnb3qYO36v+hVIcThJkWl0Gr98yf+Q1i9/3rPI19/oKf2x38JzDYYosayJ8B4Vv/CaWeVxfqgToQiQARCi0MigHk4uAifbqp1lEuENkbujRE3ysMqI+IinvpBiSOIOOQG6mwcugEvpoDd9vKHG+4pi2no6GyEvkoMUhgQ/TdgEw3ZtFEqqYlIw7uV6yqUOBArqx4Txur9O6A5yn67tXrMf/d2b39i21Un7f73e1f/8C+J+FVL+sdMzJgV0EqXPJMAACAASURBVAPmETXG/L9cc1KoU8MX7nzkd7f3t
JzmmUTraBAyNdhp7XL9CyctCyrwT3c9NvVAov/lX/WPdV9ezHghUdamaeyk5BdOe6ks1gV1ggiE1kCWCHTsamOcYbj4tBkQVWkF81dNqZIurxHmMgK4lDnW3qHuu/h3w+DbUwC/0CKRX5SRemuuSSTm/YNwG0M4shwqGBMZEzFvvXJXU1CslzhsfBz3VRhvHIC63gPwSo8Gu1L5nfc0pp2c/PzMF4P6+a3Fj529riu6/kW9Tv6cUfqShRjAgdQXTwu5Cg9w46Llb97cNeEINymFSR2e7w32v/pXz/h/Wa5YKlZCBPJbkSXsMDU9SAgs3z2XAUPVlseFMmzJcJHACsOno9HdidfoDqPuspsoYgrumUK9xeFBHYwf/fEjo38/SCNN34zIuxFBMvGk2s2gggn4uSbKoCYKUBf15GgxN25LOvM/iuAYYCT6obe3D3p7e6FfT9e+NSRZJvWl00Lf8wVLH15z887RH8uWOOya02zy37jzodX/d6Dp/C4rWGKWFxJNAjuYMqKt8LUz3iuL+adOpEWACIQWSHoElu9BdQ7aB3xBGl1HVs9+hsEKD6tiYEaQd2wl9inXtJK4BRazLyLy7V9SnC3u1yL/M9DETgWZ4oWdjVAZRyzmpo25lXZflz+iDwKGTKmJauLyZjhxuEQC5BD5MXRI9fdAX3cP9CYS0vju/ZjfzYx0wfNf/Gjge45qpi3ceK1jX11leozc0tfMih7YlKw9ov8rZ/q8wb656KFfPHyw6vNvoD0lS3WVSRxwdEU/TKhkX/rNZ87NOtpxQeaWKskbASKQvKEbQQ9iQifDmO/ODBim0nFvehi0ESUSjDPli3rruxHvzmFk7T9Jgy/nwDqKkbc8r1m8d9scYPpcJ4R+Vulk3U2Z0heSCV5QxAySVUoGRZcnl+cmOz5qo6+nQO/rhf7eLujtS0DKcBWGWqbDZ+sP3v6jeRd+3jvWf/pVxyO3HRh9Xi4XErGOebV7On/5uUumeev71p0rPrn+QOSXLyWqPdZ4taSH7BSVGWPaY4mvn/OxTHNyweNbZ7VUaAd+NWvihkxl6ffiIkAEUlx8h1ftNpGANDL7Dr7hx2+0s4+rZNBcoQEGkneXDCYOU0ax755g3nKDw5ok03+Qb5bAgk6IyEuemgscpRKvqs9NpK52FdWd+j3yR3WUCakEc7bYEodZyEIpCGWOhZFMensg0dcHid5uSBiy5MxoV+85Nf2PfO8zl15qtfeNXz/4v6v3V/zjX42wvT78gPClUQd+8Z+fbv+C2vdv/brjky93wS/X99eGVBhGHLIWztjBZKQmo+qq/Tdbv91cFbkFnzmQ1B+7f/akjIRT0DmnylwIEIHQgsgdASSSlDEfmEkkofk3LKZwbx7NcU14btVG3USi7qtis/Qc7G1Cke6wf04YsBgixv1lQSb3voVZ/JBMgu+U2HwSRrLu79HgjkRi/RdGR4I4AtyDOUoivV3Q39sNyZ4uiBspmBHt6kuxaGeNpte/0l858V0eFPMqo2S5LfHNj070ksdLB1L/93R/kCosSL3mX3JMi53X/80PP5puMaLkMa46sk7FYkevfumqj0yiuyK5v8UFeYIIpCAwjtBKpEQyF7hwe3Ub20NO2Q5SXNhHxooMgVIqkRuDwxrW1uNsQR6DvPmDAfDnpGHco0VhacnJBG0lFRqqt1AqkR5cWWBh4+Lab02bCZJJVEoleIlTE9kHkUQNn5lBJVmbWDgAQzLp6YJk9wFI6TramDyfcOlRLci1yA2Jb370Zuu7b96x/JqX9iV/+dtkFuShqKvUOo1I5CfJb52fNgIxlp/zm62XNFdFHrDHxQD6UsafFrcd9oER+gaWfNhEICWfgmHSAUzqxAHTzE7JKf+FuamIGFNxBg2eS3jWhqjaY93uwO4t0+DwZ875PaDD0tcvaHmzpOgufns6ANpKAPOzBAQEDDvtp9/MMTYZEgkSiuXtFkgcylHdvpuD3yX7wTiwF5J9PZDC/9TmM
vDIMVX8ik1fu3ApVhP7wSOn1euJJ7oMrcaNc3ZSFj7DGHuh77sXnZLNPCGBjK6UBCJynQkW5LAvof/wwQ9PyZiEK5s2qExuCBCB5IbXoJQ+97fv3dMY1doNgK73EsbSp/awG6C9ad+gND7QRh7YPceUSDxxptKpMpzfMN0sxplCW0lcC5Y4nH3RYzuxf7Cfe103YDHn2m/euKD5+YEObUDPo4oL+BzgSCbcuT+RiyQQ4NUU05hI0Yt3ceKaEuTSrDfIbuLcP+EAqSTwrv2Q7OkGo+sApDDUihhoMAl8q6Xn3pv+8eqrq7636nSeSj0OwJWkJ9mpq2wcGXTFjPjUgwsu2J0R24Wvt7ZUaA9fMLnuOIs4JJHIMb/blWhbc/7U9RnroQIFRYAIpKBwDryyy37/3u1N8cjn1It6SYPv3pvi333otMZfDryFQaph6c7p0kbCrvVtRiGqDKdnsgDq/0fFUDJRVVwqfcgnVA2RetPdqk+cUzl/x+DwCGd87ZsfH7dkkFAIbubezjnANVT9Kfk5Mhz9s7hLgXYTJBGUTvA2vB19XumFqMa87BkUOZh17we95wDoPV2g9/X4VF2tWh+MiWtv7EoYR202LO/f7A4HbjA4GNH4Bf03XPxwxrlY+BoeSvBya8NpY6rgfY1xmzjk/Au3755d3cnziUQyolnQAkQgBYVz4JV9+sX9u6Iaaw6qqSvFf3cwkbpu1VmjXx14S4NUA3oqRYVNAIMWmifvdJulfXR2dbA+JtVbeKvbCqWukkcYcfjyc0jCOWhweE43jBUMIr/520Utfx0kNNzNoL0kDqZU4uSxdxXKgjjs8h5iRgKpiMh87yipiEA0Vlh+xdvXesybyheSfcIQj5KJfnAvGEYWtpOMhwOT9KPRu/oWXJExi6Eo/evXFgHn4iCCBHn+YbXQVIHZM511JC6ict6zqydFJDKIi5kIZBDBzqapz718wK/UUPZUnbPe/Ql+94Nn1P9jNvWVVRnMEAjC4B6QnjSYOJz+O7AgmdRHNcA7JmGqFksNo4ZZV7YbJQikqOGdFIfVwGFtqj/5+NuXTxr8W9BuMmkDrqi5fJPoWSIZN21ZAOGKI6GAJBSLgP054JX6bZUWAOvrAd5zAIyu/QAH38PkYQEX2LM4HIDWrUHl1K5bLt+V1fq8408PAXMIdlRFRJBIVFHZKfPds5tIJCtYC1GICKQQKBawjnkv7t8VMyUQt7HYeeGxOaHWShifX3Vm49BzYRTqLY4SiaPeCt13wtQj8nskEwwNUheVkomqv7dUNGHEYUsw1lZqbogc4LUU8Gc0rq0rGaHc3SltJtIAHyy5ZSQO9wi9Zg1LzRUVEopnESvEYf0iw804aGoWmXTtA959QHqFpXsXzB+NeOV/9958tZPTPdP7c8drc4AZHWqxyTVRmD2+1plv6/zB+aFUL9Czp9f45KPntbqeydQM/Z47AkQguWNW1Ceuen7/czURdrr71feHB7f2vB4D/rSvD65Yc3bda0XtWDEqF/dJ+ueCDninxOOllLteHQ3JqOYSLq9oXFX67FN32L85901sNZj5rPw3GuTZawbAMwB8LXD9xa3tkwMSOhUDILPOu99sA9AsMpni3qXDtmxVigjqm/85VHmhO3VU2E8UDy9bOFSp2B14EaHSuvcBdO8H3nsQoGs/8FQiMMQWr6pp77n5modyQuxXr647hP8sVeJ8X2MFnDRa3lsUPVNID/+9py/1zUfPm/ajnNqhwjkhQASSE1zFL9z++33fbo5pt8hXNZw4XJsjZ7BP57d36/Cvj59Zt7P4vSxCCyiViPskhnLidjFAqLoqyEiPJ+paEWsK/7TuTbgChLguKgqyUEhHnqY9r4fUs4MB/G2Dw281jW80DO3lbRdP/E0REAmucmFnK0QQI94GEGQ3CVNvWdWFEE6ArQVHL0jFJBQUUoSUp0g+oRGFgYPW2yVUXdB9ENA4z/p7MEMlGJV11/V8/9rQLIaBA/+/V9pAg
7Wu3zjAGS3VcHhdzCYRq2tWcM6ulP5IP9fnPf7RI4bmezFoCyu/hohA8sOtaE+d++zBllFRvgMjtwaeG13qaZNgzKIG5z0HU+xnHafXfrtoHSx2xZiTI1mJhmW8PzFroJfwrO6iRxdiin+KAI+WuioH4nC2YLf7MFZmGPCcwfgfQdeeinL+5KDZUe7+6xwwoA2YIBTHtuTjiTTKpWwN9aYNHkkEiQX/xEuNKqn47CmqDSXRC6j2Ygfeg/NH9f/+wfj7vpu67oSnsl5Sv3oFg3re6BVrTm+phml18la9RRxSKpHu3EmD/31/vzH3yX+Y9kzWbVHBrBAgAskKpsEtxB7c9dCMhuhFx9VHhNeJdbxyTleKZGL9rOwPOoddexP886vPqntwcHte4NZErCljrggR4ruIF6aiSaeFl6yBkIp4U5pUd6FBOUzikJuSZVlRLSxyi3JtWq7QK0JKeYYztpGB8fKOS6Y8UWB0/NWhIT6qt4GBEgpDQkl/qTNb4nCGGioFaowLMkEPYcQXl6XbxcEhXZZKAehJVA1izVsNBiuA8UXJ607MnEAKDepgKO7PEobTxlTDVItErDA4ps3GkiT396f+5/GPH/7Fos/DCGqACKQcJ7tj51zG2UJUwcyoj8IRtZaFkzl++TZx4CbmNrBbQ+rT+SvdKVgw5IkEB7T07elgoDuwCKcujcpZn7LTR8vFzQ/JBG92i3sU5gUK1TXYdZPbbNw+7do2AvPc6+E2m/gNeBa4sYkD2wgMXt59+ZTMG+ZA1ufC11shEkGPLotQTNwykKzapqKuCu+KCwBXMc10HRYrGAkG/0j0ATMMvIWOQRTlNIq/a/sPxX5caQBfp3O2Fq47odPX5sINjaBrGwGY72b/qWOqYGpt3BZaLU9lKYnIPiYM/veulP61tR8/csVAoKVnJQJEIOW4Ejr2NjJD32x532A49OkNUcAcG7Yfv7wRFryPKpsrlupJGWt6UuyfhqShPWh+7t+GahvTqIxkErYh5h9mHclE3p+QYUPQDiA/Ui0ijbbWN84/3J5zTs/k7Wm7BnveOIffcjA2MQ4bdR7p3H/llCeLtiQXvjEdwEDJBAM/po8UMEDiCJ0TlFD6u83dR8opSCT25R6FUICx/ZrBNh6y96xLMbYROJPRGJixyE0gzjwf1VAB00dVCvuVShw4HGvODM6hV9cf7gX49DPnH5mdK3HRJmVoV0wEUqbzpy3ftYADu1E1pB9ZK4kE80c4m1HAAHz++VJ90KXzNf3DiUhw6CI3hzC8e4zv6cgjhHBUVvDAiidpVHUhoUTFPQr0VrLmwZ910ZE6/MRh0Y1bwnHsMgD8lUNpN7ZwAzbpwDdEtEjn/k+0vlzwpbrwdSSTNtAY2pyUwI9pV5eXOYMXoPdbE1ump0BL9kkHBVMCEZKIIAb5naMAMzd9+/sgBPzzPLE6BieNqRL3XgRxKERvuSLjnwbn3d0pY6EegX8jIslvdRGB5Idb8Z+SkW43+nX/DE5oiMBxdRHhbun6eN8ly4ApPGecjawrBY906ewbTw5F1990yAsySc0BzoI9ucIklTTEER6GRdIAblJIJHiXAucDXyhxSQ+xtzculUTcc+GWZGS5QClG1vcWMNgCnK3nHPYxzdhg6HxfzzVHDpxc0H4CiTbp3SXun+QQ/DEN4XiwZWhI11NCdQXMXMAmkQjyEII10jWXkgnioWE571YVdkCQGDbGIzCzuQoaYhGbRNQ89JZKUhIJdPfq+n+uv+Cofy3+iz28WiACKef5XLarDTTNdF10TxUG0Du2ToP31UWEqsU61Vp/ETKHy04iByoNl6ZEkuKP9Bja8CMSHChGwmVciYQbIHWkJQ4Xoh4eCVOZmYuJS7MtEovtrQTorSS/d8WosvdBlXDMuXIm06cyk27Hso+K59HT+I3BYbMGfIvBOeeMbY5o2ma0Wzsf5x8p1OfIT6vnv2AVV+DQs5ToOIdI70G3tGFJIabZXRALfkypgyPJW
N8peAS/tv5+HFNfAUc3Vti2Q4tEHMkE3bLlJ2XwHf0puO63c45cVc7bQjn1jQiknGYjqC/L9yw4FP3uRvdPzosSZwDH1EXg2LoIxMWLxkE6t3hOuuaXrgt15h7ZZ8CmHl1f8PjZTbld7ip37Kz+LelsBY4X8YRkUjDXYN/wc/Rqkp7a8qQtSMWcVuHFpAY8RNIx7wQ5lx1dxGHOt/synTS7ODPuTdKl3ucIn0pF/+MfcJp93L+Zs2Q/aIlet+oKCQLXpiASkziEegu/jgAI6cNcqMofYe+D/b1yOGiIR+B9jZWAIVCsKhBHQSLme2KRMP67XzfW9mpw+UsXHJ05SvBQeQeK1E8ikCIBW9BqH9yzCDhGtQ1/kTD0+TF1UTimVnOFpRBnXk8ICuvdMhUmdq3oL38wBb94Ylbjjwva/3KqTCR8ikg1DQfT1dXqoLLp+fa/NFJHKHEE7Hh2NemkmEL3I0z6Ct2RB2TnCJturWe/8L6ybR9IikLCECltbYO6pd7ikahDHgORfMxaDquJwRH1caiMCN8woda1icOsH6+Pmt9196b4F56/+Oi7ymn5lltfiEDKbUbC+iMkEfUSVcCmJ8J5AxxdF4Fjai3VVoZ84/YpzGlY57ynKwWL+iD186dnNb8+VCDKq58onejolaSZ+n9vrvfM6qrgdj3PDZg40mz2OUo+roOIr/PpyCukD1moApmhy1Anwu7hEIc0oJv2EKG2QjLRgEdi3oskSk+zVJm5xuY8g0b2ybVxEanAUmWpNhH17wnDWPSH9mPn5bW2RsBDRCBDaZIxWROwRaHB9RROQYnkqNqoUG+hx1CY1KHuPc6FOQeUXoM/05+C/3xy9jBVb3nnf3HndFMykR5KdiBDT8FSb9piL89SinHt+1k8U4DTvpekIn1d4v4HB9OmYXtgaaZB3VRjIblEkTy8HiLBBybxbRYEZs+eMra6mAaTamIwujIqYn+5ScS8yW4AJHX+4AuXHnvJUNoqBquvRCCDhXSh2kHvLF2/FQAj2Xo3Nf9LhhLJxKoIvL8+AtVm4nHrHfKSh7pvWCczcbUYE9dx2Nmf4v/XlzIWP/2RYS6VqLB6CaUIYdaDNrf0UkI68hiA5BPKLWEqsLAHvH0wIHpgjyl5SC8r22XX/Lvt1huJSukjnWTkWuZZEGKI4KR6RoyrisG0+gphixJqLMMJhYIt6AZ/8MVL30ck4pkXIpBCbeyDXY/w0GKYg1x6y6Q9hcmXrLU6Aq01GmaUc/YsT9hum1ysYFEeFRf+3qfzp3XOVz45e/RPBnvYJW9PBDO0Y09Nl/GnBrBpB25uaTbFYkg+BSOOgMHgxcG+btD6DprEIcnDduM13XnRgC5UV3EZXdf5DExdFey5HVwnumEfWVcBjRWq668VX4tDUjeWb7r8/ZeWfA2WUQeIQMpoMvLqChIJANpHPG6XfmnEIprGGIMj6yIwpUrqgNWScn+S39pSiFlA/NsiFjypceju042nkpzf8fQ5Y1bm1f+h/pCIP5WaDqC1AWfTATiSSpoYVAriOamKsjxpu/bwLJ7Jtg92uWylDmdiY/t2AB7pxX0OJAkMaWL+KborpBANjIoqj+oqV8nHQ2C+xzPgYf48qiIKU+vjoNm32U1pBEOhcH7Dq5e9/+ahvmwL1X8ikEIhWep6fESS+VSMtpEjayMwuToCVcJ31MmNIV5FswoXcTj+OYpdhaOKa0fS4I+nUtrtz5w76tlSw1HS9i1SEVFykVQAicV9MS/bzW0I2TlcJxHzCILSR6Rnv6myQvJQJBDTeC7iYVVUg8vryjuBaSXswhCH2mR1lEFrXYWIj2YdpKw/Uzqc9tqVx/2hpGusTBonAimTiShYNwSRcEcicW1U4afHCZUaTKqKwHiUShTikERifmHeVbDeZcu/y/m3lFp0znckdOOxhMF/+ftzx45sMrEm1iIVDpj3ZPohZwi8uOdKkORbA9mqq7KcY1usDFxsAzzth0g+sfe2AUM1q
Sp5iJv6DLg4tGhgxCpM1VVYH8zKhX+tFeXKiQQnntIiIeG30khMoQKJ/AEN62gXqUZvLeHyK1e8weHPr33iA8cW7J0dwhURgQzhyUvbdSQSbhFJFmoHswjG2UISmVoTEcmYZGgJJc+CcqdEJQ5n/3AHGzQAuhMGf8IA3tHXm1r90gUT6HKWOnEYMTeqTQcDpRRBKq3AMfNe0CfDBpvumQybpevRXE77IcQh9vTeLoh2vWfaO/BSIC4nqa6y7SCRGOjVdUrzUgoWPlHGoWhw3AB0AZaXM6QsjN9h+H1BJtb34k9DEgmSlaYJby4h1bg8unKMkcZBeGo1oF1EebRPN77w96tP+MVw3T6yHRcRSLZIDdVyy7a3Adfm+7PXecQM/w4CDTEGrTURGFepiVhP1oVEay9ySypKKA4v6Vi3obmQTl5Kcv5w0oDHX/jo2OeGKqxF7zcSiwgvgiowDfOiTAcNycWMUWUTQigzZLgMGPJcWvLwPJNO8jEMiO15R2z2Zqh25U+TQLSIJA/rwqBFCniF3vw7/imUSJb0If7U5diQQAwpGUgiwbJILg6xCOknEhX2FSNeCVyzLieqM5gZi4k1ccAb7Y4Uwt9546oTDiv6OijzBohAynyCCtY9TM4U1RYA59L9N/CdCX+RxlUyEU5+bGXEiXJqVuRXecle2you5eQmzpDmAwaHd1PAf29wvk7XI4++eN6YvxRsvMO5IhGWXW8076ngSNGRAj+m5JL5cOCDJxfiSCN1WPVGDr4Hke69KIeIcCSO15WUDlCqNaobhPrK/qDEoSdNKcMiBilpSMIwbKIQwa1MdZYkF+U3hXCklGIApowUKzJWAXplLRhVKPVkJg4VJ7SJYDQUbA5DiKWAXbn56hPuG85LLdPYiEAyITTcfkci0TApE0OpRCYYCmOUgE0F4zchiSCZtFRiJj/vJUWFOLyGeCGZmIZ6H6kI//vtOue/N4Bt0g14fMN5Y3833OAvyngWvt6qMd5ucJEj3Yz1FdRSmAosCynGt0zCn0GVU3xHp9zzhZ3DUiuZ4dqZBnpFNRjV9Z7lx4ElE3LDx/8sKUOorFSpIuTfJsk40opJPirBmHVylH7qmkCv9r4C4ePCYJittRWgaQwMg4PO4dEt10w/ryhzOkQqJQIZIhNVlG4u3T4XOJ8PwJxc2vbpMrtNBYlkjPgvAhErAKBZh3luNOnJTxz4gz/gn5JCVp70XtAN9ludGy8DM57f9PHD3igKFkOp0oUbGiNaxWwMW88ZzOIGn5JeozU4xGEdRGJ7tgn7h7A9YLpg8afjvmtE45CqHy1/9xxSWCoppBCfnQMLWlJHGvUW2kZQjWWKCY7qy/rOQyao0ko2jQejsjrNCnDwq4poMK46Jtct57DlUzNG9B46ogc/lPaMovZ1yfY2KZXAtdlcSAwWWDg0xSWZNMQ0qJGhZp184h6tiqrKcojGNMCbm4qLgMznDY55sfhrwNmfDG5s0Ln2t9cuHL+mqPiUuPLYnW/OAK5PZxpv45zNMjifItQo6kTkqJIMVd+EXoq0TxYhaMgDArrtxndtMQ3ZpuRhZRwUMa6ikKofY942F51eARww7w2qtRrB0OdG+nsbQu0cwoiuuyQSS1KR6irdIRqXKktKNTIAsiOZWLYTvaoWks2TACUTj1hk8aL9dXNlVHhmoRrL0Ni5266Z8ZsSL5GSNU8EUjLoy7Dhjs5G6IvPB8aQTJR7CzkYT81hVUYAGmMajI4joWC4cidniXVZMYw4hGRiwWOqumzCsTzCFEIyDZvbDM7fBmB/1Lmxj3MQqWH/ctGkR8sQ6cAuVd7156kQgVaWYjM4g+nAjCmGwc7GwroZOdbGxdroi0UcLr7IQho1i1RsexO0VEK67VrqK0sKYRqk6prxtL/pUM6nWyEGD8G8GTJNrfX5xQszIv3d64Ab9XjMVw3oGe0clieWyztLqsK83loWcajeXRhCJTG2VV5oVD+e4aMqa1x1XJQwOP/Wu
3NP/OFQWWOF7icRSKERHS71oVQCfC4w0+ju2lB8//CM2u8qWRtlUB/TAG/B15mE4soQ55U6FOKwrCq2LVW9zKiEYpH1yaB44qNkBdQ5/F5sxACvADf2coB9hs7sTH6bL538WLGnruGev30IIgB6Sm/UNG26IfT8MIsDa+TAP+AOL446dimROaSh4B66pw+uukoVWGLvbYfo/p1SXWXeOpcutRFJKLGK3ydHj/8OXDdjXTqsoz996ntgGN9R7R6W1CAJRZUgVDuHaTBXbCc+9190CkMpxV5MHnIBAxItUyBV3xwgpDnY1scj8n4IsFt2zJ35z8VeO+VaPxFIuc5MufRLSCUxjAKMRnfTVhK2e6XzsXeTDqq48M5JfVSDqiiIv1vqcKt2S+qw/60a3gOIQ7bgJg9XXSapqNKNlZBPnFOVi3spDoqLsazFvoogT552aZUI5alUUBfWdzwACEux5WzgjMVMaGTeq0Gduo7OQsINNUjBpGCerdRhl0szX4En7XQShzmP3lN5fw9Uvv1n6W0lCMS5jwEssh04/1z/gssezmZZV/zwycN1pv9Vell5CQJxtVx7ETDFxddFLqZXlqXGEndK0P3XUWHZUonynPVdYuxUSDaMUbrrHjBK1BjF1zCM1bs/c/I/ZDOu4ViGCGQ4zmqxxoQeXFyfDww8ObOzJw67axZbKH2ti2qAISSqIkguAJVR08vLo66yqMhxE/ZLHUHEYavMLOnGvlnsbA4qiak37WV0VicWmPV3wSLKRUvxvXAZdWKJ2e2aJIW/InHpKF1Ib55wl1K7ptxdr7O2c7iaT0ce4fNcueUV0JL9tsEcL/RxpnUBYz857d9uv3kdrHMl1c20RKM/fFSExHVv8qbrrmXn8KumukCDPwBofVs4VwAAIABJREFUKzWNvdD7r5f7vPgqvnvXBQyMk1jKOJvpyZOB6zX+C4lSkukbfwSkGlUSUfmEi6CLDNhv93725A9mGs9w/Z0IZLjObLHHJXKOA6q4MLOfO86TmyXcPcnlVMxBkElck6SCPvhom6+yDfTB6ir3hu0EwpMXIc1MdCYZqNFC3FKCJSFItZiUIqw0qOalSQ9xyMMuBgN3JI6UIXNuS8Lg8sDs44sMEkLonh7wQ7ZkZM1KxvlQCoZIPuiyG9u301RbSXUVA3iY9e77bPdt83fksxSj33+E+6UPSxrx3EJnsB2YdkucJ5ccXHBVTpEOqr79q6+zVPKTLJnAeGXOPRPzxNA/4UhINo31rGEJBKqw4hH47b7PnkoEks8k0zOEgEBAkAlHw7spmYTseGk3N88zrn/668O8DRjoDgPTV6KRVrzQ8rY8alEqUCWmqrnE5i7rsdKWqnuoozZyiEPEa1KIxiWRmLYJJAj8pAxDkANKE9iO+FOVHtSlknHTNguUOXHgkKIHdkPF238RIUSE3QPYTh7RPt/7/c90DOTtiN3ysJmUw7RrKCFLxD0ReRroAk375/4bL/vZQNoSZPDN2+ewRGIBSyVkeH4h/eA9Fg16p04H9NLyJvCqiGhQEWU3H/jsKTcMtP2h+jxJIEN15sq130gmoM8BxpBMpM0kl1NxBuJwDVsVH9w/uP6FkYadauXfUKqxFr+60atdTaL0oGz2+FdLirAbCO2DPXCnLxmJQ3km5LTvm/aMdeZGzK7OZiAwluiF6r++bIYriYBeUbU4wmqu77rl8l0DXZ6x763k9oVCn50DTwn8yTgYn8hV4sjUr+qv/89ntf7e7zE9OUaeHgzg0Th0H32KL9EV2kGqonBz1+dOIwLJBCz9TgjkjADmGzci6M2FZBKQHlbZoYpAHG4BIGw3TLfBBpCABUKpiSMjMQeMNyPZmJVmIfngZT8kD62/F90Fdhm1jZ/ruWXeQzmvkYAHYj9YdQpLpv5ghR/xuOB2QSRaEKkjrK+1//SLFp7ov1Pr6/6Y5e6bGDMJ+g47RnmEC9f0uAaX9H7+9AcLMe6hWAdJIENx1oZqn+/ubANNQ
0JBMlEyKaoDCt29ipcDPJAn8ulHPqf9oaOuUmepavMrEH3vXZQ6ntWi9Rd3/fhTOwu1LCtuWfExnkyt8RrRGTe6WDR6bpBxvFBtq/XUfOXWb0d6DtxixdzqOuZ0V+TgqMYgGtWm9V13amcx2h8KdRKBDIVZGq59REIRkglrAxYWwlwJqBWIQ9imnYYA1MsLvmIFIA4XIWWQfLI47dvDzkUVaPWBw3pgBkb1DXF0SCd1BHeucsurULHj7716Zd33un42v+DZ+eI3LbvhUNz3f7OM6FKVxbtYLDZo5GFhXjv/Z22RrvceAT1Vlaptgu5jTrOnIwJsi/7F0zFq8oj9EIGM2Kkvw4EjoeANbKnukqlhs7RzuCNwZKGuytbGIPbXLMkoK+JQCuXUh0AxyZxEV0WbgMO6Q1LeOojoG0GPYHIxGYHZ90nnfh2sAsMwJZVbX3/LqK6b2/WfX1xbjFVUe/0vj0vWN/xJXhgU4UdKQh42iXzlttla93vLtFRydM/UEyDRPEn8pDF+m/HFM/F+1Ij9EIGM2KkfAgMXWfyQSAyZGhaz+XlTw2a1aQ9bO8emQ27UG4GzzWDAOrjuA/YN79gdr87Qmb7Q4NblT3W+06nNwmwnHOI7NkPlO288qVeMurr7tuvycs/NdtVV3rB4IevvOYFxeI7X1N7bu+DKkuaO+f/tnQuUHFWZx79b1d0zSQgz4SGiAvG4rjwOJsNDfKxLEBcfC3HEXVbRQ2YUEhPRhJegKzIeMBLFMBFNJMRMEkRZdSHAUYTVkIjAHkEmCUQe0UPYXXwsLmSSySQzXXW/za1Hd3V3VVf1a+Z2z788MpnuW7e++/tq7r++7z5q2uL+o1J7dz/OQhyzp+u9qhlDZBqzaeE7diVtUyuWg4C0oldbvU2375xDLGY6/xfOeIpKI8SvRWnadJXYcnDX5N2uWJhbyeRd1HuSuwFhyJFe+/RllpTXccF2/X7BqKgjIsry0mbGgeE9U5/9zU3DK6+4vtVvr6j2KRFJ73n1puE3d73J6jhyEX3mXZE+mCyMICCTxdOToZ3qRUsp0UlszCaWnV46rJO4WGDGeZyjIEoKiQSItnifbiUmtbngLpJyF6WsrSWbDZbxY/uGbW/MZkW/ZJpb2sLqhCNwuV6a37VuMtxGaGNyAhCQ5KxQshUIDOz0395HZMj8v6XodF4bG3a44zL5Nw+VTodVu8sW7irrDsrsJsmBp1Rj18Ft83cRqV09Uruo94S6pT9U1GGzvE5y8CVhnnLVPlAP8WiFe78BbYCANAAqqgSB8SLQvv7ps7KWIxxn1hR1hM/wGjoogksQeYyXN5vvOhCQ5vMZLAYBUukq2zL6bOaL8qvlaxvnKMKqxGMOze+a9Hl+3G7RBCAguDtAoJkIDAx2pjh9GbP8cm5jxpz9lU/LjdixF+LRTPfEBNoKAZlA+Lg0CCQmkBMOXszEHYVRR92EQ5mzjWzqpoVddRufSdxGFGw6AhCQpnNZQoMHXuh8yxHtq0+ZkfnntEmUtemxAzbfe/eZR9yYsAYU04CASlVJy+iTTBepXYRrSleVDv4XtZDvobToqWTmlwaIYMIEEoCATCD8Rl76o79++d+mp80L1DWCg6uW5P0jttwyZtPdG888cnUjbUDd1RNQg+PSlkski7lqG/nk6ary6znCLXL2oP8KLThVrVrHAQKJCUBAEqNqnoLnPPDn1xzXmXFWCvvdSfB1rf5nWVv+dcSmX45YtOr+9xzpr0Vonoa2mqUDg51tlP4ks0pT0bHu+9CLo466pqvUHTJ0cCV7Dy3oqstOuq3mErSnPAEISAveId2/+stHjmhL/8QRkMA+TsEMRv7lSO4tYEt+eZ8lN+23BcRknO+JKQNPnU8keiTzee672cdFOFQrt5EtMN4xzv5upctBQFrJm15blIAcnkk5AhKMQnK/e6LC3iuVfJHxRUUyvTxs8aaszT/66XuOmrTvOmjkrXHIhh3vsW3qYaK5zLJDibubp
hqHdJX7ZIGUVSMdPEnqhoC0oKPP2/Snk6Zm0k9P894dHiUc7ltB3f/ljsD7v5W+2MwjIxZvGWO+yxqjex9832vr9t6HFkRftkmHrHv6I1KIOSzVViN8rENeRRvjKRwq6iDRg/Udk+3ua0x7ISCN4Trxtf7gv3cd3W4c1zUjTUe1q3dVK7Hw3O11WO5n/s6s7ndONKJeLe68T9z7zFUap+QBm7ePSflIVvKDD773dcibl/F0xx3bZ0g7db7a8JGZ5kpJh0rvPe3++9rV78kHyMPiyQpeGYyoY+L/LlvMAghIizk015w7/qvn4NbnA+r3Q1KCumZk6NhpJqVEQBSc/qhUOIJi43d0fteVT3e5ufoDNm8blfyITfwfY2PikYc/eHTN78NuZpd0fH/H2YLEh5np7yXzybnUlHq1hRNxMKlxjkjhiNCI0AV/yV8wtYVIqC1JsKq8mW8uDW2HgGjolLqZdMeLajFYbpvzjCHo2KkGndCRphkZwxGP3CB7cdThPSlHCUf+cyISbipG9WeW5D+M2vIZKcU2m/nxh97/+nvq1h7NKnrNnTvfZFl2F6tX9DLPYhJ/J1nNm3LF1f/pRB0eTyUetixIGnqtqnm33ACdXEryxYPhZB/2stLsxmkhcyAgLeTMkqa47yD33hpXuD7gEFPQ8Yem6ZhpKZpm5tNbfl4++CDsd37+hC6/Y/QTYPmpwn7Hme8g1blZKX5vs/2SzWK7Je0nyTR3PnzO6x9pJvRH//DZ01jSG7PEs4V6nzsLNYZxjJ9+UnGFLxrOZ87YhjfCxM5YUkiqyqMcunSjyvUcLtQhYu6njNGPRYHNdJc1n60QkObzWWUW3/FiPxEvzp1U0C+5vxwzNUWvn2o6P/1xd1ck3O+Ln6iTCkfwPHWOevp26vM6WJvpzzbzHy0pn5YkXrVt+SQZ9Iot0//52/Ne99fKGlp76WPufO50ImMGs90lhehkpllqPQYxneSnnnyBzbdNCYMrmMF0lbt+g3PCUWpd3ddzuJcQtJ4s0YetSGq/H1BDPAEISDyj5i6hXgvbJjaTDL7aNOLplojeoMRkikGvm5KitMpy+WPs/qvBRX5hm9uZqtyXu27BFYd89OFuu+F1roGUTn7el0qhqTOF09mWipWKXvgJdZN66bGH3WuoRJF7SDZ+aRdkhCwK/m5LUyXqzpaGVEaq9qizOwUJZ3xCveeDiU4MRlm+fdIpnxeHnIjkZk3l2+as3/DGOSzpxCMh2474t1IF6aqk4xwQjub+O21S6yEgTeq4isxWIpIW6n3ZsyJ2X82HBoGKX9Nu0tFTTDqizaTOjOrs82c74pCbBux35sUikB8byQuRKzpuF+uOwRREOAERcq8XrCPYmfuRkX++G+HkbMydG+jkPcXwO3ff2uBUWscWTwjcyMK1MVi3+s1PUzlRhySynGjD/Tz6qEA4fDUt2IgmWLNzoaGDb13ciIijor8GFK4jAQhIHWFqXZUjIqReSfqhUDsDK9YLv3d7xKmmoCPbTXptu0mHZ0xKqejEK1gqAsExkMC4iCMc+XGBwhSXP2YQLO8Lk79eJSAauYgmOsLJDVx74YoSDj/N5IQi3uI9NQkgJwjO2IUnbgXrNNwknC9QNhNZtpuiUv8uK8z+HOhw8KWflqlP2BaZQy+/ZM04ag3GOLT+i5sUxkFAJoWbA42844UeYurPvaI1RjgK8OSerpk60gYd3mbSjDaDDk0b1G64iSZ/LMAf5wimq5yOOfAQ7gtIcfrK/93t9/MRiDubqTjy8H53ivqiUCwq7u9OR1+QknJTbzlbczOlCiMndba6dtaLNFSKKo+tXMhRv3GO1ND/UmrPX4dTw6/+gMxDrx2+6SIs6Jxsf7sathcCoqFTGm6SikZScomzIjkwzTd/3aJOMSAcUbZNMYmmp10x6UwbdEjKINNZv5gf5ygcTwgTgkDHXZC6ykcl/qrtguilaHGeu5dUfvwiv1AvEGn4YqeGRkLGOZRIWCrKk
NJJT+VSUwlYuIwqSFdF1GnsH6b0q39SwjEiDL6daPqXIRwN/+vABSogAAGpAFZLFr39hW4i7iYi9bMjKuKIbntAbIp0p90kajcFdaRNmmIKajOFIzBu5x+MEiLGOXIztvLjHH7ayc/yFKTEvG1BlK0F6apAKiqXrvLGOVRUkpXOYL2TjrK8KKMkJdUI4chlvvLgjLEDSjAo9eqfyRjdN0JtUzcQt18H4WjJv76mbxQEpOldWMcG3P77biIxh1iJib8AMSpFEy0cuSfwMNO83I9KgakalKCoY2rKIPWJ+qw9JUh9Wi5dlReggBB5s8GC4xz7bXd+mIogxrwwYlSFFsS53wvNjIq+cr19BPDq01XGgWFK7/4Lmfv2kPo3CTEi0+3rDW7rg3DU8f5GVXUnAAGpO9IWqXDghZlk2nPE/r3/lBp+9e/tQ4+YLtumlg4Wl+hLhOBEjrWEdMxFT/tqBX3aGWMJxgVuIQ5Me1LRhCsSUTaUE4GQcxJFHZULh5AWmft2k7n3FTJH9pDIjuZuGk63rTTMQ/uGv3bBpN4SpkX+ilq+GRCQlndxfRo49Qvfv8Sw9q+2ph9O9vTDyG6fTnLqoYHKywwmJx2oL6giJvKJvFyUEJSxr1hwEgmHJ0ah1RZ+KKRN5r4hMkb2kDkyRMaBkRKnsGlutdraLxy74aJn6uMx1AICjScAAWk845a4wiFfXLdKHDjw6WBj2DBJTjmErOkzSCpByUxxfs8/TidIf/mFYzvtKlJm/kBJpAei0lXlxCZeOIzRfWSM7neiC/XTOLAvMipiw/iLTLVdc2DpRWqKNQ4QaCoCEJCmctfEGDvl2h9eaR7Y943Sq4d3tPbUDpLpduJMO9nTOojNFMl2X1iq6bS9cxI87efFq4J0VaLIp1Q4hG2TGBshY2w/qcFv9W9z/3BAQMvYIMQ+NlNr9y+d97mJ8SquCgK1E4CA1M6wpWuYcu0P7zOyo+cWNrLcGEMUDiaZzjiiItumERspYvXTNEmm24jTbSEnNlg4Cvr38Dap9JMY3UdCSieaENYoieyYIxok7fDIIi6AMcSTdibz8bG+C59t6ZsHjWt5AhCQlndxdQ3MLL3v+NT+4ftI2n9T9EgdXmHZQXLvFOepO72JKPWQMOQfLDKeT5H8W6cfl/wmNlKzuS1zBlnZt5BKj2XUoL17qAF8JTbBQ4kOpzLuRwnSVcboiNfp5/t9w87usabNGFRVmKP7THPvK2n1b2N0nymssROIeFr4mHwVA/WC9nG6bdGBvo9uqM4rOAsE9CIAAdHLH1pY037DXfPEWPY7TucZdyQdIDfT30pR+/V7+84ru8vu9L77jrDk8AMk5Smll65mgLyoFiF2sjAe41TmJxlueyzMHmVDlkZuEpY1L675ue/LChirNN76tMFX7u27cNx3GU7cBhQEgQoJQEAqBNbqxduu33i3YVtqHYh3xOVjwojkz2Fh7BRmqmf/l7ofjWPXdv1d54ps9k5RIlzVpcxyLRBiH5mpn7CR+drotR96rpwdU5be9Q45Zq0T0o2MCo8qBEyIP7FpXjJ63QU/jWs/vgeBZiMAAWk2jzXI3rZl97+FsmP3Cmav46x9Wi4LY1PGTv1LXNShmtR2wz03Cyu7JFGn7RRKMi1XDLORXpORqa8msuH6uz9LtrVUEAWmkkVcK8GsMWmm1rbJsasRdTTopkW1E04AAjLhLph4AzI3PvgJYY+uUq9PL29N8mm5bJhfHvvCudfHtm7pLw5v5733s6TTkz3tl6sxYJ9p3DUqps+nL773/+JtuOvwjDRuFdL+SGzZnJ5Es2BBfxQifcnotd0/S1QfCoFAkxKAgDSp4+pldtuyB9YQ258qW19lq8iHrUz6AvvK990fZ6N54wMfSMmxHxFzeeFK8LSfu5YQf7TM9MX2NR+Mvb46J730p2cIttaRlMfH2et+Xz7yESL1vVHDvpq+eH68cCW7IEqBgLYEICDauqaxhrV9/cE3sxD3EvPx0
Vt/JEkTeXaqooZ4VpAxd/Tz5+yMsz7z9fu/QZKvzJerYlZTcWdumD8eszMLE0UdSjyW3b9IWNkbSdD0snbEDgM5BfZyKnVN9upzV8a1Hd+DQKsQgIC0iicraEfm5s0fI9u+lYkDHWdRBUlnV/mdq2nelr3i7PmxZix/9LC0tfdnRHxG+agn/mm/8HzxmezVH0jWeS//+WHpMV5FLC+IFbDYqMOR35eEQednrz7vN7HtRwEQaCECEJAWcmaSpqRv3nIrsYzu6JMKR75/3ysMY/7Y5WfdGXf9VP+m94ms/WMSwhOuKmY1FUUdzPSSSKXOz15+dqLOO73s528jQeuI6QTX3toiHyHEA2Np40K6/P2vxLUf34NAqxGAgLSaRyPak+5/5HQWvI5YnhhapLJxDr+K3wmDerJL5jwehzG1/KFriPhr1XfaxRGS0/f/3DKtjyftvNPf/MUCZlZbstRJwIxl1lXnXBPXdnwPAq1KAALSqp4NtCv9rUcXMMlAxxn4sirhYBIk7szK1Gfo8neWf/Je/uhhKcO6g1i+PxJ1zCK8kvOc8uIa64qzliVyn0pZUfo7zPTRSAFLsJI9fy2xl5kutq/6hx8luj4KgUCLEoCAtKhjnWYtf/QwM218W5D8WOVRR1Bk8v9mwXuYjavk4neujkOXvuXXp7Hku4noDaGpooo67Vy26X8E84ezl5/1RNz11ffp5Q+dZguxziA+qWYBc15YJXaYQvQkvX4SG1EGBJqVAASkWT0XY3d65W9PtdlaL8I6zsrHOZyrsaAdpmHMyy56+2/jsBm3PPZ5wXJZHYVDXfIRW47MTZqyMvt/dQkJcRORDL64JKCG5VpRNDaifhX0MztjfoIWvfvVuPbjexCYDAQgIC3oZWPl41cJoq+XNK3KdJVbj/iBbUy5lBa9tXznuXL7DNMeuZ1I/mN41ONIUQT16PmybIil8tJ3/Wsid618eIYhjVtI8scrt6HIPt8kIb4qF7/7S4muj0IgMEkIQEBaydErt88QRvZ2QVTYedckHLRHEF9hLzpjTSyqbz9xiiGsjUR0TKl4VSgc+cWDewyRPt+69G2/jL2+KqBsIGsdEZ9cmQ0RwkG8RwrxSfrsu/490fVRCAQmEQEISIs4O/XdJ8+WggZKOu+q0lW5x+6npEE9tOC0J+MwGd/5zZUknBlOhUfsOEdYebczF0S/tg1rbuKU0arHPylY3nxw9Kc0ZZV0oL4wCNrORL106Tti2x/HB9+DQCsSgIC0gFeN1VvVnlOF6ZXaog6V7/+2XHDqZ2PxqKhHjG0QxIUvnapk+5GCh3/vREE3yEVvvzb2+qrAwGCnMWqvYKKLKhOwkHEOrwJBYoOcklpMvV27E9mAQiAwCQlAQJrZ6QMvdBKNqpTRmblm1CocRENEspc+dbKaPVX+uPWJUwwWqtyxBQWTPu37JwXFRtAeg/jD1sIzNsVd3vl+9eBsIlLvE5+V3IYCxSodkmF5OS08XUUyOEAABMoQgIA06+0xsHMOGazEoyNePKKftIt6z21E1E29J+yKxfLdrZcLIb9Zdadd0IfnUmbbmffPSZyyum1wCZEo7OgriXxKx+yHSNrdtPD0zbHtRwEQAAGVZsbRdATWP99PRIsrFo7QTjsXBqyg3hND3sdRRGdgsFOM8QYS4rz89Yue6EuARglY/nNh0C1y/mmfS+SLgcFOsgw1UP6h5AJWTkQd+7eRNLppYVe8eCYyEoVAoPUJQECayccDz8wkw1RRh5uuqT1dpWoZ8qKO+KfuNVu7hBQbSXgpq0qe9ks0JnfyEAvupfmnxqfMVB0qZSWEYnBcVQJWEnU4CzzWU0YswXhHM/0xwFYdCEBAdPBCEhtuf66bpFC5/o46CYe66haiA92JOs412y8TzMsLO+0ow6M2SSxSEUHbWRjddMmsF5IgoOKUVewMr4AdocLhXPUyWnCqiuhwgAAIVEgAAlIhsHEvrgbKjWxfLmWVdFpuQYcZukDvK9R7g
qq3/KHSRba5nojmulFPSShRdH58uso5QYhv0SWz82m4cla4KauNRByYLFDOjnLCkTtviKTEeEec//E9CJQhAAHR+fYYeH42Gd4Mo6TCUdCvhgpH8pTVwI7ZJFmNNcyqm3ColBlTL82fnSxlddvgHCInZeVOFqhkhld01LGNMsacRJGXzvcHbAOBCSYAAZlgB0RefsPzPcTUT8z5WVYlhRM+7efO43uIRnsSdZxrt/cSOzOcOmrrtANGM2+jNKuUWbKB6tsG+4jEdfHCURSNRAuHO96x4JQeXd0Ou0CgmQhAQHTzlpuyUsIxL9q0SoXD6WAvo94T43P9Kl3EaVVuXkXCERP5GCRXyIu74md5qXqCKatKUmZlhcPZDFEN1qtxJBwgAAJ1IAABqQPEulXhpKx4I3FghlFB5QkHpwvPeZFIPfWftDXWTpWyYlpHkmdFb3hY9LQfIxwkhEqZ9dDFb1VpqPgjmLJKmq4KzdQVfDhEwphD87viGcRbiBIgAAIeAQiILreCmmVlU8S4QFXCoVqWPGX1vR0qrdNPVM+UGW0j006esloz2E8sFlcU+cRFHUQY79DlHocdLUcAAqKLSwee3UxC5GcZ5eyqKl2lnvr7kqes2ryUWeijfPj267FrQMQKuvjkZCmrVYMzyXQGypNHPvHCgfEOXe5t2NGyBCAguri2RECqEg4v6lCL4hJsR+LMslKzvFTKKuyoJvJxFiYmT1mtGfTWtySMfOLTVaohQyRoCcY7dLm5YUerEoCA6OLZgee6SagUVjWdtnPOloOjxH3Ue0L8inJVOjZlVY2AVZGykmJxohdMJRMO1TJsSaLLPQ07Wp4ABEQnFysRIV5CwttdNzZNxC8S0UYi0Z8o4lBtdWZZqZSVjJjlVZVwqDTXCrp4VrKUlbIhKzYnjnwSpaucTNsK+vSpyWzQye+wBQSalAAERFfHDTwzh4hnHxSHzlITeSuR2JpYNPwKcgsDi7Y+d76vJvJxzhsiEslTVuqUWwf7SJC7vqPkKLeKPGqMhrCqXNf7GHa1NAEISEu7N9C4simraqMOuY2yXPkOtqsHVZqtaMJAOeGIEDi3eVsoYyTbz2uy+BrtBIFxIgABGSfQE3YZf2EgU0jKqlrhcM5LnrIqbvzqwcB29FE2+CeVmRkm+Su08PT4/bwmDD4uDAKtTQAC0sr+jUxZleu0I9NEbpqL1QynClNWxYydMRDaVbDmJOk4hx91SKMH7+5o5ZsXbWsGAhCQZvBSNTaGpqyqHudwh0jUXlZ2FSmrMPud7Uq4nyR1F7xVMWwsJnc+r3c2d8QbA6u5I3AOCNSdAASk7kgnuMLIlFUN6Sp1qqghZRWHZNXgHDJpJrGcSYJnE3sTBwTvJhZbD27FspUy5uZEm0DGXQvfgwAI1I0ABKRuKDWoKDRlVaNwOIvyakxZaYAGJoAACNSfAASk/kwnpsaSlFVN4xxuG+qZspoYKrgqCIBAAwlAQBoId9yqXvv0OsrNsqphnMMRDc/qRqasxg0MLgQCINBIAhCQRtJtdN0Dz8wkaXubEAZ7f+/CsSvZfQOd2VX+gZRVo/2G+kGgRQhAQJrVkWt+103Ced1sR8kq8uqEAymrZr0XYDcITBABCMgEga/psmt3qO3XwzchTCQeBRGHawpSVjW5BCeDwGQkAAFpJq/nUlYh268nEg4vzVU4TIKUVTPdA7AVBDQiAAHRyBllTcmlrGRHQbnqhQMpq2bxPewEAU0JQEA0dUyBWU7KSi4OFw4vqohsR0i6CimrZvA6bAQB7QlAQHR2kZOysgKzrDxjE0UdEcLhLAyUPXRxl6oXBwiAAAhUTQDumGgOAAAGCElEQVQCUjW6Bp/opKzsdQX7RCUSDi8iCdsTEQsDG+w0VA8Ck4sABERHf699qp+Y8imrWoVDLfIQ6o2BXXhbn47+hk0g0KQEICA6OW5gcCbJVD5lVRBFxG6zHtESHiLBSFnp5GfYAgItQgACoosj12zvJiHyK
atEUUfkOIf/7o76bb+uCyfYAQIgoA0BCIgOrgimrBIJR5lxDn9PEqSsdPAsbACBliYAAZlI9wZTVvUSDmeWFVJWE+lWXBsEJgsBCMhEedpPWTEFFgZWPc7htoIJKauJ8ieuCwKTkAAEZCKc7qesEkUdXqFQbQl8iJTVRHgS1wSBSU0AAjKe7vdTVkyz/JCh/OWjBskL1AQpq/H0Ia4FAiCQIwABGa+bQaWsyJ9lVS5VlWCA3LcZKavx8h6uAwIgEEIAAjIet4VKWUl/YWCN4xy+vUhZjYfncA0QAIEyBCAgjbw9VMrK9hcG1inqULOsJPfQAuxl1UjXoW4QAIF4AhCQeEbVlcilrNQbA8sdicY53AqQsqrOFzgLBECgIQQgII3AuuapfiKVsqpTusoRD15B87GXVSPchTpBAASqIwABqY5b+Fm5lFXIGwNzZ8RsP1JcM9MQMVJW9XQT6gIBEKgPAQhIfTgSOSkrKtx+vaTuCtJVSFnVyzOoBwRAoEEEICC1gh0Y7CTbVCmredFVVSwcKmeFlFWtvsH5IAACDSUAAakF7+ods8lwXvrkLQwszT9FD4OEjI84H/EQMWGWVS1+wbkgAALjQgACUi3m723vISYVeYTMskq4/Yh/bVc41H+2kU3dtLBrV7Vm4TwQAAEQGC8CEJBKScemrKpKVykrkLKq1BcoDwIgMKEEICCV4l+zfWt4yqpq4UDKqlIfoDwIgIAWBCAglbjBTVsNFJ5S8bRcP12lqtlCaeqm3q7dlZiBsiAAAiCgAwEISCVeuG37ZhJ0pntK1eMc6uQhIlpC87vUADwOEAABEGhKAhCQStx227Y+EuI6Rzzi3s8RrDc/SK7SVf2UoX5EHZWAR1kQAAEdCUBAKvWKEhGiHiI6riSVVVxXXjheJEH9lKJ1EI5KgaM8CICArgQgINV6ZvXgbBLGHCLZSaR+Bg5HOKQabN9KNm3GtNxqIeM8EAABnQlAQHT2DmwDARAAAY0JQEA0dg5MAwEQAAGdCUBAdPYObAMBEAABjQlAQDR2DkwDARAAAZ0JQEB09g5sAwEQAAGNCUBANHYOTAMBEAABnQlAQHT2DmwDARAAAY0JQEA0dg5MAwEQAAGdCUBAdPYObAMBEAABjQlAQDR2DkwDARAAAZ0JQEB09g5sAwEQAAGNCUBANHYOTAMBEAABnQlAQHT2DmwDARAAAY0JQEA0dg5MAwEQAAGdCUBAdPYObAMBEAABjQlAQDR2DkwDARAAAZ0JQEB09g5sAwEQAAGNCUBANHYOTAMBEAABnQlAQHT2DmwDARAAAY0JQEA0dg5MAwEQAAGdCUBAdPYObAMBEAABjQlAQDR2DkwDARAAAZ0JQEB09g5sAwEQAAGNCUBANHYOTAMBEAABnQlAQHT2DmwDARAAAY0JQEA0dg5MAwEQAAGdCUBAdPYObAMBEAABjQlAQDR2DkwDARAAAZ0JQEB09g5sAwEQAAGNCUBANHYOTAMBEAABnQlAQHT2DmwDARAAAY0JQEA0dg5MAwEQAAGdCUBAdPYObAMBEAABjQlAQDR2DkwDARAAAZ0JQEB09g5sAwEQAAGNCUBANHYOTAMBEAABnQlAQHT2DmwDARAAAY0JQEA0dg5MAwEQAAGdCUBAdPYObAMBEAABjQlAQDR2DkwDARAAAZ0JQEB09g5sAwEQAAGNCUBANHYOTAMBEAABnQlAQHT2DmwDARAAAY0JQEA0dg5MAwEQAAGdCUBAdPYObAMBEAABjQlAQDR2DkwDARAAAZ0JQEB09g5sAwEQAAGNCUBANHYOTAMBEAABnQlAQHT2DmwDARAAAY0JQEA0dg5MAwEQAAGdCUBAdPYObAMBEAABjQlAQDR2DkwDARAAAZ0JQEB09g5sAwEQAAGNCfw/oPIX2erPpNsAAAAASUVORK5CYII=", + "created": 1741608871970, + "lastRetrieved": 1741870557194 + } 
+ } +} \ No newline at end of file diff --git a/docs/stable/images/bee-banner.jpg b/docs/stable/images/bee-banner.jpg new file mode 100644 index 000000000..17fe6affa Binary files /dev/null and b/docs/stable/images/bee-banner.jpg differ diff --git a/docs/stable/images/ui-example2.png b/docs/stable/images/ui-example2.png new file mode 100644 index 000000000..db50a0a08 Binary files /dev/null and b/docs/stable/images/ui-example2.png differ diff --git a/docs/stable/introduction/quickstart.mdx b/docs/stable/introduction/quickstart.mdx new file mode 100644 index 000000000..1462293c4 --- /dev/null +++ b/docs/stable/introduction/quickstart.mdx @@ -0,0 +1,171 @@ +--- +title: "Quickstart" +description: "Get a running instance in minutes and explore pre-built agents" +--- + +## Prerequisites + +- Basic terminal/command line experience +- LLM API key (OpenAI, Anthropic, Gemini, watsonx, or free via [OpenRouter](https://openrouter.ai/) / [Groq](https://groq.com/)) or [Ollama](https://ollama.com/) (requires a powerful computer) +- Fast internet (2–3GB download) + +## Installation + + + + +## One-Line Install + +Open the terminal and run this command to install Agent Stack: + +```bash +sh -c "$(curl -LsSf https://raw.githubusercontent.com/i-am-bee/agentstack/release/install.sh)" +``` + +This interactive script installs Agent Stack CLI, downloads and starts the platform, prompts you to configure your LLM API key, then launches the web interface. + + + + + + +Follow the [installation instructions](https://docs.astral.sh/uv/getting-started/installation/). + + + + +Install QEMU via package manager ([instructions](https://www.qemu.org/download/)). + + + + +Open a new terminal and run: + +```bash +uv tool install --refresh --quiet --force agentstack-cli && agentstack self install +``` + +Follow the interactive prompts to finish installation and setup. + + + + + + + + + +Windows support is experimental and requires Windows 11 22H2 or later. 
For the best experience, we recommend Linux or macOS. + + + + + +Open PowerShell as Administrator and run: + +```powershell +wsl.exe --update +``` + +Follow the instructions displayed, if any, and continue by running: + +```powershell +wsl.exe --install +``` + +The command output may instruct you to restart your PC. Do so, and afterwards run the same command again. It may take up to two restarts to complete the installation process. + +WSL is properly set up once you reach a working Linux shell. You can verify this by running `wsl.exe` without arguments. Once you have a working shell, you can close it and proceed to the next step. + + + +Even though WSL2 is required, Agent Stack CLI can't be installed _inside_ WSL2. All subsequent commands should be run from Windows PowerShell. + + + + + + +Open PowerShell and run: + + + + +```bash +powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" +``` + + + + +```bash +winget install astral-sh.uv +uv tool update-shell +``` + + + + +```bash +scoop install uv +uv tool update-shell +``` + + + + +```bash +choco install uv +uv tool update-shell +``` + + + + +Close and reopen your terminal after installation. + + + + +```bash +uv tool install --force --python-preference=only-managed --python=3.13 agentstack-cli; agentstack self install +``` + +Follow the interactive prompts to finish the installation and setup. + + + + + + +## Usage + +```sh +agentstack ui # Launch web interface +agentstack list # See what agents are available +agentstack run chat "Hi!" 
# Send a message to chat agent +agentstack run chat # Try interactive mode +agentstack info chat # View agent details +agentstack --help # See all options +``` + +## Platform Management + +```sh +agentstack platform start # Start platform +agentstack model setup # Configure LLM provider +agentstack platform stop # Stop (keeps data) +agentstack platform delete # Delete all data +``` + +## Version & Upgrades + + +If you are having issues with these commands, you may be running an old version of Agent Stack. Run the one-line install script above to upgrade to the latest version. + + +```sh +agentstack self version # Check version +agentstack self upgrade # Update +agentstack self uninstall # Remove completely +``` diff --git a/docs/stable/introduction/welcome.mdx b/docs/stable/introduction/welcome.mdx new file mode 100644 index 000000000..5a63fd92c --- /dev/null +++ b/docs/stable/introduction/welcome.mdx @@ -0,0 +1,53 @@ +--- +title: Welcome +description: "Open infrastructure for deploying agents from any framework" +--- + +Agent Stack is open, self-hostable infrastructure for deploying AI agents built with any framework. Hosted by the Linux Foundation and built on the [Agent2Agent Protocol (A2A)](https://a2a-protocol.org/), it gives you everything needed to move agents from local development to shared production environments—without vendor lock-in. 
+ +## What Agent Stack Provides + +Everything you need to deploy and operate agents in production: + +- **Self-hostable server** to run your agents +- **Web UI** for testing and sharing deployed agents +- **CLI** for deploying and managing agents +- **Runtime services** your agents can access: + - **LLM Service** — Switch between 15+ providers (Anthropic, OpenAI, watsonx.ai, Ollama) without code changes + - **Embeddings & vector search** for RAG and semantic search + - **File storage** — S3-compatible uploads/downloads + - **Document text extraction** via [Docling](https://www.docling.ai/) + - **External integrations** via MCP protocol (APIs, Slack, Google Drive, etc.) with OAuth + - **Secrets management** for API keys and credentials +- **SDK (`agentstack-sdk`)** for standardized A2A service requests +- **HELM chart** for Kubernetes deployments with customizable storage, databases, and auth + +Build your agent using LangGraph, CrewAI, or your own framework—SDK handles runtime service requests automatically. + +## How It Works + +1. **Build** your agent using `agentstack-sdk` +2. **Deploy** with a single CLI command +3. **Users interact** through the auto-generated web UI + +Your agents request infrastructure services at runtime through A2A protocol extensions. 
+ +- **Development:** Run locally with full services for rapid iteration +- **Production:** Deploy to Kubernetes via HELM and integrate with your infrastructure + +## Get Started + + + + Get up and running in one command + + + Deploy your existing agents to Agent Stack + + + Build your first agent with the SDK + + + Deploy Agent Stack to Kubernetes for your team + + \ No newline at end of file diff --git a/docs/stable/logo/beeai-lockup-white.svg b/docs/stable/logo/beeai-lockup-white.svg new file mode 100644 index 000000000..bb813dfaa --- /dev/null +++ b/docs/stable/logo/beeai-lockup-white.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/stable/logo/beeai-lockup.svg b/docs/stable/logo/beeai-lockup.svg new file mode 100644 index 000000000..d768b6204 --- /dev/null +++ b/docs/stable/logo/beeai-lockup.svg @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/stable/logo/beeai_framework_dark.svg b/docs/stable/logo/beeai_framework_dark.svg new file mode 100644 index 000000000..2741fec08 --- /dev/null +++ b/docs/stable/logo/beeai_framework_dark.svg @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/stable/logo/beeai_framework_light.svg b/docs/stable/logo/beeai_framework_light.svg new file mode 100644 index 000000000..958b5269e --- /dev/null +++ b/docs/stable/logo/beeai_framework_light.svg @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/stable/reference/cli-reference.mdx b/docs/stable/reference/cli-reference.mdx new file mode 100644 index 000000000..565a2ba16 --- /dev/null +++ b/docs/stable/reference/cli-reference.mdx @@ -0,0 +1,369 @@ +--- +title: "CLI Reference" +description: "Complete reference for the Agent Stack command-line interface" +--- + +The Agent Stack command-line interface (CLI) provides tools to manage agents, configure models, and control the platform directly from your terminal. 
+ +## Basic Usage + +```bash +agentstack [COMMAND] [OPTIONS] [ARGUMENTS] +``` + +To see all available commands: + +```bash +agentstack --help +``` + +## Agent Commands + +### list + +List all available agents: + +```bash +agentstack list +``` + +Shows agent names, statuses, descriptions, locations, and any configuration issues. + +### run + +Run an agent with specified input: + +```bash +agentstack run <agent-name> [input] +``` + +**Examples:** + +```bash +# Interactive mode +agentstack run chat + +# Direct input +agentstack run chat "Hello!" + +# Input from file +agentstack run chat < input.txt + +# Output to file +agentstack run chat "Summarize this" > output.txt + +# Append to file +agentstack run chat "More analysis" >> output.txt + +# Pipe between agents +agentstack run agent1 "input" | agentstack run agent2 +``` + +### info + +Get detailed information about a specific agent: + +```bash +agentstack info <agent-name> +``` + +Shows description, input/output schemas, examples, and configuration options. + +### add + +Add an agent to your environment: + +```bash +agentstack add <location> +``` + +**Location formats:** +```bash +# From Docker image +agentstack add ghcr.io/i-am-bee/agentstack/agents/chat:latest + +# From GitHub repository +agentstack add "https://github.com/user/repo.git" + +# From GitHub with version tag +agentstack add "https://github.com/user/repo.git@v1.0.0" + +# From GitHub with specific path +agentstack add "https://github.com/user/repo.git#path=/agents/my-agent" + +# From GitHub with specific dockerfile location within the repository +agentstack add --dockerfile /my-agent/Dockerfile "https://github.com/user/repo.git#path=/agents/my-agent" +``` + +### remove + +Remove an agent from your environment: + +```bash +agentstack remove <agent-name> +``` + +### update + +Update an existing agent to a new version: + +```bash +agentstack update <agent-name> <location> +``` + +**Location formats:** +```bash +# From Docker image +agentstack update my-agent ghcr.io/i-am-bee/agentstack/agents/chat:latest + +# From GitHub
repository +agentstack update my-agent "https://github.com/user/repo.git" + +# From GitHub with version tag +agentstack update my-agent "https://github.com/user/repo.git@v1.0.0" + +# From GitHub with specific path +agentstack update my-agent "https://github.com/user/repo.git#path=/agents/my-agent" + +# From GitHub with specific dockerfile location within the repository +agentstack update my-agent --dockerfile /my-agent/Dockerfile "https://github.com/user/repo.git#path=/agents/my-agent" +``` + +### logs + +View and stream logs for an agent: + +```bash +agentstack logs +``` + +Displays the agent's log output and continues streaming new logs as they are generated. + +### build + +Build an agent from a GitHub repository: + +This command clones the repository and builds the agent image on the platform. You will get an image ID +which you can use to add the agent to your environment. + +``` +agentstack build +``` + +**Examples:** +```bash +# Build from GitHub repository +agentstack build https://github.com/org/repo.git + +# Build specific version +agentstack build https://github.com/org/repo.git@release-0.0.1 + +# Specify Dockerfile location (useful for monorepos) +agentstack build --dockerfile=./path/to/Dockerfile https://github.com/org/repo.git@release-0.0.1 + +# Specify docker context location +agentstack build https://github.com/org/repo.git@release-0.0.1#path=/path/to/context +``` + +**Arguments:** +- `github-repository-url`: GitHub repository URL + +**Options:** +- `--dockerfile `: Use custom dockerfile path + +### Client-side-build +Build an agent from a local directory. This is useful for CI/CD, multi-platform image build +or local push of agent image to registry. + + + Requires Docker running + + +```bash +agentstack client-side-build [OPTIONS] +``` + + +**Examples:** +```bash +# Build from current directory +agentstack client-side-build . + +# Build with custom tag +agentstack client-side-build . 
--tag my-agent:latest + +# Build without importing to platform +agentstack client-side-build . --no-import + +# Build with custom dockerfile +agentstack client-side-build . --dockerfile ./custom.Dockerfile +``` + +**Options:** + +| Option | Description | +|------------------------|-------------------------------------------------------------------------------------------| +| `--dockerfile ` | Use a custom Dockerfile path. Path should be relative to the repo root or a subdirectory. | +| `--tag ` | Docker tag for the agent | +| `--multi-platform` | Build for multiple platforms | +| `--push` | Push the image to the target registry | +| `--import/--no-import` | Import the image into Agent Stack (default: true) | + +## Model Commands + +### model setup + +Interactive setup for LLM provider configuration: + +```bash +agentstack model setup +``` + +Guides you through configuring your LLM provider with the required API keys and settings. + +## Platform Commands + +### start + +Starts the platform: + +```bash +agentstack platform start +``` + +### stop + +Stops the platform: + +```bash +agentstack platform stop +``` + +Stops the platform but keeps all data intact. + +### delete + +Deletes the platform and all data: + +```bash +agentstack platform delete +``` + + + This removes all agents, configurations, and data permanently. + + +## MCP Commands + +### mcp + +Manage MCP servers and toolkits: +```bash +agentstack mcp [subcommand] +``` + +Use `agentstack mcp --help` for available subcommands. 
+ +## Server Commands + +### server + +Manage Agent Stack servers and authentication: + +### login + +Login to a server or switch between logged in servers: + +```bash +agentstack server login +``` + +### logout + +Logout from a server: + +```bash +agentstack server logout +``` + +### show + +Show current server information: + +```bash +agentstack server show +``` + +### list + +List all configured servers: + +```bash +agentstack server list +``` + +## Environment Commands + +### env + +Manage environment variables: + +```bash +agentstack env [subcommand] +``` + +Use `agentstack env --help` for available subcommands. + +## System Commands + +### ui + +Launch the web interface: + +```bash +agentstack ui +``` + +Opens Agent Stack's graphical interface in your default browser. + +### version + +Display version information: + +```bash +agentstack version +``` + +### install + +Install Agent Stack CLI and platform: + +```bash +agentstack self install +``` + +### upgrade + +Upgrade Agent Stack CLI and platform to the latest version: + +```bash +agentstack self upgrade +``` + +### uninstall + +Uninstall Agent Stack CLI and platform: + +```bash +agentstack self uninstall +``` + +Removes the CLI, stops the platform, and deletes all data. + +## Global Options + +Most commands support these options: + +- `--help`: Show help information for a command +- `-v, --verbose`: Show verbose output +- `--debug`: Enable debug output for troubleshooting (set via environment) \ No newline at end of file diff --git a/tasks.toml b/tasks.toml index 4a98589ea..edbf14e5f 100644 --- a/tasks.toml +++ b/tasks.toml @@ -406,16 +406,36 @@ if gum confirm "Publish version $publish_version now?"; then ) # Backport install.sh version updates to main branch - echo "Backporting install.sh version updates to main branch..." ( set -eux -o pipefail git checkout main git pull origin main + echo "Backporting install.sh version updates to main branch..." 
{{ mise_bin }} release:_update-install-sh --version="$publish_version" - git add install.sh git commit -m "chore: update install.sh version to v$publish_version" + + echo "Syncing release docs to stable..." + + # Port docs from release to main branch + rm -rf docs/stable docs/development + git restore --source=release -- docs/development + mv docs/development docs/stable + git restore --source=main -- docs/development + + # Port docs.json structure + git show release:docs/docs.json | \ + jq '(.navigation.versions[] | select(.version == "development")) | .version = "stable" | (.groups[].pages[] |= sub("development/"; "stable/"))' \ + > .new-stable.json + jq --slurpfile new_stable .new-stable.json \ + '.navigation.versions = $new_stable + (.navigation.versions | map(select(.version != "stable")))' \ + docs/docs.json > docs.json.tmp && mv docs.json.tmp docs/docs.json + rm .new-stable.json + + git add docs/stable docs/docs.json + git commit -m "docs: publish stable docs for v$publish_version" + + git push origin main git checkout release )