57 changes: 56 additions & 1 deletion veadk/a2a/remote_ve_agent.py
@@ -34,7 +34,62 @@ def _convert_agent_card_dict_to_obj(agent_card_dict: dict) -> AgentCard:


class RemoteVeAgent(RemoteA2aAgent):
"""Connect to remote agent on VeFaaS platform."""
"""Connect to a remote agent on the VeFaaS platform.

This class provides an interface to remotely connect with an agent deployed on the VeFaaS platform. It automatically fetches the agent card (metadata) and configures an HTTP client for secure communication. Authentication can be handled either via a bearer token in the HTTP header or via a query string parameter.

The class extends `RemoteA2aAgent` to provide compatibility with the A2A (Agent-to-Agent) communication layer.

During initialization, the agent connects to the remote VeFaaS agent endpoint, retrieves its metadata (`agent_card`), and sets up an asynchronous HTTP client (`httpx.AsyncClient`) for subsequent communication. Depending on the provided authentication parameters, three connection modes are supported:
- **No authentication:** Directly fetches the agent card.
- **Header authentication:** Sends a bearer token in the `Authorization` header.
- **Query string authentication:** Appends the token to the URL query.

Attributes:
name (str):
A unique name identifying this remote agent instance.
url (str):
The base URL of the remote agent on the VeFaaS platform.
auth_token (str | None):
Optional authentication token used for secure access.
If not provided, the agent will be accessed without authentication.
auth_method (Literal["header", "querystring"] | None):
The method of attaching the authentication token.
- `"header"`: Token is passed via HTTP `Authorization` header.
- `"querystring"`: Token is passed as a query parameter.
- `None`: No authentication used.

Raises:
ValueError:
If an unsupported `auth_method` is provided when `auth_token` is set.
requests.RequestException:
If fetching the agent card from the remote URL fails.

Examples:
```python
# Example 1: No authentication
agent = RemoteVeAgent(
name="public_agent",
url="https://vefaas.example.com/agents/public"
)

# Example 2: Using Bearer token in header
agent = RemoteVeAgent(
name="secured_agent",
url="https://vefaas.example.com/agents/secure",
auth_token="my_secret_token",
auth_method="header"
)

# Example 3: Using token in query string
agent = RemoteVeAgent(
name="query_agent",
url="https://vefaas.example.com/agents/query",
auth_token="my_secret_token",
auth_method="querystring"
)
```
"""

def __init__(
self,
105 changes: 79 additions & 26 deletions veadk/agent.py
@@ -47,55 +47,108 @@


class Agent(LlmAgent):
"""LLM-based Agent with Volcengine capabilities."""
"""LLM-based Agent with Volcengine capabilities.

This class represents an intelligent agent powered by LLMs (Large Language Models),
integrated with Volcengine's AI framework. It supports memory modules, sub-agents,
tracers, knowledge bases, and other advanced features for A2A (Agent-to-Agent)
or user-facing scenarios.

Attributes:
name (str): The name of the agent.
description (str): A description of the agent, useful in A2A scenarios.
instruction (Union[str, InstructionProvider]): The instruction or instruction provider.
model_name (str): Name of the model used by the agent.
model_provider (str): Provider of the model (e.g., openai).
model_api_base (str): The base URL of the model API.
model_api_key (str): The API key for accessing the model.
model_extra_config (dict): Extra configurations to include in model requests.
tools (list[ToolUnion]): Tools available to the agent.
sub_agents (list[BaseAgent]): Sub-agents managed by this agent.
knowledgebase (Optional[KnowledgeBase]): Knowledge base attached to the agent.
short_term_memory (Optional[ShortTermMemory]): Session-based memory for temporary context.
long_term_memory (Optional[LongTermMemory]): Cross-session memory for persistent user context.
tracers (list[BaseTracer]): List of tracers used for telemetry and monitoring.

Notes:
Before creating your agent, make sure you have obtained an API key for your model.

Examples:
### Simple agent

Create the simplest possible agent without any extra settings. All agent attributes come from environment variables and default values:

```python
import asyncio

from veadk import Agent, Runner

root_agent = Agent()

runner = Runner(agent=root_agent)

response = asyncio.run(runner.run("hello"))
print(response)
```

You can set agent metadata attributes with the following code:

```python
from veadk import Agent

root_agent = Agent(
name="meeting_assistant",
description="An assistant that helps user to make meetings.",
# system prompt
instruction="First learn about user's meeting time, location, and other key informations, and give out a meeting plan.",
)
```

Or, if you want to use a locally served model or a model from another provider, you can specify model-related configurations in the initialization arguments:

```python
agent = Agent(model_name="", model_api_key="", model_api_base="")
```

You can also pass extra request options required by ARK (Volcengine Ark). The exact fields depend on your ARK model's request schema; the snippet below is an illustrative sketch for disabling deep thinking:

```python
# illustrative: disable deep thinking (check your ARK model's request schema)
model_extra_config = {"extra_body": {"thinking": {"type": "disabled"}}}

agent = Agent(model_extra_config=model_extra_config)
```

In some systems, a multi-agent design is necessary. You can build a multi-agent system with the `sub_agents` argument (the sub-agents below are illustrative):

```python
from veadk import Agent

planner = Agent(name="planner", description="Plans meetings.")
searcher = Agent(name="searcher", description="Searches for information.")

root_agent = Agent(name="coordinator", sub_agents=[planner, searcher])
```

"""

model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")
"""The model config"""

name: str = DEFAULT_AGENT_NAME
"""The name of the agent."""

description: str = DEFAULT_DESCRIPTION
"""The description of the agent. This will be helpful in A2A scenario."""

instruction: Union[str, InstructionProvider] = DEFAULT_INSTRUCTION
"""The instruction for the agent."""

model_name: str = Field(default_factory=lambda: settings.model.name)
"""The name of the model for agent running."""

model_provider: str = Field(default_factory=lambda: settings.model.provider)
"""The provider of the model for agent running."""

model_api_base: str = Field(default_factory=lambda: settings.model.api_base)
"""The api base of the model for agent running."""

model_api_key: str = Field(default_factory=lambda: settings.model.api_key)
"""The api key of the model for agent running."""

model_extra_config: dict = Field(default_factory=dict)
"""The extra config to include in the model requests."""

tools: list[ToolUnion] = []
"""The tools provided to agent."""

sub_agents: list[BaseAgent] = Field(default_factory=list, exclude=True)
"""The sub agents provided to agent."""

knowledgebase: Optional[KnowledgeBase] = None
"""The knowledgebase provided to agent."""

short_term_memory: Optional[ShortTermMemory] = None
"""The short term memory provided to agent."""

long_term_memory: Optional[LongTermMemory] = None
"""The long term memory provided to agent.

In VeADK, the `long_term_memory` refers to cross-session memory under the same user.
"""

tracers: list[BaseTracer] = []
"""The tracers provided to agent."""

def model_post_init(self, __context: Any) -> None:
super().model_post_init(None) # for sub_agents init
31 changes: 22 additions & 9 deletions veadk/agents/loop_agent.py
@@ -29,25 +29,38 @@


class LoopAgent(GoogleADKLoopAgent):
"""Loop Agent with Volcengine capabilities."""
"""Loop Agent with several sub agents.

This agent is capable of looping through and executing all sub-agents sequentially
or based on specific conditions. It is designed to operate in environments where
multiple agents need to work together in a looped execution flow, such as handling
complex, multi-step tasks or workflows. The agent integrates Volcengine’s AI
capabilities and supports a variety of tools and tracers for enhanced functionality
and performance monitoring.

Attributes:
model_config (ConfigDict): Configuration dictionary for the model.
name (str): The name of the agent, default is "veLoopAgent".
description (str): A description of the agent, helpful in A2A scenarios.
instruction (str): Instructions or principles for function calling and agent execution.
sub_agents (list[BaseAgent]): A list of sub-agents managed by the loop agent. Each sub-agent
is executed in a looped sequence based on the agent's logic.
tracers (list[BaseTracer]): A list of tracers used for monitoring the agent's performance
and behavior during execution.

Examples:
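A minimal sketch of a looped workflow. The sub-agents below are illustrative placeholders, and `max_iterations` is assumed to be inherited from the underlying Google ADK loop agent:

```python
from veadk import Agent
from veadk.agents.loop_agent import LoopAgent

drafter = Agent(name="drafter", description="Drafts an answer.")
reviewer = Agent(name="reviewer", description="Reviews and refines the draft.")

loop_agent = LoopAgent(
    name="draft_review_loop",
    sub_agents=[drafter, reviewer],
    max_iterations=3,
)
```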

"""

model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")
"""The model config"""

name: str = "veLoopAgent"
"""The name of the agent."""

description: str = DEFAULT_DESCRIPTION
"""The description of the agent. This will be helpful in A2A scenario."""

instruction: str = DEFAULT_INSTRUCTION
"""The instruction for the agent, such as principles of function calling."""

sub_agents: list[BaseAgent] = Field(default_factory=list, exclude=True)
"""The sub agents provided to agent."""

tracers: list[BaseTracer] = []
"""The tracers provided to agent."""

def model_post_init(self, __context: Any) -> None:
super().model_post_init(None) # for sub_agents init
30 changes: 21 additions & 9 deletions veadk/agents/parallel_agent.py
@@ -29,25 +29,37 @@


class ParallelAgent(GoogleADKParallelAgent):
"""LLM-based Agent with Volcengine capabilities."""
"""LLM-based Agent that can execute sub-agents in parallel.

This agent executes multiple sub-agents concurrently, making it suitable for scenarios
that require running several tasks or operations in parallel. By leveraging parallelism,
the agent can handle more complex workflows and reduce overall execution time when the
tasks are independent of one another.

Attributes:
model_config (ConfigDict): Configuration dictionary for the model.
name (str): The name of the agent, default is "veParallelAgent".
description (str): A description of the agent, useful in A2A scenarios.
instruction (str): Instructions or principles for function calling and agent execution.
sub_agents (list[BaseAgent]): A list of sub-agents managed by the parallel agent.
Each sub-agent is executed concurrently.
tracers (list[BaseTracer]): A list of tracers used for monitoring the agent's performance
and behavior during parallel execution.

Examples:
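A minimal sketch that fans out two independent tasks. The sub-agents below are illustrative placeholders:

```python
from veadk import Agent
from veadk.agents.parallel_agent import ParallelAgent

news_agent = Agent(name="news_fetcher", description="Fetches the latest news.")
weather_agent = Agent(name="weather_fetcher", description="Fetches the weather report.")

parallel_agent = ParallelAgent(
    name="fanout",
    sub_agents=[news_agent, weather_agent],
)
```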

"""

model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")
"""The model config"""

name: str = "veParallelAgent"
"""The name of the agent."""

description: str = DEFAULT_DESCRIPTION
"""The description of the agent. This will be helpful in A2A scenario."""

instruction: str = DEFAULT_INSTRUCTION
"""The instruction for the agent, such as principles of function calling."""

sub_agents: list[BaseAgent] = Field(default_factory=list, exclude=True)
"""The sub agents provided to agent."""

tracers: list[BaseTracer] = []
"""The tracers provided to agent."""

def model_post_init(self, __context: Any) -> None:
super().model_post_init(None) # for sub_agents init
27 changes: 18 additions & 9 deletions veadk/agents/sequential_agent.py
@@ -29,25 +29,34 @@


class SequentialAgent(GoogleADKSequentialAgent):
"""Sequential Agent with Volcengine capabilities."""
"""Sequential Agent that executes sub-agents sequentially.

This agent executes multiple sub-agents in a predefined sequential order. Each sub-agent
runs one after the other, which makes it suitable for workflows where the output of one
sub-agent feeds into the next, or for tasks that require a linear progression of steps.

Attributes:
model_config (ConfigDict): Configuration dictionary for the model.
name (str): The name of the agent, default is "veSequentialAgent".
description (str): A description of the agent, useful in A2A scenarios.
instruction (str): Instructions or principles for function calling and agent execution.
sub_agents (list[BaseAgent]): A list of sub-agents managed by the sequential agent.
Each sub-agent is executed in the order they are listed.
tracers (list[BaseTracer]): A list of tracers used for monitoring the agent's performance
and behavior during sequential execution.
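
Examples:
A minimal sketch of a two-step pipeline. The sub-agents below are illustrative placeholders and run in the listed order:

```python
from veadk import Agent
from veadk.agents.sequential_agent import SequentialAgent

extractor = Agent(name="extractor", description="Extracts key facts from the input.")
summarizer = Agent(name="summarizer", description="Summarizes the extracted facts.")

sequential_agent = SequentialAgent(
    name="extract_then_summarize",
    sub_agents=[extractor, summarizer],
)
```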
"""

model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")
"""The model config"""

name: str = "veSequentialAgent"
"""The name of the agent."""

description: str = DEFAULT_DESCRIPTION
"""The description of the agent. This will be helpful in A2A scenario."""

instruction: str = DEFAULT_INSTRUCTION
"""The instruction for the agent, such as principles of function calling."""

sub_agents: list[BaseAgent] = Field(default_factory=list, exclude=True)
"""The sub agents provided to agent."""

tracers: list[BaseTracer] = []
"""The tracers provided to agent."""

def model_post_init(self, __context: Any) -> None:
super().model_post_init(None) # for sub_agents init
15 changes: 14 additions & 1 deletion veadk/knowledgebase/backends/base_backend.py
@@ -18,8 +18,21 @@


class BaseKnowledgebaseBackend(ABC, BaseModel):
"""Base backend for knowledgebase.

Attributes:
index (str): Index or collection name of the vector storage.

Examples:
You can implement your own knowledgebase backend by subclassing this class and implementing its abstract methods (`precheck_index_naming` is shown below; any other abstract methods of the base class must be implemented as well):

```python
class CustomKnowledgebaseBackend(BaseKnowledgebaseBackend):
    def precheck_index_naming(self) -> None:
        # validate that `self.index` meets the storage's naming rules
        ...
```
"""

index: str
"""Index or collection name of the vector storage."""

@abstractmethod
def precheck_index_naming(self) -> None:
11 changes: 10 additions & 1 deletion veadk/knowledgebase/backends/in_memory_backend.py
@@ -24,10 +24,19 @@


class InMemoryKnowledgeBackend(BaseKnowledgebaseBackend):
"""A in-memory implementation backend for knowledgebase.

In-memory backend stores embedded text in a vector storage from Llama-index.

Attributes:
embedding_config (EmbeddingModelConfig):
Embedding config for text embedding and search.
The embedding config contains the embedding model name and the corresponding embedding dimension.
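
Examples:
A minimal sketch, assuming embedding credentials are available via environment variables or VeADK settings (`my_docs` is an illustrative index name):

```python
from veadk.knowledgebase.backends.in_memory_backend import InMemoryKnowledgeBackend

backend = InMemoryKnowledgeBackend(index="my_docs")
```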
"""

embedding_config: NormalEmbeddingModelConfig | EmbeddingModelConfig = Field(
default_factory=EmbeddingModelConfig
)
"""Embedding model configs"""

def model_post_init(self, __context: Any) -> None:
self._embed_model = OpenAILikeEmbedding(