|
47 | 47 |
|
48 | 48 |
|
49 | 49 | class Agent(LlmAgent): |
50 | | - """LLM-based Agent with Volcengine capabilities.""" |
| 50 | + """LLM-based Agent with Volcengine capabilities. |
| 51 | +
|
| 52 | + This class represents an intelligent agent powered by LLMs (Large Language Models), |
| 53 | + integrated with Volcengine's AI framework. It supports memory modules, sub-agents, |
| 54 | + tracers, knowledge bases, and other advanced features for A2A (Agent-to-Agent) |
| 55 | + or user-facing scenarios. |
| 56 | +
|
| 57 | + Attributes: |
| 58 | + name (str): The name of the agent. |
| 59 | + description (str): A description of the agent, useful in A2A scenarios. |
| 60 | + instruction (Union[str, InstructionProvider]): The instruction or instruction provider. |
| 61 | + model_name (str): Name of the model used by the agent. |
| 62 | + model_provider (str): Provider of the model (e.g., openai). |
| 63 | + model_api_base (str): The base URL of the model API. |
| 64 | + model_api_key (str): The API key for accessing the model. |
| 65 | + model_extra_config (dict): Extra configurations to include in model requests. |
| 66 | + tools (list[ToolUnion]): Tools available to the agent. |
| 67 | + sub_agents (list[BaseAgent]): Sub-agents managed by this agent. |
| 68 | + knowledgebase (Optional[KnowledgeBase]): Knowledge base attached to the agent. |
| 69 | + short_term_memory (Optional[ShortTermMemory]): Session-based memory for temporary context. |
| 70 | + long_term_memory (Optional[LongTermMemory]): Cross-session memory for persistent user context. |
| 71 | + tracers (list[BaseTracer]): List of tracers used for telemetry and monitoring. |
| 72 | +
|
| 73 | + Notes: |
| 74 | + Before creating your agent, you should get the API Key for your model. |
| 75 | +
|
| 76 | + Examples: |
| 77 | + ### Simple agent |
| 78 | +
|
| 79 | + Create the simplest agent without any extra settings. All agent attributes come from environment variables and default values, like: |
| 80 | +
|
| 81 | + ```python |
| 82 | + import asyncio |
| 83 | +
|
| 84 | + from veadk import Agent, Runner |
| 85 | +
|
| 86 | + root_agent = Agent() |
| 87 | +
|
| 88 | + runner = Runner(agent=root_agent) |
| 89 | +
|
| 90 | + response = asyncio.run(runner.run("hello")) |
| 91 | + print(response) |
| 92 | + ``` |
| 93 | +
|
| 94 | + You can set some agent metadata attributes with the following code: |
| 95 | +
|
| 96 | + ```python |
| 97 | + from veadk import Agent |
| 98 | +
|
| 99 | +
| 100 | +
|
| 101 | + root_agent = Agent( |
| 102 | + name="meeting_assistant", |
| 103 | + description="An assistant that helps user to make meetings.", |
| 104 | + # system prompt |
| 105 | + instruction="First learn about the user's meeting time, location, and other key information, and give out a meeting plan.", |
| 106 | + ) |
| 107 | + ``` |
| 108 | +
|
| 109 | + Or, if you want to use your locally served model or a model from another provider, you can specify some model-related configurations in the initialization arguments: |
| 110 | +
|
| 111 | + ```python |
| 112 | + agent = Agent(model_name="", model_api_key="", model_api_base="") |
| 113 | + ``` |
| 114 | +
|
| 115 | + Besides, you can specify some extra options per ARK requirements, such as: |
| 116 | +
|
| 117 | + ```python |
| 118 | + # disable thinking |
| 119 | + model_extra_config = {} |
| 120 | + ``` |
| 121 | +
|
| 122 | + In some systems, a multi-agent design is necessary; you can implement a multi-agent system via the `sub_agents` argument: |
| 123 | +
|
| 124 | + ```python |
| 125 | + from veadk import Agent |
| 126 | + ``` |
| 127 | +
|
| 128 | + """ |
51 | 129 |
|
52 | 130 | model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow") |
53 | | - """The model config""" |
54 | 131 |
|
55 | 132 | name: str = DEFAULT_AGENT_NAME |
56 | | - """The name of the agent.""" |
57 | | - |
58 | 133 | description: str = DEFAULT_DESCRIPTION |
59 | | - """The description of the agent. This will be helpful in A2A scenario.""" |
60 | | - |
61 | 134 | instruction: Union[str, InstructionProvider] = DEFAULT_INSTRUCTION |
62 | | - """The instruction for the agent.""" |
63 | 135 |
|
64 | 136 | model_name: str = Field(default_factory=lambda: settings.model.name) |
65 | | - """The name of the model for agent running.""" |
66 | | - |
67 | 137 | model_provider: str = Field(default_factory=lambda: settings.model.provider) |
68 | | - """The provider of the model for agent running.""" |
69 | | - |
70 | 138 | model_api_base: str = Field(default_factory=lambda: settings.model.api_base) |
71 | | - """The api base of the model for agent running.""" |
72 | | - |
73 | 139 | model_api_key: str = Field(default_factory=lambda: settings.model.api_key) |
74 | | - """The api key of the model for agent running.""" |
75 | | - |
76 | 140 | model_extra_config: dict = Field(default_factory=dict) |
77 | | - """The extra config to include in the model requests.""" |
78 | 141 |
|
79 | 142 | tools: list[ToolUnion] = [] |
80 | | - """The tools provided to agent.""" |
81 | 143 |
|
82 | 144 | sub_agents: list[BaseAgent] = Field(default_factory=list, exclude=True) |
83 | | - """The sub agents provided to agent.""" |
84 | 145 |
|
85 | 146 | knowledgebase: Optional[KnowledgeBase] = None |
86 | | - """The knowledgebase provided to agent.""" |
87 | 147 |
|
88 | 148 | short_term_memory: Optional[ShortTermMemory] = None |
89 | | - """The short term memory provided to agent.""" |
90 | | - |
91 | 149 | long_term_memory: Optional[LongTermMemory] = None |
92 | | - """The long term memory provided to agent. |
93 | | -
|
94 | | - In VeADK, the `long_term_memory` refers to cross-session memory under the same user. |
95 | | - """ |
96 | 150 |
|
97 | 151 | tracers: list[BaseTracer] = [] |
98 | | - """The tracers provided to agent.""" |
99 | 152 |
|
100 | 153 | def model_post_init(self, __context: Any) -> None: |
101 | 154 | super().model_post_init(None) # for sub_agents init |
|
0 commit comments