-
Notifications
You must be signed in to change notification settings - Fork 172
Expand file tree
/
Copy path config.yaml.example
More file actions
125 lines (111 loc) · 4.55 KB
/
config.yaml.example
File metadata and controls
125 lines (111 loc) · 4.55 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
# SGR Agent Core - Configuration Template
# Copy this file to config.yaml and fill in your data

# LLM Configuration
llm:
  api_key: "your-openai-api-key-here"    # Your OpenAI API key
  base_url: "https://api.openai.com/v1"  # API base URL
  model: "gpt-4o-mini"                   # Model name
  max_tokens: 8000                       # Max output tokens
  temperature: 0.4                       # Temperature (0.0-1.0)
  # proxy: "socks5://127.0.0.1:1081"     # Optional proxy (socks5:// or http://)
# Execution Settings
execution:
  max_clarifications: 3     # Max clarification requests
  max_iterations: 10        # Max agent iterations
  mcp_context_limit: 15000  # Max context length from MCP server response
  logs_dir: "logs"          # Directory for saving agent execution logs
  reports_dir: "reports"    # Directory for saving agent reports
# Prompts Configuration
# prompts:
#   # Option 1: Use file paths (absolute or relative to project root)
#   system_prompt_file: "path/to/your/system_prompt.txt"
#   initial_user_request_file: "path/to/your/initial_user_request.txt"
#   clarification_response_file: "path/to/your/clarification_response.txt"
#   # Option 2: Provide prompts directly as strings
#   system_prompt_str: "Your custom system prompt here..."
#   initial_user_request_str: "Your custom initial request template..."
#   clarification_response_str: "Your custom clarification template..."
# Note: If both a file and a string are provided for the same prompt, the string takes precedence
# MCP (Model Context Protocol) Configuration
mcp:
  mcpServers:
    deepwiki:
      url: "https://mcp.deepwiki.com/mcp"
    # Add more MCP servers here:
    # your_server:
    #   url: "https://your-mcp-server.com/mcp"
    #   headers:
    #     Authorization: "Bearer your-token"

# Note: The 'agents' field is optional and can be loaded from either:
# - This config.yaml file
# - Any separate file by GlobalConfig.definitions_from_yaml method
# See examples in agents.yaml.example for agent configuration options
tools:
  # Global tool config: base_class (optional) and any params passed to the tool at runtime.
  # Agents that use the tool by name get these params as kwargs; per-agent inline config overrides.
  my_custom_tool:
    base_class: "path.to.my.tools.CustomTool"
  my_other_tool:
    base_class: "name_of_tool_class_in_registry"

  # Search tools: configure search provider and API keys per tool
  # (can be overridden per-agent in tools list)
  web_search_tool:
    engine: "tavily"                               # Search engine: "tavily" (default), "brave", or "perplexity"
    tavily_api_key: "your-tavily-api-key-here"     # Tavily API key (get at tavily.com)
    tavily_api_base_url: "https://api.tavily.com"  # Tavily API URL
    max_results: 12
    max_searches: 6
  extract_page_content_tool:
    tavily_api_key: "your-tavily-api-key-here"     # Same Tavily API key (Tavily-only feature)
    tavily_api_base_url: "https://api.tavily.com"
    content_limit: 2000

  # Standalone search tools (for multi-engine setups where LLM picks the engine)
  brave_search_tool:
    brave_api_key: "your-brave-api-key-here"       # Brave Search API key
    brave_api_base_url: "https://api.search.brave.com/res/v1/web/search"
  perplexity_search_tool:
    perplexity_api_key: "your-perplexity-api-key-here"  # Perplexity API key
    perplexity_api_base_url: "https://api.perplexity.ai/search"
  tavily_search_tool:
    tavily_api_key: "your-tavily-api-key-here"
    tavily_api_base_url: "https://api.tavily.com"
agents:
  custom_research_agent:
    base_class: "sgr_agent_core.agents.sgr_agent.SGRAgent"

    # Optional: Override LLM settings for this agent
    llm:
      model: "gpt-4o"
      temperature: 0.3
      max_tokens: 16000
      # api_key: "your-custom-api-key"         # Optional: use different API key
      # base_url: "https://api.openai.com/v1"  # Optional: use different endpoint
      # proxy: "http://127.0.0.1:8080"         # Optional: use proxy

    # Optional: Execution configuration
    execution:
      max_steps: 8
      max_iterations: 15
      max_clarifications: 5
      max_searches: 6
      mcp_context_limit: 20000
      logs_dir: "logs/custom_agent"
      reports_dir: "reports/custom_agent"

    # Optional: MCP configuration
    mcp:
      mcpServers:
        deepwiki:
          url: "https://mcp.deepwiki.com/mcp"

    # Tools: names, or dicts with "name" and optional kwargs (e.g. search settings per tool)
    # Example:
    #   - web_search_tool:
    #       max_results: 15
    #       max_searches: 6
    tools:
      - "web_search_tool"
      - "extract_page_content_tool"
      - "create_report_tool"
      - "clarification_tool"
      - "generate_plan_tool"
      - "adapt_plan_tool"
      - "final_answer_tool"
      - "my_custom_tool"
      - "my_other_tool"