-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconfig.yaml.template
More file actions
184 lines (163 loc) · 6.2 KB
/
config.yaml.template
File metadata and controls
184 lines (163 loc) · 6.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
# YADA Configuration File
# This file is automatically generated on first run.
#
# API KEYS: Stored in .env file (not here!)
# Create a .env file in the project root with your API keys:
# OPENAI_API_KEY=your-key-here
# ANTHROPIC_API_KEY=your-key-here
# GEMINI_API_KEY=your-key-here
# HF_TOKEN=your-key-here
version: 3
###############################################################################
# Models (Capability-Based Configuration)
###############################################################################
# Configure each capability independently with any provider
###############################################################################
# Supported providers: openai, anthropic, google, azure, huggingface, etc.
# Mix and match providers based on your needs!
#
# Each model config REQUIRES:
# - provider: The AI provider (e.g., openai, anthropic, google, huggingface)
# - model: The specific model name
# - key_name: Environment variable name containing the API key
#
# Optional fields:
# - base_url: Custom API base URL (e.g., for local gateways/proxies)
# - reasoning: high | medium | low (when supported by the model)
# - max_output: Max tokens to request from the model
# - temperature: Sampling temperature (when supported by the model)
# Some models ignore temperature.
# Common key_name values:
# - OPENAI_API_KEY (for OpenAI)
# - ANTHROPIC_API_KEY (for Anthropic)
# - GEMINI_API_KEY (for Google)
# - HF_TOKEN (for HuggingFace)
###############################################################################
models:
  # Main chat agent (REQUIRED)
  chat:
    provider: [llm_provider] # e.g. gemini, openai, anthropic, huggingface, etc.
    model: [model_name] # e.g. gemini-2.5-flash, gpt-5.1, etc.
    key_name: [API_KEY_ENV_VAR] # e.g. GEMINI_API_KEY, OPENAI_API_KEY, etc.
    reasoning: high # high | medium | low (when supported)
    max_output: 4096
    temperature: 0.2 # sampling temperature (when supported)
    # base_url: "https://router.huggingface.co/v1" # optional custom base URL (e.g., for local gateways)

  ## Example configurations (kept commented out — duplicate keys in one
  ## mapping are invalid YAML, and most parsers silently keep only the last
  ## value. Uncomment ONE example and replace the placeholders above.)
  #
  # chat:
  #   provider: openrouter
  #   model: "x-ai/grok-4.1-fast:free"
  #   base_url: "https://openrouter.ai/api/v1"
  #   key_name: OPENROUTER_API_KEY
  #   # reasoning: medium
  #   max_output: 4096
  #   temperature: 0.2
  #
  # chat:
  #   provider: openrouter
  #   model: "openai/gpt-oss-20b:free"
  #   base_url: "https://openrouter.ai/api/v1"
  #   key_name: OPENROUTER_API_KEY
  #   # reasoning: high
  #   max_output: 4096
  #   temperature: 0.2

  # Embeddings for vector memory (OPTIONAL - disables memory if not configured)
  embeddings:
    provider: openai
    model: text-embedding-3-small
    key_name: OPENAI_API_KEY

  # Vision model for PDF analysis (OPTIONAL - disables PDF reading if not configured)
  vision:
    provider: openai
    model: gpt-4o-mini
    key_name: OPENAI_API_KEY

  # Query summarizer for metrics (OPTIONAL - uses chat model if not configured)
  summarizer:
    provider: openai
    model: gpt-5-nano
    key_name: OPENAI_API_KEY
###############################################################################
# Integrations
###############################################################################
integrations:
  slack:
    enabled: false
    user_token: "" # Or set SLACK_USER_TOKEN in your environment
    username: "" # Will auto-detect from Slack token if empty
    use_deep_links: false

  github:
    enabled: false
    token: "" # Or set GITHUB_TOKEN in your environment
    username: "" # Or set GITHUB_USERNAME in your environment
    repo: "" # Or set GITHUB_REPO; will auto-detect from git remote if empty

  jira:
    enabled: false
    server: "" # e.g., https://your-domain.atlassian.net/ (or JIRA_SERVER env var)
    email: "" # Your JIRA account email (or JIRA_EMAIL)
    api_token: "" # JIRA API token (or JIRA_API_TOKEN)
    project: "" # Default JIRA project key (or JIRA_PROJECT)

  filesystem:
    enabled: true
    max_read_mb: 10
    max_write_mb: 50

  search:
    enabled: true

  shell:
    enabled: true
    working_dir: . # default to current project root
    # Commands containing any of these substrings will NEVER be executed.
    # YADA should refuse them outright and explain why.
    deny_patterns:
      - "rm -rf"
      - "rm -r /"
      - "mkfs"
      - "mkfs."
      - "dd if="
      - ">:"
      - ":(){:|:&};:" # fork bomb
      - "shutdown"
      - "reboot"
      - "poweroff"
      - "init 0"
      - "userdel"
      - "chmod 777 /"
      - "chown -R /"
      - "format "
      - "mkpartition"
      - "wipefs"
      - "cryptsetup"
      - "iptables -F"
      - "ufw disable"
      - "pkill -9"
      - "killall -9"
      - "docker system prune -a"
      - "kubeadm reset"
###############################################################################
# Application Settings
###############################################################################
app:
  log_level: INFO # DEBUG, INFO, WARNING, ERROR
  cache_ttl: 300 # seconds
  max_search_results: 20
  console_trace: false
  theme_mode: dark # dark or light

# When the agent is allowed to generate operational/utility scripts or code
# NOTE(review): source indentation was lost — confirm against the config
# loader whether `codegen` is a top-level key or nests under `app`.
codegen:
  mode: operational-only # operational-only | disabled
  allowed_paths:
    - tools/debug
    - tmp/yada
###############################################################################
# Paths (relative to ~/.yada/ directory)
# These are automatically set and should rarely need changing
###############################################################################
paths:
  history_file: history
  sessions_db: sessions.db
  session_version: session_version
  slack_cache: slack_users.json
  metrics_db: metrics.db
  vector_db: vector_db
  traces_log: traces.log

  # Agent output directory - where YADA saves generated files
  # YADA reads your codebase but writes outputs here (not to your source)
  filesystem_workspace: workspace

  # Advanced overrides (optional)
  # trace_file: /custom/path/to/traces.log
  # metrics_db_path: /custom/path/to/metrics.db