-
Notifications
You must be signed in to change notification settings - Fork 46
Expand file tree
/
Copy pathconfig.toml.example
More file actions
211 lines (184 loc) · 8.79 KB
/
config.toml.example
File metadata and controls
211 lines (184 loc) · 8.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
# OpenCrabs Configuration File
# Copy this file to one of these locations:
# - Linux/macOS: ~/.opencrabs/config.toml
# - Windows: %APPDATA%\opencrabs\config.toml or opencrabs\config.toml
#
# IMPORTANT: API keys should NOT be stored here!
# Instead, store API keys in keys.toml (chmod 600) for security:
# - ~/.opencrabs/keys.toml
# Keys in keys.toml take priority over this file.
[database]
# Database file location (stores conversation history)
# path = "~/.opencrabs/opencrabs.db" # Default; only override if needed
[providers]
# ========================================
# Custom: OpenAI-Compatible Provider (Local LLMs, and any OpenAI Compatible model)
# ========================================
# Use this for LM Studio, Ollama, LocalAI, etc.
# Every custom provider needs a name — the label after "custom." (e.g. lm_studio, nvidia, groq).
# You can define as many as you need and switch between them via /models.
[providers.custom.lm_studio]
enabled = true
base_url = "http://localhost:1234/v1/chat/completions" # LM Studio default
models = ["kimi-k2.5", "glm-5", "MiniMax-M2.5", "qwen3-coder", "gpt-oss-120b", "llama-4-70B", "mistral-Large-3", "qwen3-coder-next"]
# ⭐ IMPORTANT: Set this to match the model name loaded in LM Studio!
# Common examples:
# - qwen2.5-coder-7b-instruct
# - codellama-7b-instruct
# - deepseek-coder-6.7b-instruct
# - llama-3.2-1b-instruct
default_model = "qwen3-coder-next"
# Other local LLM servers — just add another named section:
#
# [providers.custom.ollama]
# enabled = false
# base_url = "http://localhost:11434/v1/chat/completions"
# default_model = "mistral"
# models = ["mistral", "llama3", "codellama"]
# ========================================
# Official OpenAI Provider
# ========================================
[providers.openai]
enabled = false
default_model = "gpt-4o" # Optional: override default model
# ========================================
# Anthropic Provider (Claude)
# ========================================
[providers.anthropic]
enabled = false
default_model = "claude-sonnet-4-6" # Optional: override default
# ========================================
# OpenRouter Provider (100+ models via OpenAI-compatible API)
# ========================================
[providers.openrouter]
enabled = false
base_url = "https://openrouter.ai/api/v1/chat/completions"
default_model = "qwen/qwen3-coder-next" # Many options at openrouter.ai/models
# ========================================
# Google Gemini Provider
# ========================================
# Models fetched live from the Gemini API during onboarding and /models
# API key goes in keys.toml under [providers.gemini]
# Get key from: aistudio.google.com
[providers.gemini]
enabled = false
default_model = "gemini-2.5-flash"
# ========================================
# Minimax Provider (Chinese AI, OpenAI-compatible)
# ========================================
# Note: Minimax does NOT have a /models endpoint, so add models manually
[providers.minimax]
enabled = false
base_url = "https://api.minimax.io/v1"
default_model = "MiniMax-M2.5"
models = ["MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-Text-01"]
# ========================================
# STT (Speech-to-Text) Providers
# ========================================
# Groq Whisper for transcription
[providers.stt.groq]
enabled = false
default_model = "whisper-large-v3-turbo"
# ========================================
# TTS (Text-to-Speech) Providers
# ========================================
# OpenAI TTS for voice output
[providers.tts.openai]
enabled = false
default_model = "gpt-4o-mini-tts"
voice = "ash" # TTS voice name
# NOTE(review): `model` duplicates `default_model` above — confirm which key the app actually reads
model = "gpt-4o-mini-tts" # TTS model
# ========================================
# Image Generation & Vision (Google Gemini)
# ========================================
# API key goes in keys.toml under [image]
# Get key from: aistudio.google.com
# Use /onboard:image to configure via wizard
[image.generation]
enabled = false
model = "gemini-3.1-flash-image-preview" # Gemini image-gen model ("Nano Banana")
[image.vision]
enabled = false
model = "gemini-3.1-flash-image-preview" # Gemini vision model
# ========================================
# Tips for Using Local LLMs
# ========================================
# 1. Make sure LM Studio is running before starting OpenCrabs
# 2. Load a model in LM Studio first
# 3. Set default_model to EXACTLY match the model name shown in LM Studio
# 4. Increase context length in LM Studio if you get overflow errors:
# - Recommended: 8192 or higher
# - Location: LM Studio > Model Settings > Context Length
# ==================================================
# Channels (Telegram / WhatsApp / Slack / Discord / Trello)
# ==================================================
# respond_to controls which messages the bot replies to (applies to Telegram, Discord, Slack):
# "all" — reply to every message in allowed channels (default)
# "dm_only" — reply only to direct/private messages
# "mention" — reply only when the bot is @mentioned or replied-to
[channels.whatsapp]
enabled = false
# Phone numbers allowed to message the bot (E.164 format, leading + optional)
# Access control is purely phone-based — add numbers here to restrict who can message the bot.
allowed_phones = ["+15551234567"]
# session_idle_hours = 24.0 # Archive inactive non-owner sessions after N hours (default: never)
[channels.discord]
enabled = false
allowed_channels = ["channel_id"] # Where the bot operates (empty = all channels)
allowed_users = [123456789012345] # Who the bot replies to (numeric user ID, empty = everyone)
# respond_to = "all" # "all" | "dm_only" | "mention"
# session_idle_hours = 24.0 # Archive inactive non-owner sessions after N hours (default: never)
[channels.telegram]
enabled = false
allowed_users = [123456789] # Who the bot replies to (numeric user ID, empty = everyone)
# allowed_channels = ["-100123456789"] # Chat/group IDs to restrict to (empty = all chats)
# respond_to = "all" # "all" | "dm_only" | "mention"
# session_idle_hours = 24.0 # Archive inactive non-owner sessions after N hours (default: never)
[channels.slack]
enabled = false
allowed_channels = ["C12345678"] # Where the bot operates (Slack channel ID, empty = all)
allowed_users = ["U12345678"] # Who the bot replies to (Slack user ID, empty = everyone)
# respond_to = "all" # "all" | "dm_only" | "mention"
# session_idle_hours = 24.0 # Archive inactive non-owner sessions after N hours (default: never)
# ========================================
# Trello — board card management
# ========================================
# API keys/tokens go in keys.toml under [channels.trello]
# Default mode: tool-only — no automatic polling. The AI acts on Trello only
# when you explicitly ask it to via trello_send / trello_connect.
# Opt-in polling: set poll_interval_secs > 0 to have the agent watch boards
# for @mentions of the bot username and respond to them.
[channels.trello]
enabled = false
# Board IDs or names to monitor — you can mix 24-char hex IDs and human-readable board names.
# The agent resolves names at startup. Comma-separated in the wizard; TOML array here.
board_ids = ["your-board-name", "abc123def456abc123def456"]
allowed_users = [] # Trello member IDs allowed to @mention the bot (empty = all)
# poll_interval_secs = 30 # Opt-in: poll boards every N seconds for @mentions. Default = disabled.
# session_idle_hours = 24.0 # Archive inactive non-owner sessions after N hours (default: never)
# ========================================
# Agent-to-Agent (A2A) Protocol
# ========================================
# Enables HTTP gateway for peer-to-peer agent communication.
# Other A2A-compatible agents can send tasks, collaborate, and debate.
[a2a]
enabled = false
bind = "127.0.0.1" # Loopback only by default for security
port = 18790 # A2A gateway port
# CORS allowed origins (empty = no cross-origin requests allowed)
# allowed_origins = ["http://localhost:3000"]
# API key for Bearer token auth on /a2a/v1 (optional, recommended for non-loopback)
# Can also be set in keys.toml under [a2a] api_key = "..."
# api_key = "your-secret-key"
# ========================================
# Web Search Providers (defaults to free DuckDuckGo — no additional web search provider needed)
# ========================================
[providers.web_search.exa]
enabled = true
# Exa via MCP is enabled by default because it is free. Using the API instead is free up to
# 1,000 requests. API key goes in keys.toml: [providers.web_search.exa] api_key = "..."
[providers.web_search.duckduckgo]
enabled = true
# Completely free; enabled by default
[providers.web_search.brave]
enabled = false
# Free up to 1,000 requests. API key goes in keys.toml: [providers.web_search.brave] api_key = "..."