|
1 | | -# Rename this file to .env once you have filled in the below environment variables! |
2 | | - |
3 | | -# Get your GROQ API Key here - |
4 | | -# https://console.groq.com/keys |
5 | | -# You only need this environment variable set if you want to use Groq models |
6 | | -GROQ_API_KEY= |
7 | | - |
8 | | -# Get your HuggingFace API Key here - |
9 | | -# https://huggingface.co/settings/tokens |
10 | | -# You only need this environment variable set if you want to use HuggingFace models |
11 | | -HuggingFace_API_KEY= |
12 | | - |
13 | | - |
14 | | -# Get your Open AI API Key by following these instructions - |
15 | | -# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key |
16 | | -# You only need this environment variable set if you want to use GPT models |
17 | | -OPENAI_API_KEY= |
18 | | - |
19 | | -# Get your Anthropic API Key in your account settings - |
20 | | -# https://console.anthropic.com/settings/keys |
21 | | -# You only need this environment variable set if you want to use Claude models |
22 | | -ANTHROPIC_API_KEY= |
23 | | - |
24 | | -# Get your OpenRouter API Key in your account settings - |
25 | | -# https://openrouter.ai/settings/keys |
26 | | -# You only need this environment variable set if you want to use OpenRouter models |
27 | | -OPEN_ROUTER_API_KEY= |
28 | | - |
29 | | -# Get your Google Generative AI API Key by following these instructions - |
30 | | -# https://console.cloud.google.com/apis/credentials |
31 | | -# You only need this environment variable set if you want to use Google Generative AI models |
32 | | -GOOGLE_GENERATIVE_AI_API_KEY= |
33 | | - |
34 | | -# You only need this environment variable set if you want to use oLLAMA models |
35 | | -# DONT USE http://localhost:11434 due to IPV6 issues |
36 | | -# USE EXAMPLE http://127.0.0.1:11434 |
37 | | -OLLAMA_API_BASE_URL= |
38 | | - |
39 | | -# You only need this environment variable set if you want to use OpenAI Like models |
40 | | -OPENAI_LIKE_API_BASE_URL= |
41 | | - |
42 | | -# You only need this environment variable set if you want to use Together AI models |
43 | | -TOGETHER_API_BASE_URL= |
44 | | - |
45 | | -# You only need this environment variable set if you want to use DeepSeek models through their API |
46 | | -DEEPSEEK_API_KEY= |
47 | | - |
48 | | -# Get your OpenAI Like API Key |
49 | | -OPENAI_LIKE_API_KEY= |
50 | | - |
51 | | -# Get your Together API Key |
52 | | -TOGETHER_API_KEY= |
53 | | - |
54 | | -# You only need this environment variable set if you want to use Hyperbolic models |
55 | | -#Get your Hyperbolics API Key at https://app.hyperbolic.xyz/settings |
56 | | -#baseURL="https://api.hyperbolic.xyz/v1/chat/completions" |
57 | | -HYPERBOLIC_API_KEY= |
58 | | -HYPERBOLIC_API_BASE_URL= |
59 | | - |
60 | | -# Get your Mistral API Key by following these instructions - |
61 | | -# https://console.mistral.ai/api-keys/ |
62 | | -# You only need this environment variable set if you want to use Mistral models |
63 | | -MISTRAL_API_KEY= |
64 | | - |
65 | | -# Get the Cohere Api key by following these instructions - |
66 | | -# https://dashboard.cohere.com/api-keys |
67 | | -# You only need this environment variable set if you want to use Cohere models |
68 | | -COHERE_API_KEY= |
69 | | - |
70 | | -# Get LMStudio Base URL from LM Studio Developer Console |
71 | | -# Make sure to enable CORS |
72 | | -# DONT USE http://localhost:1234 due to IPV6 issues |
73 | | -# Example: http://127.0.0.1:1234 |
74 | | -LMSTUDIO_API_BASE_URL= |
75 | | - |
76 | | -# Get your xAI API key |
77 | | -# https://x.ai/api |
78 | | -# You only need this environment variable set if you want to use xAI models |
79 | | -XAI_API_KEY= |
80 | | - |
81 | | -# Get your Perplexity API Key here - |
82 | | -# https://www.perplexity.ai/settings/api |
83 | | -# You only need this environment variable set if you want to use Perplexity models |
84 | | -PERPLEXITY_API_KEY= |
85 | | - |
86 | | -# Get your AWS configuration |
87 | | -# https://console.aws.amazon.com/iam/home |
88 | | -# The JSON should include the following keys: |
89 | | -# - region: The AWS region where Bedrock is available. |
90 | | -# - accessKeyId: Your AWS access key ID. |
91 | | -# - secretAccessKey: Your AWS secret access key. |
92 | | -# - sessionToken (optional): Temporary session token if using an IAM role or temporary credentials. |
93 | | -# Example JSON: |
94 | | -# {"region": "us-east-1", "accessKeyId": "yourAccessKeyId", "secretAccessKey": "yourSecretAccessKey", "sessionToken": "yourSessionToken"} |
95 | | -AWS_BEDROCK_CONFIG= |
96 | | - |
97 | | -# Include this environment variable if you want more logging for debugging locally |
98 | | -VITE_LOG_LEVEL=debug |
| 1 | +# ====================================== |
| 2 | +# Environment Variables for Bolt.diy |
| 3 | +# ====================================== |
| 4 | +# Copy this file to .env.local and fill in your API keys |
| 5 | +# See README.md for setup instructions |
| 6 | + |
| 7 | +# ====================================== |
| 8 | +# AI PROVIDER API KEYS |
| 9 | +# ====================================== |
| 10 | + |
| 11 | +# Anthropic Claude |
| 12 | +# Get your API key from: https://console.anthropic.com/ |
| 13 | +ANTHROPIC_API_KEY=your_anthropic_api_key_here |
| 14 | + |
| 15 | +# OpenAI GPT models |
| 16 | +# Get your API key from: https://platform.openai.com/api-keys |
| 17 | +OPENAI_API_KEY=your_openai_api_key_here |
| 18 | + |
| 19 | +# GitHub Models (OpenAI models hosted by GitHub) |
| 20 | +# Get your Personal Access Token from: https://github.com/settings/tokens |
| 21 | +# - Select "Fine-grained tokens" |
| 22 | +# - Set repository access to "All repositories" |
| 23 | +# - Enable "GitHub Models" permission |
| 24 | +GITHUB_API_KEY=github_pat_your_personal_access_token_here |
| 25 | + |
| 26 | +# Perplexity AI (Search-augmented models) |
| 27 | +# Get your API key from: https://www.perplexity.ai/settings/api |
| 28 | +PERPLEXITY_API_KEY=your_perplexity_api_key_here |
| 29 | + |
| 30 | +# DeepSeek |
| 31 | +# Get your API key from: https://platform.deepseek.com/api_keys |
| 32 | +DEEPSEEK_API_KEY=your_deepseek_api_key_here |
| 33 | + |
| 34 | +# Google Gemini |
| 35 | +# Get your API key from: https://aistudio.google.com/app/apikey |
| 36 | +GOOGLE_GENERATIVE_AI_API_KEY=your_google_gemini_api_key_here |
| 37 | + |
| 38 | +# Cohere |
| 39 | +# Get your API key from: https://dashboard.cohere.com/api-keys |
| 40 | +COHERE_API_KEY=your_cohere_api_key_here |
| 41 | + |
| 42 | +# Groq (Fast inference) |
| 43 | +# Get your API key from: https://console.groq.com/keys |
| 44 | +GROQ_API_KEY=your_groq_api_key_here |
| 45 | + |
| 46 | +# Mistral |
| 47 | +# Get your API key from: https://console.mistral.ai/api-keys/ |
| 48 | +MISTRAL_API_KEY=your_mistral_api_key_here |
| 49 | + |
| 50 | +# Together AI |
| 51 | +# Get your API key from: https://api.together.xyz/settings/api-keys |
| 52 | +TOGETHER_API_KEY=your_together_api_key_here |
| 53 | + |
| 54 | +# xAI (Grok models) |
| 55 | +# Get your API key from: https://console.x.ai/ |
| 56 | +XAI_API_KEY=your_xai_api_key_here |
| 57 | + |
| 58 | +# Moonshot AI (Kimi models) |
| 59 | +# Get your API key from: https://platform.moonshot.ai/console/api-keys |
| 60 | +MOONSHOT_API_KEY=your_moonshot_api_key_here |
| 61 | + |
| 62 | +# Hugging Face |
| 63 | +# Get your API key from: https://huggingface.co/settings/tokens |
| 64 | +HuggingFace_API_KEY=your_huggingface_api_key_here |
| 65 | + |
| 66 | +# Hyperbolic |
| 67 | +# Get your API key from: https://app.hyperbolic.xyz/settings |
| 68 | +HYPERBOLIC_API_KEY=your_hyperbolic_api_key_here |
| 69 | + |
| 70 | +# OpenRouter (Meta routing for multiple providers) |
| 71 | +# Get your API key from: https://openrouter.ai/keys |
| 72 | +OPEN_ROUTER_API_KEY=your_openrouter_api_key_here |
| 73 | + |
| 74 | +# ====================================== |
| 75 | +# CUSTOM PROVIDER BASE URLS (Optional) |
| 76 | +# ====================================== |
| 77 | + |
| 78 | +# Ollama (Local models) |
| 79 | +# DON'T USE http://localhost:11434 due to IPv6 issues |
| 80 | +# USE: http://127.0.0.1:11434 |
| 81 | +OLLAMA_API_BASE_URL=http://127.0.0.1:11434 |
| 82 | + |
| 83 | +# OpenAI-like API (Compatible providers) |
| 84 | +OPENAI_LIKE_API_BASE_URL=your_openai_like_base_url_here |
| 85 | +OPENAI_LIKE_API_KEY=your_openai_like_api_key_here |
| 86 | + |
| 87 | +# Together AI Base URL |
| 88 | +TOGETHER_API_BASE_URL=your_together_base_url_here |
| 89 | + |
| 90 | +# Hyperbolic Base URL |
| 91 | +HYPERBOLIC_API_BASE_URL=https://api.hyperbolic.xyz/v1/chat/completions |
| 92 | + |
| 93 | +# LMStudio (Local models) |
| 94 | +# Make sure to enable CORS in LMStudio |
| 95 | +# DON'T USE http://localhost:1234 due to IPv6 issues |
| 96 | +# USE: http://127.0.0.1:1234 |
| 97 | +LMSTUDIO_API_BASE_URL=http://127.0.0.1:1234 |
| 98 | + |
| 99 | +# ====================================== |
| 100 | +# CLOUD SERVICES CONFIGURATION |
| 101 | +# ====================================== |
| 102 | + |
| 103 | +# AWS Bedrock Configuration (JSON format) |
| 104 | +# Get your credentials from: https://console.aws.amazon.com/iam/home |
| 105 | +# Example: {"region": "us-east-1", "accessKeyId": "yourAccessKeyId", "secretAccessKey": "yourSecretAccessKey"} |
| 106 | +AWS_BEDROCK_CONFIG=your_aws_bedrock_config_json_here |
| 107 | + |
| 108 | +# ====================================== |
| 109 | +# GITHUB INTEGRATION |
| 110 | +# ====================================== |
| 111 | + |
| 112 | +# GitHub Personal Access Token |
| 113 | +# Get from: https://github.com/settings/tokens |
| 114 | +# Used for importing/cloning repositories and accessing private repos |
| 115 | +VITE_GITHUB_ACCESS_TOKEN=your_github_personal_access_token_here |
| 116 | + |
| 117 | +# GitHub Token Type ('classic' or 'fine-grained') |
| 118 | +VITE_GITHUB_TOKEN_TYPE=classic |
99 | 119 |
|
100 | | -# Get your GitHub Personal Access Token here - |
101 | | -# https://github.com/settings/tokens |
| 120 | +# ====================================== |
| 121 | +# GITLAB INTEGRATION |
| 122 | +# ====================================== |
| 123 | + |
| 124 | +# GitLab Personal Access Token |
| 125 | +# Get your GitLab Personal Access Token here: |
| 126 | +# https://gitlab.com/-/profile/personal_access_tokens |
| 127 | +# |
102 | 128 | # This token is used for: |
103 | | -# 1. Importing/cloning GitHub repositories without rate limiting |
104 | | -# 2. Accessing private repositories |
105 | | -# 3. Automatic GitHub authentication (no need to manually connect in the UI) |
106 | | -# |
107 | | -# For classic tokens, ensure it has these scopes: repo, read:org, read:user |
108 | | -# For fine-grained tokens, ensure it has Repository and Organization access |
109 | | -VITE_GITHUB_ACCESS_TOKEN= |
110 | | - |
111 | | -# Specify the type of GitHub token you're using |
112 | | -# Can be 'classic' or 'fine-grained' |
113 | | -# Classic tokens are recommended for broader access |
114 | | -VITE_GITHUB_TOKEN_TYPE=classic |
| 129 | +# 1. Importing/cloning GitLab repositories |
| 130 | +# 2. Accessing private projects |
| 131 | +# 3. Creating/updating branches |
| 132 | +# 4. Creating/updating commits and pushing code |
| 133 | +# 5. Creating new GitLab projects via the API |
| 134 | +# |
| 135 | +# Make sure your token has the following scopes: |
| 136 | +# - api (for full API access including project creation and commits) |
| 137 | +# - read_repository (to clone/import repositories) |
| 138 | +# - write_repository (to push commits and update branches) |
| 139 | +VITE_GITLAB_ACCESS_TOKEN=your_gitlab_personal_access_token_here |
| 140 | + |
| 141 | +# Set the GitLab instance URL (e.g., https://gitlab.com or your self-hosted domain) |
| 142 | +VITE_GITLAB_URL=https://gitlab.com |
| 143 | + |
| 144 | +# GitLab token type should be 'personal-access-token' |
| 145 | +VITE_GITLAB_TOKEN_TYPE=personal-access-token |
| 146 | + |
| 147 | +# ====================================== |
| 148 | +# VERCEL INTEGRATION |
| 149 | +# ====================================== |
| 150 | + |
| 151 | +# Vercel Access Token |
| 152 | +# Get your access token from: https://vercel.com/account/tokens |
| 153 | +# This token is used for: |
| 154 | +# 1. Deploying projects to Vercel |
| 155 | +# 2. Managing Vercel projects and deployments |
| 156 | +# 3. Accessing project analytics and logs |
| 157 | +VITE_VERCEL_ACCESS_TOKEN=your_vercel_access_token_here |
| 158 | + |
| 159 | +# ====================================== |
| 160 | +# NETLIFY INTEGRATION |
| 161 | +# ====================================== |
| 162 | + |
| 163 | +# Netlify Access Token |
| 164 | +# Get your access token from: https://app.netlify.com/user/applications |
| 165 | +# This token is used for: |
| 166 | +# 1. Deploying sites to Netlify |
| 167 | +# 2. Managing Netlify sites and deployments |
| 168 | +# 3. Accessing build logs and analytics |
| 169 | +VITE_NETLIFY_ACCESS_TOKEN=your_netlify_access_token_here |
| 170 | + |
| 171 | +# ====================================== |
| 172 | +# SUPABASE INTEGRATION |
| 173 | +# ====================================== |
| 174 | + |
| 175 | +# Supabase Project Configuration |
| 176 | +# Get your project details from: https://supabase.com/dashboard |
| 177 | +# Select your project → Settings → API |
| 178 | +VITE_SUPABASE_URL=your_supabase_project_url_here |
| 179 | +VITE_SUPABASE_ANON_KEY=your_supabase_anon_key_here |
| 180 | + |
| 181 | +# Supabase Access Token (for management operations) |
| 182 | +# Generate from: https://supabase.com/dashboard/account/tokens |
| 183 | +VITE_SUPABASE_ACCESS_TOKEN=your_supabase_access_token_here |
| 184 | + |
| 185 | +# ====================================== |
| 186 | +# DEVELOPMENT SETTINGS |
| 187 | +# ====================================== |
| 188 | + |
| 189 | +# Development Mode |
| 190 | +NODE_ENV=development |
| 191 | + |
| 192 | +# Application Port (optional, defaults to 5173 for development) |
| 193 | +PORT=5173 |
| 194 | + |
| 195 | +# Logging Level (debug, info, warn, error) |
| 196 | +VITE_LOG_LEVEL=debug |
115 | 197 |
|
116 | | -# Example Context Values for qwen2.5-coder:32b |
117 | | -# |
118 | | -# DEFAULT_NUM_CTX=32768 # Consumes 36GB of VRAM |
119 | | -# DEFAULT_NUM_CTX=24576 # Consumes 32GB of VRAM |
120 | | -# DEFAULT_NUM_CTX=12288 # Consumes 26GB of VRAM |
121 | | -# DEFAULT_NUM_CTX=6144 # Consumes 24GB of VRAM |
122 | | -DEFAULT_NUM_CTX= |
| 198 | +# Default Context Window Size (for local models) |
| 199 | +DEFAULT_NUM_CTX=32768 |
| 200 | + |
| 201 | +# ====================================== |
| 202 | +# SETUP INSTRUCTIONS |
| 203 | +# ====================================== |
| 204 | +# 1. Copy this file to .env.local: cp .env.example .env.local |
| 205 | +# 2. Fill in the API keys for the services you want to use |
| 206 | +# 3. All service integration keys use VITE_ prefix for auto-connection |
| 207 | +# 4. Restart your development server: pnpm run dev |
| 208 | +# 5. Services will auto-connect on startup if tokens are provided |
| 209 | +# 6. Go to Settings > Service tabs to manage connections manually if needed |
0 commit comments