-
Notifications
You must be signed in to change notification settings - Fork 15
Expand file tree
/
Copy path.env.example
More file actions
43 lines (33 loc) · 847 Bytes
/
.env.example
File metadata and controls
43 lines (33 loc) · 847 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
# Example environment configuration for AI Model Tool Call Proxy Server
# Copy this file to .env and modify the values as needed
# Backend server configuration
BACKEND_HOST=localhost
BACKEND_PORT=8888
BACKEND_PROTOCOL=http
# Proxy server configuration
PROXY_HOST=0.0.0.0
PROXY_PORT=5000
# Request settings (timeout value is presumably in seconds — verify against the proxy server code)
REQUEST_TIMEOUT=300
# Feature toggles
ENABLE_TOOL_CALL_CONVERSION=true
# Environment and logging
# NOTE(review): FLASK_ENV is deprecated in Flask >= 2.3 (use `flask run --debug` or app.debug instead);
# confirm the Flask version this project pins before relying on it.
FLASK_ENV=development
LOG_LEVEL=INFO
DEBUG=true
# Example configurations for different backends:
# For LM Studio (default)
# BACKEND_HOST=localhost
# BACKEND_PORT=8888
# For Ollama
# BACKEND_HOST=localhost
# BACKEND_PORT=11434
# For remote OpenAI API
# BACKEND_HOST=api.openai.com
# BACKEND_PORT=443
# BACKEND_PROTOCOL=https
# For production deployment
# FLASK_ENV=production
# LOG_LEVEL=WARNING
# DEBUG=false
# REQUEST_TIMEOUT=60