-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
89 lines (75 loc) · 2.96 KB
/
.env.example
File metadata and controls
89 lines (75 loc) · 2.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
# =============================================================================
# GenBI Configuration File
# =============================================================================
# Copy this file to .env and configure your settings
# -----------------------------------------------------------------------------
# API Server Settings
# -----------------------------------------------------------------------------
API_HOST=0.0.0.0
API_PORT=5556
# -----------------------------------------------------------------------------
# LLM Configuration
# -----------------------------------------------------------------------------
# Note: every option below sets LLM_PROVIDER=openai — this is intentional.
# The local backends (Ollama, LM Studio, LocalAI, vLLM, FastChat) all expose
# OpenAI-compatible APIs, so only LLM_MODEL, LLM_API_KEY, and LLM_BASE_URL
# differ between options. Uncomment exactly one option at a time.
# Option 1: OpenAI (Cloud)
# -------------------------
LLM_PROVIDER=openai
LLM_MODEL=gpt-4
LLM_API_KEY=your-openai-api-key-here
# LLM_BASE_URL= # Leave empty for OpenAI cloud
# Option 2: Ollama (Local)
# -------------------------
# LLM_PROVIDER=openai
# LLM_MODEL=llama3.1:8b
# LLM_API_KEY=ollama # Can be any value, Ollama doesn't validate
# LLM_BASE_URL=http://localhost:11434/v1
# Option 3: LM Studio (Local)
# ----------------------------
# LLM_PROVIDER=openai
# LLM_MODEL=your-model-name
# LLM_API_KEY=lm-studio # Can be any value
# LLM_BASE_URL=http://localhost:1234/v1
# Option 4: LocalAI (Local)
# --------------------------
# LLM_PROVIDER=openai
# LLM_MODEL=your-model-name
# LLM_API_KEY=local-ai # Can be any value
# LLM_BASE_URL=http://localhost:8080/v1
# Option 5: OpenAI Compatible API (e.g., vLLM, FastChat)
# -------------------------------------------------------
# LLM_PROVIDER=openai
# LLM_MODEL=your-model-name
# LLM_API_KEY=your-api-key
# LLM_BASE_URL=http://your-server:port/v1
# LLM Parameters (apply to whichever option is active above)
# LLM_TEMPERATURE: 0.0–2.0; low values such as 0.1 make output more
#                  deterministic and consistent across runs.
# LLM_MAX_TOKENS:  upper bound on tokens generated per response.
LLM_TEMPERATURE=0.1
LLM_MAX_TOKENS=2000
# -----------------------------------------------------------------------------
# Embedding Configuration
# -----------------------------------------------------------------------------
# As in the LLM section above, EMBEDDING_PROVIDER stays "openai" even for the
# local backends, because they expose OpenAI-compatible embedding endpoints —
# only EMBEDDING_MODEL, EMBEDDING_API_KEY, and EMBEDDING_BASE_URL change.
# Option 1: OpenAI Embeddings (Cloud)
# ------------------------------------
EMBEDDING_PROVIDER=openai
EMBEDDING_MODEL=text-embedding-3-small
EMBEDDING_API_KEY=your-openai-api-key-here
# EMBEDDING_BASE_URL= # Leave empty for OpenAI cloud
# Option 2: Ollama Embeddings (Local)
# ------------------------------------
# EMBEDDING_PROVIDER=openai
# EMBEDDING_MODEL=nomic-embed-text
# EMBEDDING_API_KEY=ollama # Can be any value
# EMBEDDING_BASE_URL=http://localhost:11434/v1
# Option 3: LocalAI Embeddings (Local)
# -------------------------------------
# EMBEDDING_PROVIDER=openai
# EMBEDDING_MODEL=text-embedding-ada-002
# EMBEDDING_API_KEY=local-ai # Can be any value
# EMBEDDING_BASE_URL=http://localhost:8080/v1
# -----------------------------------------------------------------------------
# Vector Store Settings
# -----------------------------------------------------------------------------
VECTOR_STORE_PATH=./data/vector_store
# -----------------------------------------------------------------------------
# System Settings
# -----------------------------------------------------------------------------
DEBUG=false
LOG_LEVEL=INFO