-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
41 lines (36 loc) · 1.43 KB
/
.env.example
File metadata and controls
41 lines (36 loc) · 1.43 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
# eBPF-LLM NetSentinel Environment Variables
# Copy this file to .env and fill in your configuration
# ==================== LLM Backend Selection ====================
# Options: zhipuai (cloud, default), ollama (local service), huggingface (local model)
LLM_BACKEND=zhipuai
# ==================== Zhipu AI Configuration (Cloud) ====================
ZHIPUAI_API_KEY=your_api_key_here
ZHIPUAI_BASE_URL=https://open.bigmodel.cn/api/paas/v4/
ZHIPUAI_MODEL=glm-4-flash
# ==================== Ollama Configuration (Local Service) ====================
OLLAMA_HOST=localhost
OLLAMA_PORT=11434
OLLAMA_MODEL=qwen3:8b
# ==================== HuggingFace Configuration (Local Model) ====================
# Model ID (HuggingFace Hub) or local path
HF_MODEL=Qwen/Qwen2.5-1.5B-Instruct
# HF_MODEL=/path/to/local/model
# Device: cuda, cpu, or auto (comment kept on its own line — some env-file
# parsers, e.g. Docker --env-file, would otherwise include it in the value)
HF_DEVICE=auto
# Quantization: 4bit, 8bit, or none (quantization requires bitsandbytes)
HF_QUANTIZE=none
# ==================== Usage Examples ====================
#
# Using Zhipu AI (default):
# python3 llm_analyzer.py
#
# Using Ollama:
# python3 llm_analyzer.py --backend ollama
#
# Using HuggingFace (auto-download):
# python3 llm_analyzer.py --backend huggingface --hf-model Qwen/Qwen2.5-1.5B-Instruct
#
# Using local model path:
# python3 llm_analyzer.py --backend huggingface --hf-model /path/to/model
#
# Using 4-bit quantization to save VRAM:
# pip install bitsandbytes
# python3 llm_analyzer.py --backend huggingface --hf-quantize 4bit