-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconfig.example.toml
More file actions
50 lines (39 loc) · 1.24 KB
/
config.example.toml
File metadata and controls
50 lines (39 loc) · 1.24 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
# Axion Configuration File Example
# Copy this file to config.toml and customize as needed

[server]
# Interface and port the HTTP server binds to
host = "0.0.0.0"
port = 3000

[model]
# Model to serve (can be any HuggingFace model or local path)
# Examples:
# - "meta-llama/Llama-3.2-3B-Instruct"
# - "mistralai/Mistral-7B-Instruct-v0.2"
# - "unsloth/gemma-3-270m-it"
# - "Qwen/Qwen2.5-7B-Instruct"
name = "meta-llama/Llama-3.2-3B-Instruct"

# Model loading timeout in seconds (for MAX cold starts).
# Increase this if your model takes longer to load.
max_startup_timeout = 120

[cache]
# Enable request caching (speeds up repeated requests)
enabled = true

# Maximum number of cached entries (LRU eviction)
max_entries = 1000

[batching]
# Enable continuous batching (improves throughput)
enabled = true

# Maximum batch size (number of requests processed together)
max_batch_size = 8

# Maximum wait time in milliseconds before processing a batch
max_wait_ms = 50

[streaming]
# Enable streaming by default for chat completions.
# Set to false if you prefer non-streaming responses.
default = true

[logging]
# Log level: trace, debug, info, warn, error
level = "info"

# Additional per-module log levels.
# Format: "module=level,another_module=level"
modules = "axion=info,tower_http=info"