@@ -18,7 +18,7 @@ docker run -d \
1818 -v $(pwd)/translated_files:/app/translated_files \
1919 -v $(pwd)/logs:/app/logs \
2020 -e API_ENDPOINT=http://host.docker.internal:11434/api/generate \
21- -e DEFAULT_MODEL=mistral-small:24b \
21+ -e DEFAULT_MODEL=qwen3:14b \
2222 ghcr.io/bropro/translatebookwithllm:latest
2323```
2424
@@ -44,7 +44,7 @@ Edit the `.env` file to configure your LLM settings:
4444
4545``` env
4646API_ENDPOINT=http://host.docker.internal:11434/api/generate
47- DEFAULT_MODEL=mistral-small:24b
47+ DEFAULT_MODEL=qwen3:14b
4848LLM_PROVIDER=ollama
4949PORT=5000
5050OLLAMA_NUM_CTX=2048
@@ -95,7 +95,7 @@ Docker automatically pulls the correct architecture for your system.
9595| Variable | Description | Default |
9696| ----------| -------------| ---------|
9797| ` API_ENDPOINT ` | LLM API endpoint | ` http://localhost:11434/api/generate ` |
98- | ` DEFAULT_MODEL ` | Default LLM model | ` mistral-small:24b ` |
98+ | ` DEFAULT_MODEL ` | Default LLM model | ` qwen3:14b ` |
9999| ` LLM_PROVIDER ` | Provider (ollama/gemini/openai) | ` ollama ` |
100100| ` GEMINI_API_KEY ` | Gemini API key | - |
101101| ` OPENAI_API_KEY ` | OpenAI API key | - |
@@ -132,7 +132,7 @@ chmod 755 translated_files logs data
132132docker run -d \
133133 -p 5000:5000 \
134134 -e API_ENDPOINT=http://host.docker.internal:11434/api/generate \
135- -e DEFAULT_MODEL=mistral-small:24b \
135+ -e DEFAULT_MODEL=qwen3:14b \
136136 ghcr.io/bropro/translatebookwithllm:latest
137137```
138138
@@ -157,7 +157,7 @@ services:
157157 - "5000:5000"
158158 environment:
159159 - API_ENDPOINT=http://ollama:11434/api/generate
160- - DEFAULT_MODEL=mistral-small:24b
160+ - DEFAULT_MODEL=qwen3:14b
161161 depends_on:
162162 - ollama
163163
0 commit comments