Skip to content

Commit 5c3809c

Browse files
committed
chore: Update default LLM model from mistral-small:24b to qwen3:14b
1 parent 5eea568 commit 5c3809c

File tree

9 files changed

+15
-15
lines changed

9 files changed

+15
-15
lines changed

.env.example

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# Translation API Configuration
22
API_ENDPOINT=http://localhost:11434/api/generate
3-
DEFAULT_MODEL=mistral-small:24b
3+
DEFAULT_MODEL=qwen3:14b
44

55
# Server Configuration
66
PORT=5000 # Port for the web interface

DOCKER.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ docker run -d \
1818
-v $(pwd)/translated_files:/app/translated_files \
1919
-v $(pwd)/logs:/app/logs \
2020
-e API_ENDPOINT=http://host.docker.internal:11434/api/generate \
21-
-e DEFAULT_MODEL=mistral-small:24b \
21+
-e DEFAULT_MODEL=qwen3:14b \
2222
ghcr.io/bropro/translatebookwithllm:latest
2323
```
2424

@@ -44,7 +44,7 @@ Edit the `.env` file to configure your LLM settings:
4444

4545
```env
4646
API_ENDPOINT=http://host.docker.internal:11434/api/generate
47-
DEFAULT_MODEL=mistral-small:24b
47+
DEFAULT_MODEL=qwen3:14b
4848
LLM_PROVIDER=ollama
4949
PORT=5000
5050
OLLAMA_NUM_CTX=2048
@@ -95,7 +95,7 @@ Docker automatically pulls the correct architecture for your system.
9595
| Variable | Description | Default |
9696
|----------|-------------|---------|
9797
| `API_ENDPOINT` | LLM API endpoint | `http://localhost:11434/api/generate` |
98-
| `DEFAULT_MODEL` | Default LLM model | `mistral-small:24b` |
98+
| `DEFAULT_MODEL` | Default LLM model | `qwen3:14b` |
9999
| `LLM_PROVIDER` | Provider (ollama/gemini/openai) | `ollama` |
100100
| `GEMINI_API_KEY` | Gemini API key | - |
101101
| `OPENAI_API_KEY` | OpenAI API key | - |
@@ -132,7 +132,7 @@ chmod 755 translated_files logs data
132132
docker run -d \
133133
-p 5000:5000 \
134134
-e API_ENDPOINT=http://host.docker.internal:11434/api/generate \
135-
-e DEFAULT_MODEL=mistral-small:24b \
135+
-e DEFAULT_MODEL=qwen3:14b \
136136
ghcr.io/bropro/translatebookwithllm:latest
137137
```
138138

@@ -157,7 +157,7 @@ services:
157157
- "5000:5000"
158158
environment:
159159
- API_ENDPOINT=http://ollama:11434/api/generate
160-
- DEFAULT_MODEL=mistral-small:24b
160+
- DEFAULT_MODEL=qwen3:14b
161161
depends_on:
162162
- ollama
163163

deployment/.env.docker.example

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ LLM_PROVIDER=ollama
2222
API_ENDPOINT=http://host.docker.internal:11434/api/generate
2323

2424
# Model to use (must be installed in your local Ollama)
25-
DEFAULT_MODEL=mistral-small:24b
25+
DEFAULT_MODEL=qwen3:14b
2626

2727
# Context window size (8192 recommended for chunk_size=25)
2828
OLLAMA_NUM_CTX=8192

deployment/TESTING.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -252,7 +252,7 @@ docker-compose logs
252252
# Linux:
253253
# API_ENDPOINT=http://172.17.0.1:11434/api/generate
254254
255-
DEFAULT_MODEL=mistral-small:24b
255+
DEFAULT_MODEL=qwen3:14b
256256
```
257257

258258
3. Verify Ollama is accessible from container:

deployment/docker-compose.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ services:
2323
# Use host.docker.internal to access host machine from Docker on Windows/Mac
2424
# On Linux, use --network host or the host's IP address
2525
- API_ENDPOINT=${API_ENDPOINT:-http://host.docker.internal:11434/api/generate}
26-
- DEFAULT_MODEL=${DEFAULT_MODEL:-mistral-small:24b}
26+
- DEFAULT_MODEL=${DEFAULT_MODEL:-qwen3:14b}
2727
- OLLAMA_NUM_CTX=${OLLAMA_NUM_CTX:-8192}
2828

2929
# Gemini Configuration (if using Gemini provider)

src/config.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@
3434
print("🔧 DEFAULT SETTINGS BEING USED:")
3535
print(f" • API Endpoint: http://localhost:11434/api/generate")
3636
print(f" • LLM Provider: ollama")
37-
print(f" • Model: mistral-small:24b")
37+
print(f" • Model: qwen3:14b")
3838
print(f" • Port: 5000")
3939
print(f"\n💡 TIP: If using a remote server or different provider, you MUST")
4040
print(f" create a .env file with the correct settings.\n")
@@ -55,7 +55,7 @@
5555

5656
# Load from environment variables with defaults
5757
API_ENDPOINT = os.getenv('API_ENDPOINT', 'http://localhost:11434/api/generate')
58-
DEFAULT_MODEL = os.getenv('DEFAULT_MODEL', 'mistral-small:24b')
58+
DEFAULT_MODEL = os.getenv('DEFAULT_MODEL', 'qwen3:14b')
5959
PORT = int(os.getenv('PORT', '5000'))
6060
MAIN_LINES_PER_CHUNK = int(os.getenv('MAIN_LINES_PER_CHUNK', '25'))
6161
REQUEST_TIMEOUT = int(os.getenv('REQUEST_TIMEOUT', '900'))

src/core/context_optimizer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,7 @@ def get_max_model_context(model_name: str) -> int:
158158
Get maximum context size for a model based on its family.
159159
160160
Args:
161-
model_name: Name of the model (e.g., "mistral-small:24b")
161+
model_name: Name of the model (e.g., "qwen3:14b")
162162
163163
Returns:
164164
Maximum context size in tokens

src/utils/env_helper.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ def validate_env_config(verbose: bool = True) -> dict:
6161
# Check critical configuration
6262
api_endpoint = os.getenv('API_ENDPOINT', 'http://localhost:11434/api/generate')
6363
llm_provider = os.getenv('LLM_PROVIDER', 'ollama')
64-
default_model = os.getenv('DEFAULT_MODEL', 'mistral-small:24b')
64+
default_model = os.getenv('DEFAULT_MODEL', 'qwen3:14b')
6565
gemini_key = os.getenv('GEMINI_API_KEY', '')
6666
openai_key = os.getenv('OPENAI_API_KEY', '')
6767

@@ -142,7 +142,7 @@ def interactive_env_setup():
142142

143143
if config['LLM_PROVIDER'] == 'ollama':
144144
config['API_ENDPOINT'] = input(" Ollama API endpoint [http://localhost:11434/api/generate]: ").strip() or 'http://localhost:11434/api/generate'
145-
config['DEFAULT_MODEL'] = input(" Model name [mistral-small:24b]: ").strip() or 'mistral-small:24b'
145+
config['DEFAULT_MODEL'] = input(" Model name [qwen3:14b]: ").strip() or 'qwen3:14b'
146146

147147
elif config['LLM_PROVIDER'] == 'gemini':
148148
config['GEMINI_API_KEY'] = input(" Gemini API Key: ").strip()

src/web/static/js/providers/model-detector.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import { DomHelpers } from '../ui/dom-helpers.js';
99

1010
/**
1111
* Extract parameter size from model name
12-
* @param {string} modelName - Model name (e.g., "mistral-small:7b", "llama-12b")
12+
* @param {string} modelName - Model name (e.g., "qwen3:14b", "llama-12b")
1313
* @returns {number|null} Size in billions of parameters, or null if not detected
1414
*/
1515
function extractModelSize(modelName) {

0 commit comments

Comments (0)