Skip to content

Commit f99f696

Browse files
committed
.
1 parent 72a4915 commit f99f696

File tree

8 files changed

+163
-7
lines changed

8 files changed

+163
-7
lines changed

.env.example

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,25 @@
11
# Translation API Configuration
2+
# Legacy API endpoint (deprecated, kept for backward compatibility)
3+
# Use provider-specific endpoints below instead
24
API_ENDPOINT=http://localhost:11434/api/generate
5+
6+
# Provider-specific API endpoints (recommended)
7+
# Ollama local server endpoint
8+
OLLAMA_API_ENDPOINT=http://localhost:11434/api/generate
9+
# OpenAI-compatible endpoint (OpenAI, LM Studio, llama.cpp, etc.)
10+
OPENAI_API_ENDPOINT=https://api.openai.com/v1/chat/completions
11+
312
DEFAULT_MODEL=qwen3:14b
413

514
# Server Configuration
615
PORT=5000 # Port for the web interface
716
HOST=127.0.0.1 # Server host (127.0.0.1 for localhost only, 0.0.0.0 for all network interfaces)
817
OUTPUT_DIR=translated_files # Directory for translated output files
918

19+
# Output filename pattern (naming convention)
20+
# Use {originalName}, {targetLang}, {sourceLang}, {model}, {ext} as placeholders
21+
OUTPUT_FILENAME_PATTERN={originalName} ({targetLang}).{ext}
22+
1023
# LLM Provider Settings
1124
# Options: ollama, poe, gemini, openai, openrouter, mistral, deepseek
1225
LLM_PROVIDER=ollama

src/api/blueprints/config_routes.py

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,8 @@ def get_config_path():
2626

2727
from src.config import (
2828
API_ENDPOINT as DEFAULT_OLLAMA_API_ENDPOINT,
29+
OLLAMA_API_ENDPOINT,
30+
OPENAI_API_ENDPOINT,
2931
DEFAULT_MODEL,
3032
REQUEST_TIMEOUT,
3133
OLLAMA_NUM_CTX,
@@ -44,7 +46,8 @@ def get_config_path():
4446
DEEPSEEK_MODEL,
4547
POE_API_KEY,
4648
POE_MODEL,
47-
MAX_TOKENS_PER_CHUNK
49+
MAX_TOKENS_PER_CHUNK,
50+
OUTPUT_FILENAME_PATTERN
4851
)
4952

5053
# Setup logger for this module
@@ -139,6 +142,8 @@ def mask_api_key(key):
139142

140143
config_response = {
141144
"api_endpoint": DEFAULT_OLLAMA_API_ENDPOINT,
145+
"ollama_api_endpoint": OLLAMA_API_ENDPOINT,
146+
"openai_api_endpoint": OPENAI_API_ENDPOINT,
142147
"default_model": DEFAULT_MODEL,
143148
"timeout": REQUEST_TIMEOUT,
144149
"context_window": OLLAMA_NUM_CTX,
@@ -156,7 +161,8 @@ def mask_api_key(key):
156161
"openrouter_api_key_configured": bool(OPENROUTER_API_KEY),
157162
"mistral_api_key_configured": bool(MISTRAL_API_KEY),
158163
"deepseek_api_key_configured": bool(DEEPSEEK_API_KEY),
159-
"poe_api_key_configured": bool(POE_API_KEY)
164+
"poe_api_key_configured": bool(POE_API_KEY),
165+
"output_filename_pattern": OUTPUT_FILENAME_PATTERN
160166
}
161167

162168
return jsonify(config_response)
@@ -771,7 +777,10 @@ def save_settings():
771777
'POE_MODEL',
772778
'DEFAULT_MODEL',
773779
'LLM_PROVIDER',
774-
'API_ENDPOINT'
780+
'API_ENDPOINT',
781+
'OLLAMA_API_ENDPOINT',
782+
'OPENAI_API_ENDPOINT',
783+
'OUTPUT_FILENAME_PATTERN'
775784
}
776785

777786
try:
@@ -821,7 +830,9 @@ def get_settings():
821830
"poe_api_key_configured": bool(POE_API_KEY),
822831
"default_model": DEFAULT_MODEL or "",
823832
"llm_provider": os.getenv('LLM_PROVIDER', 'ollama'),
824-
"api_endpoint": DEFAULT_OLLAMA_API_ENDPOINT or ""
833+
"api_endpoint": DEFAULT_OLLAMA_API_ENDPOINT or "",
834+
"ollama_api_endpoint": OLLAMA_API_ENDPOINT or "",
835+
"openai_api_endpoint": OPENAI_API_ENDPOINT or ""
825836
})
826837

827838
return bp

src/config.py

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,12 @@
8686
_config_logger.debug(f"📁 Loaded .env from: {_env_file.absolute()}")
8787

8888
# Load from environment variables with defaults
89-
API_ENDPOINT = os.getenv('API_ENDPOINT', 'http://localhost:11434/api/generate')
89+
# Ollama endpoint configuration (provider-specific)
90+
OLLAMA_API_ENDPOINT = os.getenv('OLLAMA_API_ENDPOINT', 'http://localhost:11434/api/generate')
91+
# OpenAI-compatible endpoint configuration (for OpenAI, LM Studio, etc.)
92+
OPENAI_API_ENDPOINT = os.getenv('OPENAI_API_ENDPOINT', 'https://api.openai.com/v1/chat/completions')
93+
# Legacy API_ENDPOINT for backward compatibility (defaults to Ollama endpoint)
94+
API_ENDPOINT = os.getenv('API_ENDPOINT', OLLAMA_API_ENDPOINT)
9095
DEFAULT_MODEL = os.getenv('DEFAULT_MODEL', 'qwen3:14b')
9196
PORT = int(os.getenv('PORT', '5000'))
9297
REQUEST_TIMEOUT = int(os.getenv('REQUEST_TIMEOUT', '900'))
@@ -233,6 +238,10 @@
233238
HOST = os.getenv('HOST', '127.0.0.1')
234239
OUTPUT_DIR = os.getenv('OUTPUT_DIR', 'translated_files')
235240

241+
# Output filename pattern
242+
# Use {originalName}, {targetLang}, {sourceLang}, {model}, {ext} as placeholders
243+
OUTPUT_FILENAME_PATTERN = os.getenv('OUTPUT_FILENAME_PATTERN', '{originalName} ({targetLang}).{ext}')
244+
236245
# Debug mode (reload after .env is loaded)
237246
DEBUG_MODE = os.getenv('DEBUG_MODE', 'false').lower() == 'true'
238247

src/web/static/js/core/settings-manager.js

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@ const LOCAL_SETTINGS = [
6161
'lastTargetLanguage',
6262
'lastApiEndpoint',
6363
'lastOpenaiEndpoint',
64+
'outputFilenamePattern',
6465
'ttsEnabled',
6566
'textCleanup',
6667
'refineTranslation',
@@ -150,6 +151,8 @@ export const SettingsManager = {
150151
// API endpoints
151152
{ id: 'apiEndpoint', event: 'change' },
152153
{ id: 'openaiEndpoint', event: 'change' },
154+
// Output filename pattern
155+
{ id: 'outputFilenamePattern', event: 'change' },
153156
// API keys (save to .env)
154157
{ id: 'geminiApiKey', event: 'change' },
155158
{ id: 'openaiApiKey', event: 'change' },
@@ -299,6 +302,11 @@ export const SettingsManager = {
299302
DomHelpers.setValue('openaiEndpoint', prefs.lastOpenaiEndpoint);
300303
}
301304

305+
// Apply output filename pattern (naming convention)
306+
if (prefs.outputFilenamePattern) {
307+
DomHelpers.setValue('outputFilenamePattern', prefs.outputFilenamePattern);
308+
}
309+
302310
// Apply last provider AFTER endpoints are set
303311
// This triggers model loading with the correct endpoint
304312
if (prefs.lastProvider) {
@@ -412,6 +420,7 @@ export const SettingsManager = {
412420
lastTargetLanguage: this._getLanguageValue('targetLang', 'customTargetLang'),
413421
lastApiEndpoint: DomHelpers.getValue('apiEndpoint'),
414422
lastOpenaiEndpoint: DomHelpers.getValue('openaiEndpoint'),
423+
outputFilenamePattern: DomHelpers.getValue('outputFilenamePattern'),
415424
ttsEnabled: ttsEnabledCheckbox ? ttsEnabledCheckbox.checked : false,
416425
textCleanup: textCleanupCheckbox ? textCleanupCheckbox.checked : false,
417426
refineTranslation: refineTranslationCheckbox ? refineTranslationCheckbox.checked : false,
@@ -502,6 +511,22 @@ export const SettingsManager = {
502511
if (key) envSettings['POE_API_KEY'] = key;
503512
}
504513

514+
// Save endpoints to .env
515+
const ollamaEndpoint = DomHelpers.getValue('apiEndpoint');
516+
const openaiEndpoint = DomHelpers.getValue('openaiEndpoint');
517+
if (ollamaEndpoint) {
518+
envSettings['OLLAMA_API_ENDPOINT'] = ollamaEndpoint;
519+
}
520+
if (openaiEndpoint) {
521+
envSettings['OPENAI_API_ENDPOINT'] = openaiEndpoint;
522+
}
523+
524+
// Save output filename pattern (naming convention)
525+
const filenamePattern = DomHelpers.getValue('outputFilenamePattern');
526+
if (filenamePattern) {
527+
envSettings['OUTPUT_FILENAME_PATTERN'] = filenamePattern;
528+
}
529+
505530
// Also save provider and model as defaults
506531
envSettings['LLM_PROVIDER'] = provider;
507532
const model = DomHelpers.getValue('model');

src/web/static/js/index.js

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -476,6 +476,20 @@ window.loadResumableJobs = ResumeManager.loadResumableJobs.bind(ResumeManager);
476476
// Provider Manager
477477
window.refreshModels = ProviderManager.refreshModels.bind(ProviderManager);
478478

479+
// Settings Manager
480+
window.saveSettings = async () => {
481+
const result = await SettingsManager.saveAllSettings(true);
482+
if (result.success && result.savedToEnv && result.savedToEnv.length > 0) {
483+
MessageLogger.showMessage(`✅ Settings saved: ${result.savedToEnv.join(', ')}`, 'success');
484+
MessageLogger.addLog(`💾 Saved to .env: ${result.savedToEnv.join(', ')}`);
485+
} else if (result.success) {
486+
MessageLogger.showMessage('✅ Preferences saved', 'success');
487+
} else {
488+
MessageLogger.showMessage(`❌ Failed to save: ${result.error}`, 'error');
489+
}
490+
return result;
491+
};
492+
479493
// Message Logger
480494
window.clearActivityLog = MessageLogger.clearLog.bind(MessageLogger);
481495

src/web/static/js/ui/form-manager.js

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -353,10 +353,22 @@ export const FormManager = {
353353
// Pass empty string as default - setDefaultLanguage will keep existing "Other" selection
354354
setDefaultLanguage('sourceLang', 'customSourceLang', '')
355355

356-
// Set other configuration values
357-
if (config.api_endpoint) {
356+
// Set provider-specific API endpoints
357+
// Ollama endpoint (for Ollama provider)
358+
if (config.ollama_api_endpoint) {
359+
DomHelpers.setValue('apiEndpoint', config.ollama_api_endpoint);
360+
} else if (config.api_endpoint) {
361+
// Fallback to legacy api_endpoint for backward compatibility
358362
DomHelpers.setValue('apiEndpoint', config.api_endpoint);
359363
}
364+
// OpenAI endpoint (for OpenAI-compatible providers like OpenAI, LM Studio)
365+
if (config.openai_api_endpoint) {
366+
DomHelpers.setValue('openaiEndpoint', config.openai_api_endpoint);
367+
}
368+
// Output filename pattern (naming convention)
369+
if (config.output_filename_pattern) {
370+
DomHelpers.setValue('outputFilenamePattern', config.output_filename_pattern);
371+
}
360372
// Handle API keys - show indicator if configured in .env, otherwise keep placeholder
361373
ApiKeyUtils.setupField('geminiApiKey', config.gemini_api_key_configured, config.gemini_api_key);
362374
ApiKeyUtils.setupField('openaiApiKey', config.openai_api_key_configured, config.openai_api_key);

src/web/templates/translation_interface.html

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -316,6 +316,17 @@ <h3>Drop files to translate</h3>
316316
</div>
317317
<small style="color: var(--text-muted-light); font-size: 0.75rem; margin-top: 0.5rem; display: block;">Placeholders: {originalName}, {targetLang}, {ext} — Example: Book (French).epub</small>
318318
</div>
319+
320+
<!-- Save Settings Button -->
321+
<div class="form-group" style="margin-top: 20px; padding-top: 15px; border-top: 1px solid var(--border-color);">
322+
<button type="button" id="saveSettingsBtn" class="btn btn-primary" style="width: 100%;" onclick="saveSettings()">
323+
<span class="material-symbols-outlined">save</span>
324+
Save Settings to .env
325+
</button>
326+
<p style="margin: 8px 0 0 0; font-size: 0.8125rem; color: var(--text-muted-light);">
327+
Saves API keys, endpoints, naming convention, and current model as default values in the <code>.env</code> file.
328+
</p>
329+
</div>
319330
</div>
320331

321332
<!-- Options (Collapsible) -->

test_openai_server.py

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
#!/usr/bin/env python3
"""Quick smoke test for an OpenAI-compatible chat-completions server.

Runs two checks against ``ENDPOINT``:

1. GET the derived ``/models`` endpoint and list available models.
2. POST a minimal ``/chat/completions`` request and print the reply.

Run directly: ``python test_openai_server.py``.
"""
import json  # NOTE(review): unused at the moment; kept per file-history — confirm before removing

import requests

# --- Configuration -------------------------------------------------------
# NOTE(review): internal hostname committed here — confirm this should not
# come from an environment variable (e.g. OPENAI_API_ENDPOINT) instead.
ENDPOINT = "http://ai_server.mds.com/v1/chat/completions"
API_KEY = ""  # Laissez vide si pas de clé requise
MODEL = "gpt-3.5-turbo"  # Adaptez selon votre serveur


def _build_headers():
    """Return the JSON request headers, adding a Bearer token only when API_KEY is set."""
    headers = {
        "Content-Type": "application/json"
    }
    if API_KEY:
        headers["Authorization"] = f"Bearer {API_KEY}"
    return headers


def _test_model_listing(headers):
    """Test 1: fetch the /models endpoint (derived from ENDPOINT) and print up to 5 model ids."""
    print("=" * 50)
    print("Test 1: Récupération des modèles")
    print("=" * 50)
    try:
        resp = requests.get(ENDPOINT.replace('/chat/completions', '/models'),
                            headers=headers, timeout=10)
        print(f"Status: {resp.status_code}")
        if resp.status_code == 200:
            models = resp.json()
            print("✅ Serveur accessible!")
            print(f"Modèles disponibles: {len(models.get('data', []))}")
            for m in models.get('data', [])[:5]:
                print(f" - {m.get('id')}")
        else:
            print(f"❌ Erreur: {resp.text}")
    except Exception as e:
        # Deliberately broad: any failure (DNS, timeout, malformed JSON)
        # is reported rather than crashing the smoke test.
        print(f"❌ Erreur de connexion: {e}")


def _test_chat_completion(headers):
    """Test 2: send a minimal chat request and print the first choice's content."""
    print()
    print("=" * 50)
    print("Test 2: Requête de chat")
    print("=" * 50)
    payload = {
        "model": MODEL,
        "messages": [
            {"role": "system", "content": "Vous êtes un assistant utile."},
            {"role": "user", "content": "Dites 'Test réussi!' en français."}
        ],
        "temperature": 0.7
    }
    try:
        resp = requests.post(ENDPOINT, headers=headers, json=payload, timeout=30)
        print(f"Status: {resp.status_code}")
        if resp.status_code == 200:
            result = resp.json()
            # KeyError/IndexError on an unexpected shape is caught below.
            message = result['choices'][0]['message']['content']
            print("✅ Réponse reçue!")
            print(f"Réponse: {message}")
        else:
            print(f"❌ Erreur: {resp.text}")
    except Exception as e:
        # Deliberately broad: report any failure instead of raising.
        print(f"❌ Erreur: {e}")


def main():
    """Run both connectivity tests against the configured server."""
    headers = _build_headers()
    _test_model_listing(headers)
    _test_chat_completion(headers)


# Guard so importing this module no longer fires network requests as a
# side effect (the original ran both tests at import time).
if __name__ == "__main__":
    main()

0 commit comments

Comments
 (0)