Skip to content

Commit 0798f6c

Browse files
hydropixclaude
and committed
feat: Add DEBUG_MODE for troubleshooting configuration issues
- Add DEBUG_MODE option in .env to enable verbose logging
- Log .env file loading status and working directory
- Log all loaded configuration values at startup
- Add detailed logging for /api/config and /api/models endpoints
- Test Ollama connection at server startup
- Increase Ollama timeout from 5s to 10s
- Distinguish connection errors (ConnectionError, Timeout, etc.)

Helps diagnose issues like #84 where models or languages are not detected.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
1 parent 912d3af commit 0798f6c

File tree

4 files changed

+133
-6
lines changed

4 files changed

+133
-6
lines changed

.env.example

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,4 +42,10 @@ SRT_MAX_CHARS_PER_BLOCK=500
4242
# This adds a discrete attribution to your translations (metadata for EPUB, footer for TXT, comment for SRT)
4343
# Please consider keeping this enabled to support the project and help others discover this free tool!
4444
# The signature is non-intrusive and placed at the end of files. Thank you for your support!
45-
SIGNATURE_ENABLED=true
45+
SIGNATURE_ENABLED=true
46+
47+
# Debug Mode
48+
# Enable verbose logging for troubleshooting configuration and connection issues.
49+
# Set to 'true' to see detailed logs about .env loading, API calls, and configuration values.
50+
# Useful when models are not detected or languages are not applied correctly.
51+
DEBUG_MODE=false

src/api/blueprints/config_routes.py

Lines changed: 54 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
"""
44
import os
55
import asyncio
6+
import logging
67
import requests
78
from flask import Blueprint, request, jsonify, send_from_directory
89

@@ -15,9 +16,15 @@
1516
MAX_TRANSLATION_ATTEMPTS,
1617
RETRY_DELAY_SECONDS,
1718
DEFAULT_SOURCE_LANGUAGE,
18-
DEFAULT_TARGET_LANGUAGE
19+
DEFAULT_TARGET_LANGUAGE,
20+
DEBUG_MODE
1921
)
2022

23+
# Setup logger for this module
24+
logger = logging.getLogger('config_routes')
25+
if DEBUG_MODE:
26+
logger.setLevel(logging.DEBUG)
27+
2128

2229
def create_config_blueprint():
2330
"""Create and configure the config blueprint"""
@@ -57,7 +64,7 @@ def get_default_config():
5764
"""Get default configuration values"""
5865
gemini_api_key = os.getenv('GEMINI_API_KEY', '')
5966

60-
return jsonify({
67+
config_response = {
6168
"api_endpoint": DEFAULT_OLLAMA_API_ENDPOINT,
6269
"default_model": DEFAULT_MODEL,
6370
"chunk_size": MAIN_LINES_PER_CHUNK,
@@ -69,7 +76,16 @@ def get_default_config():
6976
"gemini_api_key": gemini_api_key,
7077
"default_source_language": DEFAULT_SOURCE_LANGUAGE,
7178
"default_target_language": DEFAULT_TARGET_LANGUAGE
72-
})
79+
}
80+
81+
if DEBUG_MODE:
82+
logger.debug(f"📤 /api/config response:")
83+
logger.debug(f" default_source_language: {DEFAULT_SOURCE_LANGUAGE}")
84+
logger.debug(f" default_target_language: {DEFAULT_TARGET_LANGUAGE}")
85+
logger.debug(f" api_endpoint: {DEFAULT_OLLAMA_API_ENDPOINT}")
86+
logger.debug(f" default_model: {DEFAULT_MODEL}")
87+
88+
return jsonify(config_response)
7389

7490
def _get_gemini_models():
7591
"""Get available models from Gemini API"""
@@ -124,25 +140,59 @@ def _get_ollama_models():
124140
"""Get available models from Ollama API"""
125141
ollama_base_from_ui = request.args.get('api_endpoint', DEFAULT_OLLAMA_API_ENDPOINT)
126142

143+
if DEBUG_MODE:
144+
logger.debug(f"📥 /api/models request for Ollama")
145+
logger.debug(f" api_endpoint from UI: {ollama_base_from_ui}")
146+
logger.debug(f" default endpoint: {DEFAULT_OLLAMA_API_ENDPOINT}")
147+
127148
try:
128149
base_url = ollama_base_from_ui.split('/api/')[0]
129150
tags_url = f"{base_url}/api/tags"
130-
response = requests.get(tags_url, timeout=5)
151+
152+
if DEBUG_MODE:
153+
logger.debug(f" Connecting to: {tags_url}")
154+
155+
response = requests.get(tags_url, timeout=10) # Increased timeout from 5 to 10
156+
157+
if DEBUG_MODE:
158+
logger.debug(f" Response status: {response.status_code}")
131159

132160
if response.status_code == 200:
133161
data = response.json()
134162
models_data = data.get('models', [])
135163
model_names = [m.get('name') for m in models_data if m.get('name')]
136164

165+
if DEBUG_MODE:
166+
logger.debug(f" Models found: {model_names}")
167+
137168
return jsonify({
138169
"models": model_names,
139170
"default": DEFAULT_MODEL if DEFAULT_MODEL in model_names else (model_names[0] if model_names else DEFAULT_MODEL),
140171
"status": "ollama_connected",
141172
"count": len(model_names)
142173
})
174+
else:
175+
if DEBUG_MODE:
176+
logger.debug(f" ❌ Non-200 response: {response.status_code}")
177+
logger.debug(f" Response body: {response.text[:500]}")
178+
179+
except requests.exceptions.ConnectionError as e:
180+
error_msg = f"Connection refused to {tags_url}. Is Ollama running?"
181+
if DEBUG_MODE:
182+
logger.debug(f" ❌ ConnectionError: {e}")
183+
print(f"❌ {error_msg}")
184+
except requests.exceptions.Timeout as e:
185+
error_msg = f"Timeout connecting to {tags_url} (10s)"
186+
if DEBUG_MODE:
187+
logger.debug(f" ❌ Timeout: {e}")
188+
print(f"❌ {error_msg}")
143189
except requests.exceptions.RequestException as e:
190+
if DEBUG_MODE:
191+
logger.debug(f" ❌ RequestException: {type(e).__name__}: {e}")
144192
print(f"❌ Could not connect to Ollama at {ollama_base_from_ui}: {e}")
145193
except Exception as e:
194+
if DEBUG_MODE:
195+
logger.debug(f" ❌ Unexpected error: {type(e).__name__}: {e}")
146196
print(f"❌ Error retrieving models from {ollama_base_from_ui}: {e}")
147197

148198
return jsonify({

src/config.py

Lines changed: 42 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,15 +3,32 @@
33
"""
44
import os
55
import sys
6+
import logging
67
from pathlib import Path
78
from dataclasses import dataclass
89
from typing import Optional
910
from dotenv import load_dotenv
1011

12+
# Setup debug logger for configuration
13+
_config_logger = logging.getLogger('config')
14+
15+
# Check for DEBUG_MODE early (before .env is loaded, check environment)
16+
_debug_mode = os.getenv('DEBUG_MODE', 'false').lower() == 'true'
17+
if _debug_mode:
18+
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
19+
_config_logger.setLevel(logging.DEBUG)
20+
_config_logger.debug("🔍 DEBUG_MODE enabled - verbose logging active")
21+
1122
# Check if .env file exists and provide helpful guidance
1223
_env_file = Path('.env')
1324
_env_example = Path('.env.example')
1425
_env_exists = _env_file.exists()
26+
_cwd = Path.cwd()
27+
28+
if _debug_mode:
29+
_config_logger.debug(f"📁 Current working directory: {_cwd}")
30+
_config_logger.debug(f"📁 Looking for .env at: {_env_file.absolute()}")
31+
_config_logger.debug(f"📁 .env exists: {_env_exists}")
1532

1633
if not _env_exists:
1734
print("\n" + "="*70)
@@ -51,7 +68,9 @@
5168
sys.exit(0)
5269

5370
# Load .env file if it exists
54-
load_dotenv()
71+
_dotenv_result = load_dotenv()
72+
if _debug_mode:
73+
_config_logger.debug(f"📁 load_dotenv() returned: {_dotenv_result}")
5574

5675
# Load from environment variables with defaults
5776
API_ENDPOINT = os.getenv('API_ENDPOINT', 'http://localhost:11434/api/generate')
@@ -98,6 +117,28 @@
98117
HOST = os.getenv('HOST', '127.0.0.1')
99118
OUTPUT_DIR = os.getenv('OUTPUT_DIR', 'translated_files')
100119

120+
# Debug mode (reload after .env is loaded)
121+
DEBUG_MODE = os.getenv('DEBUG_MODE', 'false').lower() == 'true'
122+
123+
# Log loaded configuration in debug mode
124+
if DEBUG_MODE or _debug_mode:
125+
_config_logger.setLevel(logging.DEBUG)
126+
_config_logger.debug("="*60)
127+
_config_logger.debug("📋 LOADED CONFIGURATION VALUES:")
128+
_config_logger.debug("="*60)
129+
_config_logger.debug(f" API_ENDPOINT: {API_ENDPOINT}")
130+
_config_logger.debug(f" DEFAULT_MODEL: {DEFAULT_MODEL}")
131+
_config_logger.debug(f" LLM_PROVIDER: {LLM_PROVIDER}")
132+
_config_logger.debug(f" PORT: {PORT}")
133+
_config_logger.debug(f" HOST: {HOST}")
134+
_config_logger.debug(f" DEFAULT_SOURCE_LANGUAGE: {DEFAULT_SOURCE_LANGUAGE}")
135+
_config_logger.debug(f" DEFAULT_TARGET_LANGUAGE: {DEFAULT_TARGET_LANGUAGE}")
136+
_config_logger.debug(f" OLLAMA_NUM_CTX: {OLLAMA_NUM_CTX}")
137+
_config_logger.debug(f" REQUEST_TIMEOUT: {REQUEST_TIMEOUT}")
138+
_config_logger.debug(f" GEMINI_API_KEY: {'***' + GEMINI_API_KEY[-4:] if GEMINI_API_KEY else '(not set)'}")
139+
_config_logger.debug(f" OPENAI_API_KEY: {'***' + OPENAI_API_KEY[-4:] if OPENAI_API_KEY else '(not set)'}")
140+
_config_logger.debug("="*60)
141+
101142
# Translation tags - Improved for LLM clarity and reliability
102143
TRANSLATE_TAG_IN = "<TRANSLATION>"
103144
TRANSLATE_TAG_OUT = "</TRANSLATION>"

translation_api.py

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -120,6 +120,31 @@ def restore_incomplete_jobs():
120120

121121
restore_incomplete_jobs()
122122

123+
def test_ollama_connection():
    """Test the Ollama connection at server startup and log the result.

    Derives the base URL from DEFAULT_OLLAMA_API_ENDPOINT (module-level
    config) and queries the Ollama ``/api/tags`` endpoint, logging the
    models found so misconfiguration is visible immediately at startup.

    Returns:
        bool: True if Ollama answered with HTTP 200, False on any
        non-200 status, connection failure, timeout, or other error.
        The function never raises — startup must proceed either way.
    """
    import requests  # local import: keeps module importable even if requests is absent elsewhere
    try:
        # Endpoint may be e.g. http://host:11434/api/generate; strip to the base URL.
        base_url = DEFAULT_OLLAMA_API_ENDPOINT.split('/api/')[0]
        tags_url = f"{base_url}/api/tags"
        logger.info(f"🔍 Testing Ollama connection at {tags_url}...")
        response = requests.get(tags_url, timeout=5)
        if response.status_code == 200:
            data = response.json()
            models = [m.get('name') for m in data.get('models', [])]
            logger.info(f"✅ Ollama connected! Found {len(models)} model(s): {models}")
            return True
        logger.warning(f"⚠️ Ollama returned status {response.status_code}")
        return False
    except requests.exceptions.ConnectionError:
        logger.warning(f"⚠️ Cannot connect to Ollama at {base_url}")
        logger.warning("   Make sure Ollama is running ('ollama serve')")
        return False
    except requests.exceptions.Timeout:
        # Distinguish timeouts from refused connections, matching the
        # error handling in config_routes._get_ollama_models.
        logger.warning(f"⚠️ Timeout (5s) connecting to Ollama at {tags_url}")
        return False
    except Exception as e:
        # Catch-all so an unexpected failure (bad JSON, DNS error, …)
        # degrades to a warning instead of aborting server startup.
        logger.warning(f"⚠️ Ollama connection test failed: {e}")
        return False
146+
147+
123148
if __name__ == '__main__':
124149
# Validate configuration before starting
125150
validate_configuration()
@@ -132,6 +157,11 @@ def restore_incomplete_jobs():
132157
logger.info(f" - API: http://{HOST}:{PORT}/api/")
133158
logger.info(f" - Health Check: http://{HOST}:{PORT}/api/health")
134159
logger.info(f" - Supported formats: .txt, .epub, and .srt")
160+
logger.info("")
161+
162+
# Test Ollama connection at startup
163+
test_ollama_connection()
164+
135165
logger.info("")
136166
logger.info("💡 Press Ctrl+C to stop the server")
137167
logger.info("")

0 commit comments

Comments
 (0)