@@ -34,83 +34,15 @@ def execute_query(self, query: str, params: tuple = None) -> List[tuple]:
         return cursor.fetchall()

     def _configure_connection(self, conn):
37- """Configure SQLite connection with enhanced Docker resilience and cross-platform compatibility"""
38- try :
39- conn .execute ('PRAGMA foreign_keys = ON' )
40-
41- # Try WAL mode first, fall back to DELETE mode if it fails
42- try :
43- conn .execute ('PRAGMA journal_mode = WAL' )
44- except Exception as wal_error :
45- logger .warning (f"WAL mode failed, using DELETE mode: { wal_error } " )
46- conn .execute ('PRAGMA journal_mode = DELETE' )
47-
48- # Detect if running on Synology NAS for performance optimizations
49- is_synology = self ._detect_synology_nas ()
50- synology_opt_enabled = os .environ .get ('HUNTARR_SYNOLOGY_OPTIMIZATIONS' , 'true' ).lower () == 'true'
51-
52- if is_synology and synology_opt_enabled :
53- logger .info ("Synology NAS detected - applying performance optimizations for network file systems" )
54- # Synology-optimized settings for network file system performance
55- conn .execute ('PRAGMA synchronous = NORMAL' ) # Much faster on NFS/CIFS, still safe with WAL
56- conn .execute ('PRAGMA cache_size = -40960' ) # 40MB cache for better performance on Synology
57- conn .execute ('PRAGMA temp_store = MEMORY' ) # Store temp tables in memory
58- conn .execute ('PRAGMA busy_timeout = 30000' ) # 30 seconds for Synology I/O
59- conn .execute ('PRAGMA mmap_size = 134217728' ) # 128MB memory map optimized for Synology
60- else :
61- if is_synology and not synology_opt_enabled :
62- logger .info ("Synology NAS detected but optimizations disabled via HUNTARR_SYNOLOGY_OPTIMIZATIONS=false" )
63- # Standard settings for Docker rebuild resilience and corruption prevention
64- conn .execute ('PRAGMA synchronous = FULL' ) # Maximum durability for Docker environments
65- conn .execute ('PRAGMA cache_size = -32000' ) # 32MB cache for better performance
66- conn .execute ('PRAGMA temp_store = MEMORY' ) # Store temp tables in memory
67- conn .execute ('PRAGMA busy_timeout = 60000' ) # 60 seconds for Docker I/O delays
68-
69- # Skip mmap on Windows if it causes issues, but use it elsewhere for performance
70- import platform
71- if platform .system () != "Windows" :
72- conn .execute ('PRAGMA mmap_size = 268435456' ) # 256MB memory map
73-
74- # Common settings for all platforms
75- conn .execute ('PRAGMA auto_vacuum = INCREMENTAL' ) # Incremental vacuum for maintenance
76- conn .execute ('PRAGMA secure_delete = ON' ) # Secure deletion to prevent data recovery
77-
78- # Additional corruption prevention measures
79- conn .execute ('PRAGMA cell_size_check = ON' ) # Enable cell size validation
80- conn .execute ('PRAGMA integrity_check' ) # Verify database integrity on connection
81- conn .execute ('PRAGMA optimize' ) # Optimize statistics and indexes
82- conn .execute ('PRAGMA application_id = 1751013204' ) # Set unique application ID for Huntarr
83-
84- # Enhanced checkpoint settings for WAL mode
85- result = conn .execute ('PRAGMA journal_mode' ).fetchone ()
86- if result and result [0 ] == 'wal' :
87- if is_synology and synology_opt_enabled :
88- # More aggressive checkpointing for Synology
89- conn .execute ('PRAGMA wal_autocheckpoint = 1000' ) # Less frequent checkpoints for performance
90- conn .execute ('PRAGMA journal_size_limit = 134217728' ) # 128MB journal size limit
91- else :
92- # Conservative checkpointing for other systems
93- conn .execute ('PRAGMA wal_autocheckpoint = 500' ) # More frequent checkpoints for Docker
94- conn .execute ('PRAGMA journal_size_limit = 67108864' ) # 64MB journal size limit
95- conn .execute ('PRAGMA wal_checkpoint(TRUNCATE)' ) # Force checkpoint and truncate
96-
97- # Test the configuration worked
98- integrity_result = conn .execute ('PRAGMA integrity_check' ).fetchone ()
99- if integrity_result and integrity_result [0 ] != 'ok' :
100- logger .error (f"Database integrity check failed: { integrity_result } " )
101- raise sqlite3 .DatabaseError (f"Database integrity compromised: { integrity_result } " )
102-
103- # Log optimization status for monitoring
104- if is_synology :
105- sync_mode = conn .execute ('PRAGMA synchronous' ).fetchone ()[0 ]
106- cache_size = conn .execute ('PRAGMA cache_size' ).fetchone ()[0 ]
107- mmap_size = conn .execute ('PRAGMA mmap_size' ).fetchone ()[0 ]
108- logger .info (f"Synology optimizations applied: sync={ sync_mode } , cache={ abs (cache_size / 1024 ):.1f} MB, mmap={ mmap_size / 1024 / 1024 :.0f} MB" )
109-
110- except Exception as e :
111- logger .error (f"Error configuring database connection: { e } " )
112- # Continue with basic connection if configuration fails
113- pass
37+ """Configure SQLite connection with Synology NAS compatible settings"""
38+ conn .execute ('PRAGMA foreign_keys = ON' )
39+ conn .execute ('PRAGMA journal_mode = WAL' )
40+ conn .execute ('PRAGMA synchronous = NORMAL' )
41+ conn .execute ('PRAGMA cache_size = 10000' )
42+ conn .execute ('PRAGMA temp_store = MEMORY' )
43+ conn .execute ('PRAGMA mmap_size = 268435456' )
44+ conn .execute ('PRAGMA wal_autocheckpoint = 1000' )
45+ conn .execute ('PRAGMA busy_timeout = 30000' )

     def get_connection(self):
         """Get a configured SQLite connection with Synology NAS compatibility"""
@@ -162,81 +94,7 @@ def _get_database_path(self) -> Path:
         data_dir.mkdir(parents=True, exist_ok=True)
         return data_dir / "huntarr.db"

-    def _detect_synology_nas(self):
-        """
-        Detect if running on Synology NAS using multiple reliable methods.
-        Returns True if Synology is detected, False otherwise.
-        """
-        try:
-            # Method 1: Check for Synology-specific files and directories
-            synology_indicators = [
-                '/usr/syno',                          # Synology system directory
-                '/etc/synoinfo.conf',                 # Synology configuration file
-                '/proc/sys/kernel/syno_hw_version',   # Synology hardware version
-                '/usr/bin/synopkg',                   # Synology package manager
-                '/var/services'                       # Synology services directory
-            ]
-
-            for indicator in synology_indicators:
-                if os.path.exists(indicator):
-                    logger.debug(f"Synology detected via: {indicator}")
-                    return True
-
-            # Method 2: Check environment variables set by Synology
-            synology_env_vars = [
-                'SYNOPKG_PKGNAME',
-                'SYNOPKG_PKGVER',
-                'SYNO_USER',
-                'SYNO_GROUP'
-            ]
-
-            for env_var in synology_env_vars:
-                if os.environ.get(env_var):
-                    logger.debug(f"Synology detected via environment variable: {env_var}")
-                    return True
-
-            # Method 3: Check system information files for Synology
-            info_files = [
-                ('/etc/os-release', 'synology'),
-                ('/proc/version', 'synology'),
-                ('/etc/issue', 'synology')
-            ]
-
-            for file_path, keyword in info_files:
-                try:
-                    if os.path.exists(file_path):
-                        with open(file_path, 'r') as f:
-                            content = f.read().lower()
-                            if keyword in content:
-                                logger.debug(f"Synology detected in {file_path}")
-                                return True
-                except (IOError, OSError):
-                    continue
-
-            # Method 4: Check for Synology-specific hostname patterns
-            try:
-                hostname = os.uname().nodename.lower()
-                if 'diskstation' in hostname or 'rackstation' in hostname:
-                    logger.debug(f"Synology detected via hostname: {hostname}")
-                    return True
-            except (AttributeError, OSError):
-                pass
-
-            # Method 5: Check for Synology Docker environment
-            try:
-                with open('/proc/1/cgroup', 'r') as f:
-                    cgroup_content = f.read()
-                    if 'synology' in cgroup_content.lower():
-                        logger.debug("Synology detected via Docker cgroup")
-                        return True
-            except (IOError, OSError):
-                pass
-
-            return False
-
-        except Exception as e:
-            logger.debug(f"Error during Synology detection: {e}")
-            return False
+

     def _handle_database_corruption(self):
         """Handle database corruption by creating backup and starting fresh"""