diff --git a/Dockerfile b/Dockerfile
index 77dc10c6..d42415b8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -25,6 +25,7 @@ RUN mkdir -p /config && chmod -R 755 /config
# Set environment variables
ENV PYTHONPATH=/app
ENV TZ=UTC
+ENV LOG_LEVEL=INFO
# ENV APP_TYPE=sonarr # APP_TYPE is likely managed via config now, remove if not needed
# Expose port
diff --git a/docker-compose.yml b/docker-compose.yml
index 3b2f6a7d..6719b0fe 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -11,6 +11,7 @@ services:
environment:
- TZ=${TZ:-UTC}
- BASE_URL=${BASE_URL:-}
+ - LOG_LEVEL=${LOG_LEVEL:-INFO}
restart: unless-stopped
# Graceful shutdown configuration
stop_signal: SIGTERM
diff --git a/docs/faq.html b/docs/faq.html
index 75d97299..bbae3303 100644
--- a/docs/faq.html
+++ b/docs/faq.html
@@ -105,6 +105,7 @@
Ins
-p 9705:9705 \
-v /path/to/huntarr/config:/config \
-e TZ=America/New_York \
+ -e LOG_LEVEL=INFO \
huntarr/huntarr:latest
@@ -325,6 +326,7 @@
Doc
- ./huntarr-config:/config
environment:
- TZ=America/New_York
+ - LOG_LEVEL=INFO
- PUID=1000
- PGID=1000
@@ -332,6 +334,27 @@ Doc
docker-compose up -d
+
+
+
How do I enable debug logging for troubleshooting?
+
+ Set the LOG_LEVEL environment variable to DEBUG:
+
+
# For Docker run:
+docker run ... -e LOG_LEVEL=DEBUG huntarr/huntarr:latest
+
+# For docker-compose, add to environment section:
+environment:
+ - LOG_LEVEL=DEBUG
+
+# Then restart the container:
+docker restart huntarr
+
+
+ Remember to set LOG_LEVEL back to INFO after troubleshooting to reduce log verbosity.
+
+
+
diff --git a/docs/getting-started/installation.html b/docs/getting-started/installation.html
index 00c711d6..7e914e55 100644
--- a/docs/getting-started/installation.html
+++ b/docs/getting-started/installation.html
@@ -92,6 +92,7 @@ Opti
-p 9705:9705 \
-v /your-path/huntarr:/config \
-e TZ=America/New_York \
+ -e LOG_LEVEL=INFO \
huntarr/huntarr:latest
Option 2: GitHub Container Registry
@@ -100,6 +101,7 @@ Option
-p 9705:9705 \
-v /your-path/huntarr:/config \
-e TZ=America/New_York \
+ -e LOG_LEVEL=INFO \
ghcr.io/plexguide/huntarr:latest
To check on the status of the program:
@@ -119,7 +121,8 @@ Opti
volumes:
- /your-path/huntarr:/config
environment:
- - TZ=America/New_York
+ - TZ=America/New_York
+ - LOG_LEVEL=INFO
Option 2: GitHub Container Registry
services:
@@ -132,7 +135,8 @@ Option
volumes:
- /your-path/huntarr:/config
environment:
- - TZ=America/New_York
+ - TZ=America/New_York
+ - LOG_LEVEL=INFO
Then run:
docker-compose up -d huntarr
@@ -182,6 +186,7 @@ Dock
-p 9705:9705 \
-v /mnt/user/appdata/huntarr:/config \
-e TZ=America/New_York \
+ -e LOG_LEVEL=INFO \
huntarr/huntarr:latest
GitHub Container Registry
@@ -190,6 +195,7 @@ GitHub
-p 9705:9705 \
-v /mnt/user/appdata/huntarr:/config \
-e TZ=America/New_York \
+ -e LOG_LEVEL=INFO \
ghcr.io/plexguide/huntarr:latest
diff --git a/docs/settings/settings.html b/docs/settings/settings.html
index 01c65c6d..e48ec00d 100644
--- a/docs/settings/settings.html
+++ b/docs/settings/settings.html
@@ -62,6 +62,7 @@ Table of Contents
Debug Mode
Display Resources
Low Usage Mode
+ Log Level Configuration
Notifications
@@ -172,6 +173,27 @@
+ Controls the verbosity of Huntarr's logging output through the LOG_LEVEL environment variable.
+
+ Available log levels:
+
+ - DEBUG: Detailed information for troubleshooting issues
+ - INFO: General information about operations (default)
+ - WARNING: Important warnings that don't stop operation
+ - ERROR: Error messages for serious issues
+ - CRITICAL: Critical errors that may stop operation
+
+
+ Set this in your Docker environment:
+ -e LOG_LEVEL=DEBUG
+
+ Or in docker-compose.yml:
+ environment:
+ - LOG_LEVEL=DEBUG
+
+ Changes require a container restart to take effect. Use DEBUG level for troubleshooting, then return to INFO for normal operation to reduce log verbosity.
diff --git a/src/primary/apps/lidarr/missing.py b/src/primary/apps/lidarr/missing.py
index a2a4dc3b..6aadf9b2 100644
--- a/src/primary/apps/lidarr/missing.py
+++ b/src/primary/apps/lidarr/missing.py
@@ -61,7 +61,7 @@ def process_missing_albums(
# Make sure any requested stop function is executable
stop_check = stop_check if callable(stop_check) else lambda: False
- lidarr_logger.info(f"Looking for missing albums for {instance_name}")
+ lidarr_logger.debug(f"Looking for missing albums for {instance_name}")
lidarr_logger.debug(f"Processing up to {hunt_missing_items} missing items in {hunt_missing_mode} mode")
# Reset state files if enough time has passed
@@ -80,7 +80,7 @@ def process_missing_albums(
try:
# Get missing albums or artists data based on the hunt_missing_mode
if hunt_missing_mode == "album":
- lidarr_logger.info("Retrieving missing albums for album-based processing...")
+ lidarr_logger.debug("Retrieving missing albums for album-based processing...")
# Use efficient random page selection instead of fetching all albums
missing_albums_data = lidarr_api.get_missing_albums_random_page(
api_url, api_key, api_timeout, monitored_only, total_items_to_process * 2
@@ -94,7 +94,7 @@ def process_missing_albums(
lidarr_logger.info("No missing albums found.")
return False
- lidarr_logger.info(f"Retrieved {len(missing_albums_data)} missing albums from random page selection.")
+ lidarr_logger.debug(f"Retrieved {len(missing_albums_data)} missing albums from random page selection.")
# Convert to the expected format for album processing - keep IDs as integers
unprocessed_entities = []
diff --git a/src/primary/apps/radarr/missing.py b/src/primary/apps/radarr/missing.py
index 59114a2d..b99d3ece 100644
--- a/src/primary/apps/radarr/missing.py
+++ b/src/primary/apps/radarr/missing.py
@@ -66,7 +66,7 @@ def process_missing_movies(
tag_processed_items = radarr_settings.get("tag_processed_items", True)
# Log important settings
- radarr_logger.info("=== Radarr Missing Movies Settings ===")
+ radarr_logger.debug("=== Radarr Missing Movies Settings ===")
radarr_logger.debug(f"Instance Name: {instance_name}")
# Extract necessary settings
@@ -101,7 +101,7 @@ def process_missing_movies(
return False
# Get missing movies
- radarr_logger.info("Retrieving movies with missing files...")
+ radarr_logger.debug("Retrieving movies with missing files...")
# Use efficient random page selection instead of fetching all movies
missing_movies = radarr_api.get_movies_with_missing_random_page(
api_url, api_key, api_timeout, monitored_only, hunt_missing_movies * 2
@@ -115,11 +115,11 @@ def process_missing_movies(
radarr_logger.info("No missing movies found.")
return False
- radarr_logger.info(f"Retrieved {len(missing_movies)} missing movies from random page selection.")
+ radarr_logger.debug(f"Retrieved {len(missing_movies)} missing movies from random page selection.")
# Skip future releases if enabled
if skip_future_releases:
- radarr_logger.info("Filtering out future releases...")
+ radarr_logger.debug("Filtering out future releases...")
now = datetime.datetime.now(datetime.timezone.utc)
filtered_movies = []
@@ -160,14 +160,14 @@ def process_missing_movies(
radarr_logger.debug(f"Skipping movie ID {movie_id} ('{movie_title}') - no releaseDate field and process_no_release_dates is disabled")
no_date_count += 1
- radarr_logger.info(f"Filtered out {skipped_count} future releases and {no_date_count} movies with no release dates")
+ radarr_logger.debug(f"Filtered out {skipped_count} future releases and {no_date_count} movies with no release dates")
radarr_logger.debug(f"After filtering: {len(filtered_movies)} movies remaining from {len(missing_movies)} original")
missing_movies = filtered_movies
else:
- radarr_logger.info("Skip future releases is disabled - processing all movies regardless of release date")
+ radarr_logger.debug("Skip future releases is disabled - processing all movies regardless of release date")
if not missing_movies:
- radarr_logger.info("No missing movies left to process after filtering future releases.")
+ radarr_logger.debug("No missing movies left to process after filtering future releases.")
return False
movies_processed = 0
@@ -182,20 +182,20 @@ def process_missing_movies(
else:
radarr_logger.debug(f"Skipping already processed movie ID: {movie_id}")
- radarr_logger.info(f"Found {len(unprocessed_movies)} unprocessed missing movies out of {len(missing_movies)} total.")
+ radarr_logger.debug(f"Found {len(unprocessed_movies)} unprocessed missing movies out of {len(missing_movies)} total.")
if not unprocessed_movies:
- radarr_logger.info("No unprocessed missing movies found. All available movies have been processed.")
+ radarr_logger.debug("No unprocessed missing movies found. All available movies have been processed.")
return False
# Always use random selection for missing movies
- radarr_logger.info(f"Using random selection for missing movies")
+ radarr_logger.debug(f"Using random selection for missing movies")
if len(unprocessed_movies) > hunt_missing_movies:
movies_to_process = random.sample(unprocessed_movies, hunt_missing_movies)
else:
movies_to_process = unprocessed_movies
- radarr_logger.info(f"Selected {len(movies_to_process)} movies to process.")
+ radarr_logger.debug(f"Selected {len(movies_to_process)} movies to process.")
# Add detailed logging for selected movies
if movies_to_process:
@@ -260,4 +260,4 @@ def process_missing_movies(
radarr_logger.warning(f"Failed to trigger search for movie '{movie_title}'")
radarr_logger.info(f"Finished processing missing movies. Processed {movies_processed} of {len(movies_to_process)} selected movies.")
- return processed_any
\ No newline at end of file
+ return processed_any
diff --git a/src/primary/apps/readarr/missing.py b/src/primary/apps/readarr/missing.py
index 6e209009..e4956224 100644
--- a/src/primary/apps/readarr/missing.py
+++ b/src/primary/apps/readarr/missing.py
@@ -32,7 +32,7 @@ def process_missing_books(
Returns:
True if any books were processed, False otherwise.
"""
- readarr_logger.info("Starting missing books processing cycle for Readarr.")
+ readarr_logger.debug("Starting missing books processing cycle for Readarr.")
processed_any = False
# Reset state files if enough time has passed
@@ -51,7 +51,7 @@ def process_missing_books(
api_timeout = get_advanced_setting("api_timeout", 120) # Use database value
instance_name = app_settings.get("instance_name", "Readarr Default")
- readarr_logger.info(f"Using API timeout of {api_timeout} seconds for Readarr")
+ readarr_logger.debug(f"Using API timeout of {api_timeout} seconds for Readarr")
monitored_only = app_settings.get("monitored_only", True)
skip_future_releases = app_settings.get("skip_future_releases", True)
@@ -78,7 +78,7 @@ def process_missing_books(
return False
# Get missing books
- readarr_logger.info(f"Retrieving books with missing files...")
+ readarr_logger.debug(f"Retrieving books with missing files...")
# Use efficient random page selection instead of fetching all books
missing_books_data = readarr_api.get_wanted_missing_books_random_page(
api_url, api_key, api_timeout, monitored_only, hunt_missing_books * 2
@@ -91,7 +91,7 @@ def process_missing_books(
readarr_logger.info("No missing books found.")
return False
- readarr_logger.info(f"Retrieved {len(missing_books_data)} missing books from random page selection.")
+ readarr_logger.debug(f"Retrieved {len(missing_books_data)} missing books from random page selection.")
# Check for stop signal after retrieving books
if stop_check():
diff --git a/src/primary/apps/sonarr/missing.py b/src/primary/apps/sonarr/missing.py
index 22f006d1..80a7ffa5 100644
--- a/src/primary/apps/sonarr/missing.py
+++ b/src/primary/apps/sonarr/missing.py
@@ -42,12 +42,12 @@ def process_missing_episodes(
sonarr_logger.info("'hunt_missing_items' setting is 0 or less. Skipping missing processing.")
return False
- sonarr_logger.info(f"Checking for {hunt_missing_items} missing episodes in {hunt_missing_mode} mode for instance '{instance_name}'...")
+ sonarr_logger.debug(f"Checking for {hunt_missing_items} missing episodes in {hunt_missing_mode} mode for instance '{instance_name}'...")
# Handle different modes
if hunt_missing_mode == "seasons_packs":
# Handle season pack searches (using SeasonSearch command)
- sonarr_logger.info("Season [Packs] mode selected - searching for complete season packs")
+ sonarr_logger.debug("Season [Packs] mode selected - searching for complete season packs")
return process_missing_seasons_packs_mode(
api_url, api_key, instance_name, api_timeout, monitored_only,
skip_future_episodes, hunt_missing_items,
@@ -55,7 +55,7 @@ def process_missing_episodes(
)
elif hunt_missing_mode == "shows":
# Handle show-based missing items (all episodes from a show)
- sonarr_logger.info("Show-based missing mode selected")
+ sonarr_logger.debug("Show-based missing mode selected")
return process_missing_shows_mode(
api_url, api_key, instance_name, api_timeout, monitored_only,
skip_future_episodes, hunt_missing_items,
@@ -104,7 +104,7 @@ def process_missing_seasons_packs_mode(
sonarr_logger.info("No missing episodes found")
return False
- sonarr_logger.info(f"Retrieved {len(missing_episodes)} missing episodes from random page selection.")
+ sonarr_logger.debug(f"Retrieved {len(missing_episodes)} missing episodes from random page selection.")
# Filter out future episodes if configured
if skip_future_episodes:
@@ -174,7 +174,7 @@ def process_missing_seasons_packs_mode(
else:
sonarr_logger.debug(f"Skipping already processed season ID: {season_id}")
- sonarr_logger.info(f"Found {len(unprocessed_seasons)} unprocessed seasons with missing episodes out of {len(seasons_list)} total.")
+ sonarr_logger.debug(f"Found {len(unprocessed_seasons)} unprocessed seasons with missing episodes out of {len(seasons_list)} total.")
if not unprocessed_seasons:
sonarr_logger.info("All seasons with missing episodes have been processed.")
@@ -186,10 +186,10 @@ def process_missing_seasons_packs_mode(
# Process up to hunt_missing_items seasons
processed_count = 0
- # Add detailed logging for selected seasons
+ # Add user-friendly logging for selected seasons
if unprocessed_seasons and hunt_missing_items > 0:
seasons_to_process = unprocessed_seasons[:hunt_missing_items]
- sonarr_logger.info(f"Randomly selected {min(len(unprocessed_seasons), hunt_missing_items)} seasons with missing episodes:")
+ sonarr_logger.info(f"Seasons selected for processing in this cycle ({min(len(unprocessed_seasons), hunt_missing_items)} of {len(unprocessed_seasons)} available):")
for idx, season in enumerate(seasons_to_process):
sonarr_logger.info(f" {idx+1}. {season['series_title']} - Season {season['season_number']} ({season['episode_count']} missing episodes) (Series ID: {season['series_id']})")
@@ -218,7 +218,7 @@ def process_missing_seasons_packs_mode(
# Refresh functionality has been removed as it was identified as a performance bottleneck
- sonarr_logger.info(f"Searching for season pack: {series_title} - Season {season_number} (contains {episode_count} missing episodes)")
+ sonarr_logger.debug(f"Searching for season pack: {series_title} - Season {season_number} (contains {episode_count} missing episodes)")
# Trigger an API call to search for the entire season
command_id = sonarr_api.search_season(api_url, api_key, api_timeout, series_id, season_number)
@@ -263,7 +263,7 @@ def process_missing_seasons_packs_mode(
else:
sonarr_logger.error(f"Failed to trigger search for {series_title}.")
- sonarr_logger.info(f"Processed {processed_count} missing season packs for Sonarr.")
+ sonarr_logger.info(f"Finished processing missing episodes. Processed {processed_count} season packs.")
return processed_any
def process_missing_shows_mode(
@@ -286,7 +286,7 @@ def process_missing_shows_mode(
tag_processed_items = sonarr_settings.get("tag_processed_items", True)
# Get series with missing episodes
- sonarr_logger.info("Retrieving series with missing episodes...")
+ sonarr_logger.debug("Retrieving series with missing episodes...")
series_with_missing = sonarr_api.get_series_with_missing_episodes(
api_url, api_key, api_timeout, monitored_only, random_mode=True)
@@ -303,7 +303,7 @@ def process_missing_shows_mode(
else:
sonarr_logger.debug(f"Skipping already processed series ID: {series_id}")
- sonarr_logger.info(f"Found {len(unprocessed_series)} unprocessed series with missing episodes out of {len(series_with_missing)} total.")
+ sonarr_logger.debug(f"Found {len(unprocessed_series)} unprocessed series with missing episodes out of {len(series_with_missing)} total.")
if not unprocessed_series:
sonarr_logger.info("All series with missing episodes have been processed.")
@@ -315,9 +315,9 @@ def process_missing_shows_mode(
min(len(unprocessed_series), hunt_missing_items)
)
- # Add detailed logging for selected shows
+ # Add user-friendly logging for selected shows
if shows_to_process:
- sonarr_logger.info("Shows selected for processing in this cycle:")
+ sonarr_logger.info(f"Shows selected for processing in this cycle ({len(shows_to_process)} of {len(unprocessed_series)} available):")
for idx, show in enumerate(shows_to_process):
show_id = show.get('series_id')
show_title = show.get('series_title', 'Unknown Show')
@@ -520,7 +520,7 @@ def process_missing_episodes_mode(
else:
sonarr_logger.debug(f"Skipping already processed episode ID: {episode_id}")
- sonarr_logger.info(f"Found {len(unprocessed_episodes)} unprocessed episodes out of {len(missing_episodes)} total.")
+ sonarr_logger.debug(f"Found {len(unprocessed_episodes)} unprocessed episodes out of {len(missing_episodes)} total.")
if not unprocessed_episodes:
sonarr_logger.info("All missing episodes have been processed.")
@@ -530,7 +530,7 @@ def process_missing_episodes_mode(
random.shuffle(unprocessed_episodes)
episodes_to_process = unprocessed_episodes[:hunt_missing_items]
- sonarr_logger.info(f"Processing {len(episodes_to_process)} individual missing episodes...")
+ sonarr_logger.info(f"Episodes selected for processing in this cycle ({len(episodes_to_process)} of {len(unprocessed_episodes)} available)")
# Process each episode individually
processed_count = 0
diff --git a/src/primary/apps/whisparr/missing.py b/src/primary/apps/whisparr/missing.py
index a6a821d5..dcd62663 100644
--- a/src/primary/apps/whisparr/missing.py
+++ b/src/primary/apps/whisparr/missing.py
@@ -35,7 +35,7 @@ def process_missing_items(
Returns:
True if any items were processed, False otherwise.
"""
- whisparr_logger.info("Starting missing items processing cycle for Whisparr.")
+ whisparr_logger.debug("Starting missing items processing cycle for Whisparr.")
processed_any = False
# Reset state files if enough time has passed
@@ -79,7 +79,7 @@ def process_missing_items(
return False
# Get missing items
- whisparr_logger.info(f"Retrieving items with missing files...")
+ whisparr_logger.debug(f"Retrieving items with missing files...")
missing_items = whisparr_api.get_items_with_missing(api_url, api_key, api_timeout, monitored_only)
if missing_items is None: # API call failed
@@ -95,7 +95,7 @@ def process_missing_items(
whisparr_logger.info("Stop requested after retrieving missing items. Aborting...")
return False
- whisparr_logger.info(f"Found {len(missing_items)} items with missing files.")
+ whisparr_logger.debug(f"Found {len(missing_items)} items with missing files.")
# Filter out future releases if configured
if skip_future_releases:
diff --git a/src/primary/background.py b/src/primary/background.py
index a98c1310..d31cc980 100644
--- a/src/primary/background.py
+++ b/src/primary/background.py
@@ -302,7 +302,7 @@ def app_specific_loop(app_type: str) -> None:
hunt_upgrade_enabled = hunt_upgrade_value > 0
# Debug logging for per-instance hunt values
- app_logger.info(f"Instance '{instance_name}' - Missing: {hunt_missing_value} (enabled: {hunt_missing_enabled}), Upgrade: {hunt_upgrade_value} (enabled: {hunt_upgrade_enabled})")
+ app_logger.debug(f"Instance '{instance_name}' - Missing: {hunt_missing_value} (enabled: {hunt_missing_enabled}), Upgrade: {hunt_upgrade_value} (enabled: {hunt_upgrade_enabled})")
# --- Queue Size Check --- # Moved inside loop
# Get maximum_download_queue_size from general settings (still using minimum_download_queue_size key for backward compatibility)
@@ -422,7 +422,7 @@ def app_specific_loop(app_type: str) -> None:
if processed_any_items:
app_logger.info(f"=== {app_type.upper()} cycle finished. Processed items across instances. ===")
else:
- app_logger.info(f"=== {app_type.upper()} cycle finished. No items processed in any instance. ===")
+ app_logger.debug(f"=== {app_type.upper()} cycle finished. No items processed in any instance. ===")
# Add state management summary logging for user clarity (only for hunting apps, not Swaparr)
if app_type != "swaparr":
@@ -474,30 +474,30 @@ def app_specific_loop(app_type: str) -> None:
# Log per-instance state management info
if instance_summaries:
- app_logger.info(f"=== STATE MANAGEMENT SUMMARY FOR {app_type.upper()} ===")
+ app_logger.debug(f"=== STATE MANAGEMENT SUMMARY FOR {app_type.upper()} ===")
for inst in instance_summaries:
if inst["enabled"]:
if inst["processed_count"] > 0:
- app_logger.info(f" {inst['name']}: {inst['processed_count']} items tracked, next reset: {inst['next_reset_time']} ({inst['hours']}h interval)")
+ app_logger.debug(f" {inst['name']}: {inst['processed_count']} items tracked, next reset: {inst['next_reset_time']} ({inst['hours']}h interval)")
else:
- app_logger.info(f" {inst['name']}: No items tracked yet, next reset: {inst['next_reset_time']} ({inst['hours']}h interval)")
+ app_logger.debug(f" {inst['name']}: No items tracked yet, next reset: {inst['next_reset_time']} ({inst['hours']}h interval)")
else:
- app_logger.info(f" {inst['name']}: State management disabled")
+ app_logger.debug(f" {inst['name']}: State management disabled")
# Overall summary
if not processed_any_items and has_any_processed:
# Items were skipped due to state management
- app_logger.info(f"RESULT: {total_processed} items skipped due to state management (already processed)")
+ app_logger.debug(f"RESULT: {total_processed} items skipped due to state management (already processed)")
elif processed_any_items:
# Items were processed, show summary
- app_logger.info(f"RESULT: Items processed successfully. Total tracked across instances: {total_processed}")
+ app_logger.debug(f"RESULT: Items processed successfully. Total tracked across instances: {total_processed}")
else:
# No items processed and no state management blocking
if total_processed > 0:
- app_logger.info(f"RESULT: No new items found. Total tracked across instances: {total_processed}")
+ app_logger.debug(f"RESULT: No new items found. Total tracked across instances: {total_processed}")
else:
- app_logger.info(f"RESULT: No items to process and no items tracked yet")
+ app_logger.debug(f"RESULT: No items to process and no items tracked yet")
except Exception as e:
app_logger.warning(f"Could not generate state management summary: {e}")
@@ -522,8 +522,8 @@ def app_specific_loop(app_type: str) -> None:
next_cycle_time = now_user_tz + datetime.timedelta(seconds=sleep_seconds)
app_logger.debug(f"Current time ({user_tz}): {now_user_tz.strftime('%Y-%m-%d %H:%M:%S')}")
- app_logger.info(f"Next cycle will begin at {next_cycle_time.strftime('%Y-%m-%d %H:%M:%S')} ({user_tz})")
- app_logger.info(f"Sleep duration: {sleep_seconds} seconds")
+ app_logger.debug(f"Next cycle will begin at {next_cycle_time.strftime('%Y-%m-%d %H:%M:%S')} ({user_tz})")
+ app_logger.debug(f"Sleep duration: {sleep_seconds} seconds")
# Update cycle tracking with user timezone time
next_cycle_naive = next_cycle_time.replace(tzinfo=None) if next_cycle_time.tzinfo else next_cycle_time
@@ -812,8 +812,8 @@ def swaparr_app_loop():
# Sleep duration and next cycle info (like other apps)
swaparr_logger.debug(f"Current time ({user_tz}): {now_user_tz.strftime('%Y-%m-%d %H:%M:%S')}")
- swaparr_logger.info(f"Next cycle will begin at {next_cycle_time.strftime('%Y-%m-%d %H:%M:%S')} ({user_tz})")
- swaparr_logger.info(f"Sleep duration: {sleep_duration} seconds")
+ swaparr_logger.debug(f"Next cycle will begin at {next_cycle_time.strftime('%Y-%m-%d %H:%M:%S')} ({user_tz})")
+ swaparr_logger.debug(f"Sleep duration: {sleep_duration} seconds")
# Sleep with responsiveness to stop events and reset requests (like other apps)
elapsed = 0
diff --git a/src/primary/cycle_tracker.py b/src/primary/cycle_tracker.py
index 02a6a577..e4a5e282 100644
--- a/src/primary/cycle_tracker.py
+++ b/src/primary/cycle_tracker.py
@@ -78,7 +78,7 @@ def update_sleep_json(app_type: str, next_cycle_time: datetime.datetime, cyclelo
last_cycle_end=current_data.get('last_cycle_end')
)
- logger.info(f"Updated sleep data for {app_type}: next_cycle={next_cycle_time.isoformat()}, cyclelock={cyclelock}")
+ logger.debug(f"Updated sleep data for {app_type}: next_cycle={next_cycle_time.isoformat()}, cyclelock={cyclelock}")
except Exception as e:
logger.error(f"Error updating sleep data for {app_type}: {e}")
@@ -190,7 +190,7 @@ def end_cycle(app_type: str, next_cycle_time: datetime.datetime) -> None:
next_cycle_time: When the next cycle will begin
"""
try:
- logger.info(f"Ending cycle for {app_type}, next cycle at {next_cycle_time.isoformat()}")
+ logger.debug(f"Ending cycle for {app_type}, next cycle at {next_cycle_time.isoformat()}")
db = get_database()
current_data = db.get_sleep_data(app_type)
@@ -216,7 +216,7 @@ def end_cycle(app_type: str, next_cycle_time: datetime.datetime) -> None:
last_cycle_end=now_user_tz.isoformat()
)
- logger.info(f"Ended cycle for {app_type} (cyclelock = False)")
+ logger.debug(f"Ended cycle for {app_type} (cyclelock = False)")
except Exception as e:
logger.error(f"Error ending cycle for {app_type}: {e}")
diff --git a/src/primary/settings_manager.py b/src/primary/settings_manager.py
index 0ebf170a..2cad842d 100644
--- a/src/primary/settings_manager.py
+++ b/src/primary/settings_manager.py
@@ -13,7 +13,6 @@
from typing import Dict, Any, Optional, List
# Create a simple logger for settings_manager
-logging.basicConfig(level=logging.INFO)
settings_logger = logging.getLogger("settings_manager")
# Database integration
diff --git a/src/primary/stateful_manager.py b/src/primary/stateful_manager.py
index b2bb766d..fdad3430 100644
--- a/src/primary/stateful_manager.py
+++ b/src/primary/stateful_manager.py
@@ -10,8 +10,11 @@
import logging
from typing import Dict, Any, List, Optional, Set
-# Create logger for stateful_manager
-stateful_logger = logging.getLogger("stateful_manager")
+# Import the Huntarr logger system
+from src.primary.utils.logger import get_logger
+
+# Create logger for stateful_manager using Huntarr logger system
+stateful_logger = get_logger("huntarr") # Use main huntarr logger for now
# Constants
DEFAULT_HOURS = 168 # Default 7 days (168 hours)
@@ -307,7 +310,7 @@ def is_processed(app_type: str, instance_name: str, media_id: str) -> bool:
processed_ids = db.get_processed_ids(app_type, instance_name)
total_count = len(processed_ids)
- stateful_logger.info(f"is_processed check: {app_type}/{instance_name}, ID:{media_id_str}, Found:{is_in_db}, Total IDs:{total_count}")
+ stateful_logger.debug(f"is_processed check: {app_type}/{instance_name}, ID:{media_id_str}, Found:{is_in_db}, Total IDs:{total_count}")
return is_in_db
except Exception as e:
diff --git a/src/primary/utils/database.py b/src/primary/utils/database.py
index e0d6f7e8..c1b3b610 100644
--- a/src/primary/utils/database.py
+++ b/src/primary/utils/database.py
@@ -14,7 +14,22 @@
import time
import shutil
-logger = logging.getLogger(__name__)
+# Import the Huntarr logger system
+from src.primary.utils.logger import get_logger
+
+# Use the main huntarr logger instead of creating a separate one
+logger = None # Will be initialized when needed to avoid circular import
+
+def _get_logger():
+ """Get the huntarr logger, initializing it if needed to avoid circular imports"""
+ global logger
+ if logger is None:
+ try:
+ logger = get_logger("huntarr")
+ except:
+ # Fallback to standard logger if huntarr logger system isn't available
+ logger = logging.getLogger("huntarr")
+ return logger
class HuntarrDatabase:
"""Database manager for all Huntarr configurations and settings"""
@@ -2725,9 +2740,9 @@ def cleanup_worker():
logs_db = get_logs_database()
deleted_count = logs_db.cleanup_old_logs(days_to_keep=30, max_entries_per_app=10000)
if deleted_count > 0:
- logger.info(f"Scheduled cleanup removed {deleted_count} old log entries")
+ _get_logger().debug(f"Scheduled cleanup removed {deleted_count} old log entries")
except Exception as e:
- logger.error(f"Error in scheduled log cleanup: {e}")
+ _get_logger().error(f"Error in scheduled log cleanup: {e}")
# Start cleanup thread
cleanup_thread = threading.Thread(target=cleanup_worker, daemon=True)
diff --git a/src/primary/utils/logger.py b/src/primary/utils/logger.py
index 4ba99016..072cc135 100644
--- a/src/primary/utils/logger.py
+++ b/src/primary/utils/logger.py
@@ -65,9 +65,6 @@ def formatTime(self, record, datefmt=None):
# Use timezone-aware format
s = ct.strftime("%Y-%m-%d %H:%M:%S")
- # Add timezone information for clarity
- timezone_name = str(user_tz)
- s += f" {timezone_name}"
return s
except Exception:
@@ -78,21 +75,37 @@ def formatTime(self, record, datefmt=None):
else:
s = time.strftime("%Y-%m-%d %H:%M:%S", ct)
- # Add timezone information to help identify which timezone logs are in
- tz_name = time.tzname[time.daylight] if time.daylight else time.tzname[0]
- if tz_name:
- s += f" {tz_name}"
return s
+def get_log_level():
+ """Get the logging level from LOG_LEVEL environment variable with fallback to INFO."""
+ # Check LOG_LEVEL environment variable
+ log_level_str = os.environ.get('LOG_LEVEL', '').upper()
+ if log_level_str in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
+ return getattr(logging, log_level_str)
+
+ # Default to INFO level
+ return logging.INFO
+
+def configure_root_logger():
+ """Configure root logger to respect LOG_LEVEL environment variable."""
+ root_logger = logging.getLogger()
+ log_level = get_log_level()
+ root_logger.setLevel(log_level)
+
+ # Configure all existing handlers to use the new level
+ for handler in root_logger.handlers:
+ handler.setLevel(log_level)
+
def setup_main_logger():
"""Set up the main Huntarr logger."""
global logger
log_name = "huntarr"
log_file = MAIN_LOG_FILE
- # Always use DEBUG level - let frontend filter what users see
- use_log_level = logging.DEBUG
+ # Get log level from environment with INFO as default
+ use_log_level = get_log_level()
# Get or create the main logger instance
current_logger = logging.getLogger(log_name)
@@ -125,6 +138,10 @@ def setup_main_logger():
current_logger.debug("Debug logging enabled for main logger")
logger = current_logger # Assign to the global variable
+
+ # Configure root logger to ensure all loggers respect LOG_LEVEL
+ configure_root_logger()
+
return current_logger
def get_logger(app_type: str) -> logging.Logger:
@@ -158,8 +175,8 @@ def get_logger(app_type: str) -> logging.Logger:
# Prevent propagation to the main 'huntarr' logger or root logger
app_logger.propagate = False
- # Always use DEBUG level - let frontend filter what users see
- log_level = logging.DEBUG
+ # Get log level from environment with INFO as default
+ log_level = get_log_level()
app_logger.setLevel(log_level)
@@ -197,11 +214,10 @@ def get_logger(app_type: str) -> logging.Logger:
def update_logging_levels():
"""
- Update all logger levels to DEBUG level.
- This function is kept for compatibility but now always sets DEBUG level.
+ Update all logger levels based on environment configuration.
"""
- # Always use DEBUG level - let frontend filter what users see
- level = logging.DEBUG
+ # Get log level from environment
+ level = get_log_level()
# Set level for main logger
if logger: