11from xml .etree import ElementTree
22import os
3+ import hashlib
4+ from urllib .parse import urlparse
5+ import aiohttp
6+ import aiofiles
37import re
48import math
59from datetime import datetime
610
import logging

# Anchor the cache next to this script so its location does not depend on
# the process's current working directory.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))

# Folder holding downloaded artwork; created eagerly at import time so the
# download/cleanup helpers can assume it exists.
CACHE_FOLDER = os.path.join(SCRIPT_DIR, "images_cache")
os.makedirs(CACHE_FOLDER, exist_ok=True)

_LOGGER = logging.getLogger(__name__)
17+
718def parse_library (root ):
819 output = []
920 for medium in root .findall ("Video" ):
@@ -19,19 +30,34 @@ def parse_library(root):
1930
2031 return output
2132
22- def extract_metadata_and_type (path ):
23- pattern = re .compile (r"/library/metadata/(\d+)/(thumb|art)/(\d+)" )
24- match = pattern .search (path )
25-
26- if match :
27- metadata_id = match .group (1 )
28- art_type = match .group (2 )
29- art_id = match .group (3 )
30- return metadata_id , art_type , art_id
31-
def get_image_filename(url):
    """Derive a stable, unique cache filename for *url*.

    The name is the MD5 hex digest of the full URL joined with the
    extension taken from the URL's path component (falling back to
    ".jpg" when the path carries none), so identical URLs always map
    to the same cache file.
    """
    path_ext = os.path.splitext(urlparse(url).path)[-1] or ".jpg"
    digest = hashlib.md5(url.encode()).hexdigest()
    return digest + path_ext
40+
async def download_image(url):
    """Download *url* into the cache folder without blocking the event loop.

    Returns the cached filename on success (or when the image is already
    cached), or None when the server answers non-200 or the request fails.
    """
    filename = get_image_filename(url)
    file_path = os.path.join(CACHE_FOLDER, filename)

    # Cache hit: the filename is a pure function of the URL (which embeds
    # the Plex art id), so an existing file is the same image — skip the
    # network round-trip instead of re-downloading on every refresh.
    if os.path.isfile(file_path):
        return filename

    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                if response.status == 200:
                    # aiofiles keeps the file write off the event loop.
                    async with aiofiles.open(file_path, "wb") as file:
                        await file.write(await response.read())
                    return filename
    except (aiohttp.ClientError, OSError):
        # Best-effort: a failed download just means no cached artwork,
        # mirroring the non-200 path rather than aborting the caller.
        return None
    return None
3353
34- def parse_data (data , max , base_url , token , identifier , section_key , images_base_url , is_all = False ):
def cleanup_old_images(valid_filenames):
    """Delete cached images whose names are not in *valid_filenames*.

    Keeps the cache folder in sync with the most recent library scan.
    Only regular files are removed, and individual removal errors (e.g.
    a file deleted concurrently) are ignored so one bad entry cannot
    abort the whole sweep.
    """
    valid = set(valid_filenames)  # O(1) membership even if a list is passed
    for filename in os.listdir(CACHE_FOLDER):
        if filename in valid:
            continue
        path = os.path.join(CACHE_FOLDER, filename)
        # os.remove would raise on a stray sub-directory — skip non-files.
        if os.path.isfile(path):
            try:
                os.remove(path)
            except OSError:
                pass  # raced with another deletion / permissions — best effort
59+
60+ async def parse_data (data , max , base_url , token , identifier , section_key , images_base_url , is_all = False ):
3561 if is_all :
3662 sorted_data = []
3763 for k in data .keys ():
@@ -42,6 +68,7 @@ def parse_data(data, max, base_url, token, identifier, section_key, images_base_
4268 sorted_data = sorted (data , key = lambda i : i ['addedAt' ], reverse = True )[:max ]
4369
4470 output = []
71+ valid_images = set ()
4572 for item in sorted_data :
4673 media_type_map = {'movie' : ('thumb' , 'art' ), 'episode' : ('grandparentThumb' , 'grandparentArt' )}
4774 thumb_key , art_key = media_type_map .get (item ['type' ], ('thumb' , 'grandparentArt' ))
@@ -80,10 +107,20 @@ def parse_data(data, max, base_url, token, identifier, section_key, images_base_
80107 data_output ["rating" ] = ('\N{BLACK STAR} ' + str (item .get ("rating" ))) if int (float (item .get ("rating" , 0 ))) > 0 else ''
81108 data_output ['summary' ] = item .get ('summary' , '' )
82109 data_output ['trailer' ] = item .get ('trailer' )
83- thumb_IDs = extract_metadata_and_type (thumb )
84- data_output ["poster" ] = (f'{ images_base_url } ?metadata={ thumb_IDs [0 ]} &thumb={ thumb_IDs [2 ]} ' ) if thumb_IDs else ""
85- art_IDs = extract_metadata_and_type (art )
86- data_output ["fanart" ] = (f'{ images_base_url } ?metadata={ art_IDs [0 ]} &art={ art_IDs [2 ]} ' ) if art_IDs else ""
110+
111+
112+ thumb_filename = await download_image (f'{ base_url } { thumb } ?X-Plex-Token={ token } ' )
113+ if thumb_filename :
114+ valid_images .add (thumb_filename )
115+ data_output ["poster" ] = (f'{ images_base_url } ?filename={ thumb_filename } ' ) if thumb_filename else ""
116+
117+
118+ art_filename = await download_image (f'{ base_url } { art } ?X-Plex-Token={ token } ' )
119+ if art_filename :
120+ valid_images .add (art_filename )
121+ data_output ["fanart" ] = (f'{ images_base_url } ?filename={ art_filename } ' ) if art_filename else ""
122+
123+
87124 data_output ["deep_link" ] = deep_link if identifier else None
88125
89126 output .append (data_output )
0 commit comments