diff --git a/GUI/manage.py b/GUI/manage.py index 350660d5f..4b0894d0c 100644 --- a/GUI/manage.py +++ b/GUI/manage.py @@ -2,6 +2,7 @@ import os import sys + # Fix PYTHONPATH current_dir = os.path.dirname(os.path.abspath(__file__)) parent_dir = os.path.dirname(current_dir) @@ -9,7 +10,6 @@ sys.path.insert(0, parent_dir) - def main(): os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webgui.settings") from django.core.management import execute_from_command_line diff --git a/GUI/searchapp/__init__.py b/GUI/searchapp/__init__.py deleted file mode 100644 index 8b1378917..000000000 --- a/GUI/searchapp/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/GUI/searchapp/api/__init__.py b/GUI/searchapp/api/__init__.py new file mode 100644 index 000000000..258606953 --- /dev/null +++ b/GUI/searchapp/api/__init__.py @@ -0,0 +1,72 @@ +# 06-06-2025 By @FrancescoGrazioso -> "https://github.com/FrancescoGrazioso" + + +from typing import Dict, Type + + +# Internal utilities +from .base import BaseStreamingAPI +from .streamingcommunity import StreamingCommunityAPI +from .animeunity import AnimeUnityAPI + + +_API_REGISTRY: Dict[str, Type[BaseStreamingAPI]] = { + 'streamingcommunity': StreamingCommunityAPI, + 'animeunity': AnimeUnityAPI, +} + + +def get_api(site_name: str) -> BaseStreamingAPI: + """ + Get API instance for a specific site. + + Args: + site_name: Name of the streaming site + + Returns: + Instance of the appropriate API class + """ + site_key = site_name.lower().split('_')[0] + + if site_key not in _API_REGISTRY: + raise ValueError( + f"Unsupported site: {site_name}. " + f"Available sites: {', '.join(_API_REGISTRY.keys())}" + ) + + api_class = _API_REGISTRY[site_key] + return api_class() + + +def get_available_sites() -> list: + """ + Get list of available streaming sites. + + Returns: + List of site names + """ + return list(_API_REGISTRY.keys()) + + +def register_api(site_name: str, api_class: Type[BaseStreamingAPI]): + """ + Register a new API class. 
+ + Args: + site_name: Name of the site + api_class: API class that inherits from BaseStreamingAPI + """ + if not issubclass(api_class, BaseStreamingAPI): + raise ValueError(f"{api_class} must inherit from BaseStreamingAPI") + + _API_REGISTRY[site_name.lower()] = api_class + + +__all__ = [ + 'BaseStreamingAPI', + 'StreamingCommunityAPI', + 'AnimeUnityAPI', + 'get_api', + 'get_available_sites', + 'register_api' +] \ No newline at end of file diff --git a/GUI/searchapp/api/animeunity.py b/GUI/searchapp/api/animeunity.py new file mode 100644 index 000000000..fc28036d3 --- /dev/null +++ b/GUI/searchapp/api/animeunity.py @@ -0,0 +1,142 @@ +# 06-06-2025 By @FrancescoGrazioso -> "https://github.com/FrancescoGrazioso" + + +import importlib +from typing import List, Optional + + +# Internal utilities +from .base import BaseStreamingAPI, MediaItem, Season, Episode + + +# External utilities +from StreamingCommunity.Util.config_json import config_manager +from StreamingCommunity.Api.Site.animeunity.util.ScrapeSerie import ScrapeSerieAnime + + + +class AnimeUnityAPI(BaseStreamingAPI): + def __init__(self): + super().__init__() + self.site_name = "animeunity" + self._load_config() + self._search_fn = None + + def _load_config(self): + """Load site configuration.""" + self.base_url = (config_manager.get_site("animeunity", "full_url") or "").rstrip("/") + + def _get_search_fn(self): + """Lazy load the search function.""" + if self._search_fn is None: + module = importlib.import_module("StreamingCommunity.Api.Site.animeunity") + self._search_fn = getattr(module, "search") + return self._search_fn + + def search(self, query: str) -> List[MediaItem]: + """ + Search for content on AnimeUnity. 
+ + Args: + query: Search term + + Returns: + List of MediaItem objects + """ + try: + search_fn = self._get_search_fn() + database = search_fn(query, get_onlyDatabase=True) + + results = [] + if database and hasattr(database, 'media_list'): + for element in database.media_list: + item_dict = element.__dict__.copy() if hasattr(element, '__dict__') else {} + + media_item = MediaItem( + id=item_dict.get('id'), + title=item_dict.get('name'), + slug=item_dict.get('slug', ''), + type=item_dict.get('type'), + url=item_dict.get('url'), + poster=item_dict.get('image'), + raw_data=item_dict + ) + results.append(media_item) + + return results + + except Exception as e: + raise Exception(f"AnimeUnity search error: {e}") + + def get_series_metadata(self, media_item: MediaItem) -> Optional[List[Season]]: + """ + Get seasons and episodes for an AnimeUnity series. + Note: AnimeUnity typically has single season anime. + + Args: + media_item: MediaItem to get metadata for + + Returns: + List of Season objects (usually one season), or None if not a series + """ + # Check if it's a movie or OVA + if media_item.is_movie: + return None + + try: + scraper = ScrapeSerieAnime(self.base_url) + scraper.setup(series_name=media_item.slug, media_id=media_item.id) + + episodes_count = scraper.get_count_episodes() + if not episodes_count: + return None + + # AnimeUnity typically has single season + episodes = [] + for ep_num in range(1, episodes_count + 1): + episode = Episode( + number=ep_num, + name=f"Episodio {ep_num}", + id=ep_num + ) + episodes.append(episode) + + season = Season(number=1, episodes=episodes) + return [season] + + except Exception as e: + raise Exception(f"Error getting series metadata: {e}") + + def start_download(self, media_item: MediaItem, season: Optional[str] = None, episodes: Optional[str] = None) -> bool: + """ + Start downloading from AnimeUnity. 
+ + Args: + media_item: MediaItem to download + season: Season number (typically 1 for anime) + episodes: Episode selection + + Returns: + True if download started successfully + """ + try: + search_fn = self._get_search_fn() + + # Prepare direct_item from MediaItem + direct_item = media_item.raw_data or media_item.to_dict() + + # For AnimeUnity, we only use episode selection + selections = None + if episodes: + selections = {'episode': episodes} + + elif not media_item.is_movie: + # Default: download all episodes + selections = {'episode': '*'} + + # Execute download + search_fn(direct_item=direct_item, selections=selections) + return True + + except Exception as e: + raise Exception(f"Download error: {e}") \ No newline at end of file diff --git a/GUI/searchapp/api/base.py b/GUI/searchapp/api/base.py new file mode 100644 index 000000000..6398b6012 --- /dev/null +++ b/GUI/searchapp/api/base.py @@ -0,0 +1,165 @@ +# 06-06-2025 By @FrancescoGrazioso -> "https://github.com/FrancescoGrazioso" + + +from abc import ABC, abstractmethod +from typing import Any, Dict, List, Optional +from dataclasses import dataclass + + +@dataclass +class MediaItem: + """Standardized media item representation.""" + id: Any + title: str + slug: str + type: str # 'film', 'series', 'ova', etc. 
+ url: Optional[str] = None + poster: Optional[str] = None + release_date: Optional[str] = None + year: Optional[int] = None + raw_data: Optional[Dict[str, Any]] = None + + @property + def is_movie(self) -> bool: + return self.type.lower() in ['film', 'movie', 'ova'] + + def to_dict(self) -> Dict[str, Any]: + return { + 'id': self.id, + 'title': self.title, + 'slug': self.slug, + 'type': self.type, + 'url': self.url, + 'poster': self.poster, + 'release_date': self.release_date, + 'year': self.year, + 'raw_data': self.raw_data, + 'is_movie': self.is_movie + } + + +@dataclass +class Episode: + """Episode information.""" + number: int + name: str + id: Optional[Any] = None + + def to_dict(self) -> Dict[str, Any]: + return { + 'number': self.number, + 'name': self.name, + 'id': self.id + } + + +@dataclass +class Season: + """Season information.""" + number: int + episodes: List[Episode] + + @property + def episode_count(self) -> int: + return len(self.episodes) + + def to_dict(self) -> Dict[str, Any]: + return { + 'number': self.number, + 'episodes': [ep.to_dict() for ep in self.episodes], + 'episode_count': self.episode_count + } + + +class BaseStreamingAPI(ABC): + """Base class for all streaming site APIs.""" + + def __init__(self): + self.site_name: str = "" + self.base_url: str = "" + + @abstractmethod + def search(self, query: str) -> List[MediaItem]: + """ + Search for content on the streaming site. + + Args: + query: Search term + + Returns: + List of MediaItem objects + """ + pass + + @abstractmethod + def get_series_metadata(self, media_item: MediaItem) -> Optional[List[Season]]: + """ + Get seasons and episodes for a series. + + Args: + media_item: MediaItem to get metadata for + + Returns: + List of Season objects, or None if not a series + """ + pass + + @abstractmethod + def start_download(self, media_item: MediaItem, season: Optional[str] = None, episodes: Optional[str] = None) -> bool: + """ + Start downloading content. 
+ + Args: + media_item: MediaItem to download + season: Season number (for series) + episodes: Episode selection (e.g., "1-5" or "1,3,5" or "*" for all) + + Returns: + True if download started successfully + """ + pass + + def ensure_complete_item(self, partial_item: Dict[str, Any]) -> MediaItem: + """ + Ensure a media item has all required fields by searching the database. + + Args: + partial_item: Dictionary with partial item data + + Returns: + Complete MediaItem object + """ + # If already complete, convert to MediaItem + if partial_item.get('id') and (partial_item.get('slug') or partial_item.get('url')): + return self._dict_to_media_item(partial_item) + + # Try to find in database + query = (partial_item.get('title') or partial_item.get('name') or partial_item.get('slug') or partial_item.get('display_title')) + + if query: + results = self.search(query) + if results: + wanted_slug = partial_item.get('slug') + if wanted_slug: + for item in results: + if item.slug == wanted_slug: + return item + + return results[0] + + # Fallback: return partial item + return self._dict_to_media_item(partial_item) + + def _dict_to_media_item(self, data: Dict[str, Any]) -> MediaItem: + """Convert dictionary to MediaItem.""" + return MediaItem( + id=data.get('id'), + title=data.get('title') or data.get('name') or 'Unknown', + slug=data.get('slug') or '', + type=data.get('type') or data.get('media_type') or 'unknown', + url=data.get('url'), + poster=data.get('poster') or data.get('poster_url') or data.get('image'), + release_date=data.get('release_date') or data.get('first_air_date'), + year=data.get('year'), + raw_data=data + ) \ No newline at end of file diff --git a/GUI/searchapp/api/streamingcommunity.py b/GUI/searchapp/api/streamingcommunity.py new file mode 100644 index 000000000..f985b6c2f --- /dev/null +++ b/GUI/searchapp/api/streamingcommunity.py @@ -0,0 +1,151 @@ +# 06-06-2025 By @FrancescoGrazioso -> "https://github.com/FrancescoGrazioso" + + +import importlib +from 
typing import List, Optional + + +# Internal utilities +from .base import BaseStreamingAPI, MediaItem, Season, Episode + + +# External utilities +from StreamingCommunity.Util.config_json import config_manager +from StreamingCommunity.Api.Site.streamingcommunity.util.ScrapeSerie import GetSerieInfo + + +class StreamingCommunityAPI(BaseStreamingAPI): + def __init__(self): + super().__init__() + self.site_name = "streamingcommunity" + self._load_config() + self._search_fn = None + + def _load_config(self): + """Load site configuration.""" + self.base_url = (config_manager.get_site("streamingcommunity", "full_url") or "").rstrip("/") + "/it" + + def _get_search_fn(self): + """Lazy load the search function.""" + if self._search_fn is None: + module = importlib.import_module("StreamingCommunity.Api.Site.streamingcommunity") + self._search_fn = getattr(module, "search") + return self._search_fn + + def search(self, query: str) -> List[MediaItem]: + """ + Search for content on StreamingCommunity. + + Args: + query: Search term + + Returns: + List of MediaItem objects + """ + try: + search_fn = self._get_search_fn() + database = search_fn(query, get_onlyDatabase=True) + + results = [] + if database and hasattr(database, 'media_list'): + for element in database.media_list: + item_dict = element.__dict__.copy() if hasattr(element, '__dict__') else {} + + media_item = MediaItem( + id=item_dict.get('id'), + title=item_dict.get('name'), + slug=item_dict.get('slug', ''), + type=item_dict.get('type'), + url=item_dict.get('url'), + poster=item_dict.get('image'), + release_date=item_dict.get('date'), + raw_data=item_dict + ) + results.append(media_item) + + return results + except Exception as e: + raise Exception(f"StreamingCommunity search error: {e}") + + def get_series_metadata(self, media_item: MediaItem) -> Optional[List[Season]]: + """ + Get seasons and episodes for a StreamingCommunity series. 
+ + Args: + media_item: MediaItem to get metadata for + + Returns: + List of Season objects, or None if not a series + """ + # Check if it's a movie + if media_item.is_movie: + return None + + try: + scraper = GetSerieInfo( + url=self.base_url, + media_id=media_item.id, + series_name=media_item.slug + ) + + seasons_count = scraper.getNumberSeason() + if not seasons_count: + return None + + seasons = [] + for season_num in range(1, seasons_count + 1): + try: + episodes_raw = scraper.getEpisodeSeasons(season_num) + episodes = [] + + for idx, ep in enumerate(episodes_raw or [], 1): + episode = Episode( + number=idx, + name=getattr(ep, 'name', f"Episodio {idx}"), + id=getattr(ep, 'id', idx) + ) + episodes.append(episode) + + season = Season(number=season_num, episodes=episodes) + seasons.append(season) + + except Exception: + continue + + return seasons if seasons else None + + except Exception as e: + raise Exception(f"Error getting series metadata: {e}") + + def start_download(self, media_item: MediaItem, season: Optional[str] = None, episodes: Optional[str] = None) -> bool: + """ + Start downloading from StreamingCommunity. 
+ + Args: + media_item: MediaItem to download + season: Season number (for series) + episodes: Episode selection + + Returns: + True if download started successfully + """ + try: + search_fn = self._get_search_fn() + + # Prepare direct_item from MediaItem + direct_item = media_item.raw_data or media_item.to_dict() + + # Prepare selections + selections = None + if season or episodes: + selections = { + 'season': season, + 'episode': episodes + } + + # Execute download + search_fn(direct_item=direct_item, selections=selections) + return True + + except Exception as e: + raise Exception(f"Download error: {e}") \ No newline at end of file diff --git a/GUI/searchapp/apps.py b/GUI/searchapp/apps.py index d1ab60427..a2e1dac80 100644 --- a/GUI/searchapp/apps.py +++ b/GUI/searchapp/apps.py @@ -1,6 +1,9 @@ +# 06-06-2025 By @FrancescoGrazioso -> "https://github.com/FrancescoGrazioso" + + from django.apps import AppConfig class SearchappConfig(AppConfig): default_auto_field = "django.db.models.BigAutoField" - name = "searchapp" + name = "searchapp" \ No newline at end of file diff --git a/GUI/searchapp/forms.py b/GUI/searchapp/forms.py index e66f43592..e1a543ff4 100644 --- a/GUI/searchapp/forms.py +++ b/GUI/searchapp/forms.py @@ -1,15 +1,17 @@ +# 06-06-2025 By @FrancescoGrazioso -> "https://github.com/FrancescoGrazioso" + + from django import forms +from GUI.searchapp.api import get_available_sites -SITE_CHOICES = [ - ("animeunity", "AnimeUnity"), - ("streamingcommunity", "StreamingCommunity"), -] +def get_site_choices(): + sites = get_available_sites() + return [(site, site.replace('_', ' ').title()) for site in sites] class SearchForm(forms.Form): site = forms.ChoiceField( - choices=SITE_CHOICES, label="Sito", widget=forms.Select( attrs={ @@ -28,11 +30,14 @@ class SearchForm(forms.Form): } ), ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.fields['site'].choices = get_site_choices() class DownloadForm(forms.Form): source_alias = 
forms.CharField(widget=forms.HiddenInput) item_payload = forms.CharField(widget=forms.HiddenInput) - # Opzionali per serie season = forms.CharField(max_length=10, required=False, label="Stagione") - episode = forms.CharField(max_length=20, required=False, label="Episodio (es: 1-3)") + episode = forms.CharField(max_length=20, required=False, label="Episodio (es: 1-3)") \ No newline at end of file diff --git a/GUI/searchapp/templates/searchapp/results.html b/GUI/searchapp/templates/searchapp/results.html index dfa3d31de..fd3e136a5 100644 --- a/GUI/searchapp/templates/searchapp/results.html +++ b/GUI/searchapp/templates/searchapp/results.html @@ -154,47 +154,31 @@

{{ item.display_t {% endif %} - -
- {% csrf_token %} - - - - - - - - - - - +
+ {% else %} + + - + + - Scarica - - + Visualizza Episodi + + {% endif %} {% endfor %} @@ -283,161 +267,11 @@

Nessun risultato trovato

border-color: #d1d5db !important; } - /* Force black text for season/episode selects (requested) */ - /* always comment in english */ - .combo-wrapper select.season-from, - .combo-wrapper select.season-to, - .combo-wrapper select.episode-from, - .combo-wrapper select.episode-to { - color: #111827 !important; - } - .combo-wrapper select.season-from option, - .combo-wrapper select.season-to option, - .combo-wrapper select.episode-from option, - .combo-wrapper select.episode-to option { - color: #111827 !important; - } - /* Smooth scrolling */ html { scroll-behavior: smooth; } - diff --git a/GUI/searchapp/templates/searchapp/series_detail.html b/GUI/searchapp/templates/searchapp/series_detail.html new file mode 100644 index 000000000..57bcc749e --- /dev/null +++ b/GUI/searchapp/templates/searchapp/series_detail.html @@ -0,0 +1,269 @@ + + + + + + {{ title }} - Dettagli Serie + + + + + + +
+ + {% if bg_image_url %} + +
+
+ {% endif %} + +
+ +
+ + + + +
+
+
+
+ + + +
+
+

{{ title }}

+

Seleziona stagione ed episodi da scaricare

+
+
+
+
+ + + {% if messages %} +
+ {% for message in messages %} +
+ {{ message }} +
+ {% endfor %} +
+ {% endif %} + + +
+ {% for season in seasons %} +
+ +
+
+
+
+ {{ season.number }} +
+
+

Stagione {{ season.number }}

+

{{ season.episodes|length }} episodi disponibili

+
+
+ +
+ {% csrf_token %} + + + + + +
+
+
+ + +
+
+ {% csrf_token %} + + + + + + +
+

Seleziona Episodi

+
+ + +
+
+ +
+ {% for episode in season.episodes %} + + {% endfor %} +
+ + +
+
+
+ {% endfor %} +
+
+
+ + + + + + diff --git a/GUI/searchapp/tests.py b/GUI/searchapp/tests.py index b58fb3886..897c09fc6 100644 --- a/GUI/searchapp/tests.py +++ b/GUI/searchapp/tests.py @@ -1,3 +1,6 @@ +# 06-06-2025 By @FrancescoGrazioso -> "https://github.com/FrancescoGrazioso" + + from django.test import TestCase, Client from django.urls import reverse from unittest.mock import patch @@ -82,4 +85,4 @@ def test_animeunity_single_season(self, _cfg_mock, scrape_mock): data = resp.json() self.assertTrue(data.get("isSeries")) self.assertEqual(data.get("seasonsCount"), 1) - self.assertEqual(data.get("episodesPerSeason"), {1: 24}) + self.assertEqual(data.get("episodesPerSeason"), {1: 24}) \ No newline at end of file diff --git a/GUI/searchapp/urls.py b/GUI/searchapp/urls.py index d9076e8e0..6c8371721 100644 --- a/GUI/searchapp/urls.py +++ b/GUI/searchapp/urls.py @@ -1,3 +1,6 @@ +# 06-06-2025 By @FrancescoGrazioso -> "https://github.com/FrancescoGrazioso" + + from django.urls import path from . import views @@ -6,4 +9,5 @@ path("search/", views.search, name="search"), path("download/", views.start_download, name="start_download"), path("series-metadata/", views.series_metadata, name="series_metadata"), -] + path("series-detail/", views.series_detail, name="series_detail"), +] \ No newline at end of file diff --git a/GUI/searchapp/views.py b/GUI/searchapp/views.py index dff114e35..9481834ea 100644 --- a/GUI/searchapp/views.py +++ b/GUI/searchapp/views.py @@ -1,163 +1,77 @@ -import threading -import importlib +# 06-06-2025 By @FrancescoGrazioso -> "https://github.com/FrancescoGrazioso" + + import json -from typing import Any, Dict, List, Optional +import threading from datetime import datetime +from typing import Any, Dict + +# External utilities from django.shortcuts import render, redirect from django.http import HttpRequest, HttpResponse, JsonResponse from django.views.decorators.http import require_http_methods from django.contrib import messages -from .forms import SearchForm, 
DownloadForm - - -def _load_site_search(site: str): - module_path = f"StreamingCommunity.Api.Site.{site}" - mod = importlib.import_module(module_path) - return getattr(mod, "search") - - -def _ensure_direct_item(search_fn, item_payload: Dict[str, Any]) -> Dict[str, Any]: - """Garantisce un direct_item valido ricostruendolo dal database se mancano campi chiave.""" - if item_payload.get("id") and (item_payload.get("slug") or item_payload.get("url")): - return item_payload - - query = ( - item_payload.get("title") - or item_payload.get("name") - or item_payload.get("slug") - or item_payload.get("display_title") - ) - if not query: - return item_payload - try: - database = search_fn(query, get_onlyDatabase=True) - if ( - not database - or not hasattr(database, "media_list") - or not database.media_list - ): - return item_payload - - # Prova match per slug - wanted_slug = item_payload.get("slug") - if wanted_slug: - for el in database.media_list: - if getattr(el, "slug", None) == wanted_slug: - return el.__dict__.copy() - - # Altrimenti primo risultato - return database.media_list[0].__dict__.copy() - except Exception: - return item_payload - - -def _search_results_to_list( - database_obj: Any, source_alias: str -) -> List[Dict[str, Any]]: - # database_obj expected to be MediaManager with media_list of MediaItem-like objects - results = [] - if not database_obj or not hasattr(database_obj, "media_list"): - return results - for element in database_obj.media_list: - item_dict = element.__dict__.copy() if hasattr(element, "__dict__") else {} - # Campi sicuri per il template - item_dict["display_title"] = ( - item_dict.get("title") - or item_dict.get("name") - or item_dict.get("slug") - or "Senza titolo" - ) - item_dict["display_type"] = ( - item_dict.get("type") or item_dict.get("media_type") or "Unknown" - ) - item_dict["source"] = source_alias.capitalize() - item_dict["source_alias"] = source_alias - - # Data di uscita (prova diversi campi comuni; visualizza 
preferibilmente l'anno) - release_raw = ( - item_dict.get("release_date") - or item_dict.get("first_air_date") - or item_dict.get("air_date") - or item_dict.get("date") - or item_dict.get("publish_date") - or item_dict.get("publishedAt") - ) - release_year = ( - item_dict.get("year") - or item_dict.get("release_year") - or item_dict.get("start_year") - ) - display_release = None - if release_raw: - # Prova parsing in vari formati comuni - parsed_date = None - for fmt in ("%Y-%m-%d", "%d/%m/%Y", "%Y/%m/%d", "%d-%m-%Y", "%Y"): +# Internal utilities +from .forms import SearchForm, DownloadForm +from GUI.searchapp.api import get_api +from GUI.searchapp.api.base import MediaItem + + +def _media_item_to_display_dict(item: MediaItem, source_alias: str) -> Dict[str, Any]: + """Convert MediaItem to template-friendly dictionary.""" + result = { + 'display_title': item.title, + 'display_type': item.type.capitalize(), + 'source': source_alias.capitalize(), + 'source_alias': source_alias, + 'bg_image_url': item.poster, + 'is_movie': item.is_movie, + } + + # Format release date + display_release = None + if item.year: + display_release = str(item.year) + elif item.release_date: + try: + for fmt in ('%Y-%m-%d', '%d/%m/%Y', '%Y/%m/%d', '%d-%m-%Y', '%Y'): try: - parsed_date = datetime.strptime(str(release_raw)[:10], fmt) + parsed_date = datetime.strptime(str(item.release_date)[:10], fmt) + display_release = str(parsed_date.year) break + except Exception: continue - if parsed_date: - display_release = str(parsed_date.year) - else: - # Fallback: prova a estrarre l'anno da una stringa tipo 2021-... 
+ + if not display_release: try: - year_guess = int(str(release_raw)[:4]) - display_release = str(year_guess) + display_release = str(int(str(item.release_date)[:4])) + except Exception: - display_release = str(release_raw) - elif release_year: - display_release = str(release_year) - item_dict["display_release"] = display_release - - # Immagine di sfondo (usa il primo campo disponibile) - bg_image_url = ( - item_dict.get("poster") - or item_dict.get("poster_url") - or item_dict.get("image") - or item_dict.get("image_url") - or item_dict.get("cover") - or item_dict.get("cover_url") - or item_dict.get("thumbnail") - or item_dict.get("thumb") - or item_dict.get("backdrop") - or item_dict.get("backdrop_url") - ) - if isinstance(bg_image_url, dict): - # Alcune API possono restituire un oggetto con varie dimensioni - # Prova chiavi comuni - bg_image_url = ( - bg_image_url.get("url") - or bg_image_url.get("large") - or bg_image_url.get("medium") - or bg_image_url.get("small") - ) - item_dict["bg_image_url"] = bg_image_url - try: - item_dict["payload_json"] = json.dumps(item_dict) + display_release = str(item.release_date) + except Exception: - item_dict["payload_json"] = json.dumps( - { - k: item_dict.get(k) - for k in ["id", "name", "title", "type", "url", "slug"] - if k in item_dict - } - ) - results.append(item_dict) - return results + pass + + result['display_release'] = display_release + result['payload_json'] = json.dumps(item.to_dict()) + + return result @require_http_methods(["GET"]) def search_home(request: HttpRequest) -> HttpResponse: + """Display search form.""" form = SearchForm() return render(request, "searchapp/home.html", {"form": form}) @require_http_methods(["POST"]) def search(request: HttpRequest) -> HttpResponse: + """Handle search requests.""" form = SearchForm(request.POST) if not form.is_valid(): messages.error(request, "Dati non validi") @@ -167,9 +81,9 @@ def search(request: HttpRequest) -> HttpResponse: query = form.cleaned_data["query"] try: - 
search_fn = _load_site_search(site) - database = search_fn(query, get_onlyDatabase=True) - results = _search_results_to_list(database, site) + api = get_api(site) + media_items = api.search(query) + results = [_media_item_to_display_dict(item, site) for item in media_items] except Exception as e: messages.error(request, f"Errore nella ricerca: {e}") return render(request, "searchapp/home.html", {"form": form}) @@ -186,38 +100,31 @@ def search(request: HttpRequest) -> HttpResponse: ) -def _run_download_in_thread( - site: str, - item_payload: Dict[str, Any], - season: Optional[str], - episode: Optional[str], -) -> None: +def _run_download_in_thread(site: str, item_payload: Dict[str, Any], season: str = None, episodes: str = None) -> None: + """Run download in background thread.""" def _task(): try: - search_fn = _load_site_search(site) - - # Assicura direct_item valido - direct_item = _ensure_direct_item(search_fn, item_payload) - - selections = None - # Per animeunity consideriamo solo gli episodi - if site == "animeunity": - selections = {"episode": episode or None} if episode else None - else: - if season or episode: - selections = {"season": season or None, "episode": episode or None} - - search_fn(direct_item=direct_item, selections=selections) + api = get_api(site) + + # Ensure complete item + media_item = api.ensure_complete_item(item_payload) + + # Start download + api.start_download(media_item, season=season, episodes=episodes) except Exception: - return + pass threading.Thread(target=_task, daemon=True).start() @require_http_methods(["POST"]) def series_metadata(request: HttpRequest) -> JsonResponse: + """ + API endpoint to get series metadata (seasons/episodes). + Returns JSON with series information. 
+ """ try: - # Expect either JSON body or standard form fields + # Parse request if request.content_type and "application/json" in request.content_type: body = json.loads(request.body.decode("utf-8")) source_alias = body.get("source_alias") or body.get("site") @@ -230,114 +137,49 @@ def series_metadata(request: HttpRequest) -> JsonResponse: if not source_alias or not item_payload: return JsonResponse({"error": "Parametri mancanti"}, status=400) - site = (source_alias.split("_")[0] if source_alias else "").lower() - media_type = ( - item_payload.get("type") or item_payload.get("media_type") or "" - ).lower() - - # Films and OVA: no seasons/episodes - if media_type in ("film", "movie", "ova"): - return JsonResponse( - {"isSeries": False, "seasonsCount": 0, "episodesPerSeason": {}} - ) - - # Guard rail: require id and slug where needed - media_id = item_payload.get("id") - slug = item_payload.get("slug") or item_payload.get("name") - - if site == "streamingcommunity": - # Lazy import to avoid loading heavy package during tests unless needed - import importlib - - try: - scrape_mod = importlib.import_module( - "StreamingCommunity.Api.Site.streamingcommunity.util.ScrapeSerie" - ) - GetSerieInfo = getattr(scrape_mod, "GetSerieInfo") - except Exception as imp_err: - return JsonResponse({"error": f"Import error: {imp_err}"}, status=500) - - # Best-effort base_url - base_url = "" - try: - from StreamingCommunity.Util.config_json import config_manager - - base_url = ( - config_manager.get_site("streamingcommunity", "full_url") or "" - ).rstrip("/") - except Exception: - base_url = "" - - scraper = GetSerieInfo(url=base_url, media_id=media_id, series_name=slug) - seasons_count = scraper.getNumberSeason() - episodes_per_season: Dict[int, int] = {} - for season_number in range(1, (seasons_count or 0) + 1): - try: - episodes = scraper.getEpisodeSeasons(season_number) - episodes_per_season[season_number] = len(episodes or []) - except Exception: - 
episodes_per_season[season_number] = 0 - - return JsonResponse( - { - "isSeries": True, - "seasonsCount": seasons_count or 0, - "episodesPerSeason": episodes_per_season, - } - ) - - if site == "animeunity": - import importlib - - try: - scrape_mod = importlib.import_module( - "StreamingCommunity.Api.Site.animeunity.util.ScrapeSerie" - ) - ScrapeSerieAnime = getattr(scrape_mod, "ScrapeSerieAnime") - except Exception as imp_err: - return JsonResponse({"error": f"Import error: {imp_err}"}, status=500) - - # Best-effort base_url - base_url = "" - try: - from StreamingCommunity.Util.config_json import config_manager - - base_url = ( - config_manager.get_site("animeunity", "full_url") or "" - ).rstrip("/") - except Exception: - base_url = "" - - scraper = ScrapeSerieAnime(url=base_url) - # Optional fields - try: - scraper.setup(series_name=slug, media_id=media_id) - except Exception: - pass - - try: - episodes_count = scraper.get_count_episodes() - except Exception: - episodes_count = None - - return JsonResponse( - { - "isSeries": True, - "seasonsCount": 1, - "episodesPerSeason": {1: (episodes_count or 0)}, - } - ) - - # Default: unknown site treated as no metadata - return JsonResponse( - {"isSeries": False, "seasonsCount": 0, "episodesPerSeason": {}} - ) + # Get API instance + api = get_api(source_alias) + + # Convert to MediaItem + media_item = api._dict_to_media_item(item_payload) + + # Check if it's a movie + if media_item.is_movie: + return JsonResponse({ + "isSeries": False, + "seasonsCount": 0, + "episodesPerSeason": {} + }) + + # Get series metadata + seasons = api.get_series_metadata(media_item) + + if not seasons: + return JsonResponse({ + "isSeries": False, + "seasonsCount": 0, + "episodesPerSeason": {} + }) + + # Build response + episodes_per_season = { + season.number: season.episode_count + for season in seasons + } + + return JsonResponse({ + "isSeries": True, + "seasonsCount": len(seasons), + "episodesPerSeason": episodes_per_season + }) + except 
Exception as e: return JsonResponse({"error": str(e)}, status=500) @require_http_methods(["POST"]) def start_download(request: HttpRequest) -> HttpResponse: + """Handle download requests for movies or individual series selections.""" form = DownloadForm(request.POST) if not form.is_valid(): messages.error(request, "Dati non validi") @@ -348,7 +190,7 @@ def start_download(request: HttpRequest) -> HttpResponse: season = form.cleaned_data.get("season") or None episode = form.cleaned_data.get("episode") or None - # Normalizza spazi + # Normalize if season: season = str(season).strip() or None if episode: @@ -360,37 +202,28 @@ def start_download(request: HttpRequest) -> HttpResponse: messages.error(request, "Payload non valido") return redirect("search_home") - # source_alias is like 'streamingcommunity' or 'animeunity' - site = source_alias.split("_")[0].lower() - - # Estrai titolo per il messaggio - title = ( - item_payload.get("display_title") - or item_payload.get("title") - or item_payload.get("name") - or "contenuto selezionato" - ) + # Extract title for message + title = item_payload.get("title") - # Per animeunity, se non specificato e se non รจ un contenuto non seriale (film/ova), - # scarica tutti gli episodi evitando prompt - media_type = ( - item_payload.get("type") or item_payload.get("media_type") or "" - ).lower() - if ( - site == "animeunity" - and not episode - and media_type not in ("film", "movie", "ova") - ): + # For animeunity, default to all episodes if not specified and not a movie + site = source_alias.split("_")[0].lower() + media_type = (item_payload.get("type") or "").lower() + + if site == "animeunity" and not episode and media_type not in ("film", "movie", "ova"): episode = "*" + # Start download in background _run_download_in_thread(site, item_payload, season, episode) - # Messaggio di successo con dettagli + # Success message season_info = "" if site != "animeunity" and season: season_info = f" (Stagione {season}" episode_info = f", 
@require_http_methods(["GET", "POST"])
def series_detail(request: HttpRequest) -> HttpResponse:
    """
    Display the series-detail page (GET) or start a season/episode download (POST).

    GET expects `source_alias` and a JSON `item_payload` in the query string.
    POST expects `source_alias`, `item_payload`, `season_number`, `download_type`
    and optionally `selected_episodes`.
    """
    if request.method == "GET":
        source_alias = request.GET.get("source_alias")
        item_payload_raw = request.GET.get("item_payload")

        if not source_alias or not item_payload_raw:
            messages.error(request, "Parametri mancanti per visualizzare i dettagli della serie.")
            return redirect("search_home")

        try:
            item_payload = json.loads(item_payload_raw)
        except Exception:
            messages.error(request, "Errore nel caricamento dei dati della serie.")
            return redirect("search_home")

        try:
            # Get API instance
            api = get_api(source_alias)

            # Ensure complete item
            media_item = api.ensure_complete_item(item_payload)

            # Get series metadata
            seasons = api.get_series_metadata(media_item)

            if not seasons:
                messages.error(request, "Impossibile recuperare le informazioni sulla serie.")
                return redirect("search_home")

            # Convert to template format
            seasons_data = [season.to_dict() for season in seasons]

            context = {
                "title": media_item.title,
                "source_alias": source_alias,
                "item_payload": json.dumps(media_item.to_dict()),
                "seasons": seasons_data,
                "bg_image_url": media_item.poster,
            }

            return render(request, "searchapp/series_detail.html", context)

        except Exception as e:
            messages.error(request, f"Errore nel caricamento dei dettagli: {str(e)}")
            return redirect("search_home")

    # POST: download season or selected episodes
    elif request.method == "POST":
        source_alias = request.POST.get("source_alias")
        item_payload_raw = request.POST.get("item_payload")
        season_number = request.POST.get("season_number")
        download_type = request.POST.get("download_type")
        selected_episodes = request.POST.get("selected_episodes", "")

        if not all([source_alias, item_payload_raw, season_number]):
            messages.error(request, "Parametri mancanti per il download.")
            return redirect("search_home")

        try:
            item_payload = json.loads(item_payload_raw)
        except Exception:
            messages.error(request, "Errore nel parsing dei dati.")
            return redirect("search_home")

        title = item_payload.get("title")

        # Prepare download parameters
        if download_type == "full_season":
            episode_selection = "*"
            msg_detail = f"stagione {season_number} completa"

        else:
            episode_selection = selected_episodes.strip() if selected_episodes else None
            if not episode_selection:
                messages.error(request, "Nessun episodio selezionato.")
                # FIX: the original did `redirect("series_detail") + f"?..."`,
                # which adds a str to an HttpResponseRedirect and raises
                # TypeError at runtime. Build the full URL (with properly
                # URL-encoded query values) before redirecting.
                from urllib.parse import urlencode
                query = urlencode({
                    "source_alias": source_alias,
                    "item_payload": item_payload_raw,
                })
                return redirect(f"{request.path}?{query}")
            msg_detail = f"S{season_number}:E{episode_selection}"

        # Start download
        # NOTE(review): start_download passes the short site key
        # (source_alias.split("_")[0]) to this helper — confirm which form
        # _run_download_in_thread actually expects.
        _run_download_in_thread(source_alias, item_payload, season_number, episode_selection)

        messages.success(
            request,
            f"Download avviato per '{title}' - {msg_detail}. "
            f"Il download sta procedendo in background."
        )

        return redirect("search_home")
Version](https://img.shields.io/pypi/v/streamingcommunity?logo=pypi&logoColor=white&labelColor=2d3748&color=3182ce&style=for-the-badge)](https://pypi.org/project/streamingcommunity/) [![Last Commit](https://img.shields.io/github/last-commit/Arrowar/StreamingCommunity?logo=git&logoColor=white&labelColor=2d3748&color=805ad5&style=for-the-badge)](https://github.com/Arrowar/StreamingCommunity/commits) [![Sponsor](https://img.shields.io/badge/๐Ÿ’–_Sponsor-ea4aaa?style=for-the-badge&logo=github-sponsors&logoColor=white&labelColor=2d3748)](https://ko-fi.com/arrowar) @@ -167,6 +165,20 @@ dash_process.get_status() See [DASH example](./Test/Downloads/DASH.py) for complete usage. +โ“‚๏ธ MEGA + +```python +mega = Mega_Downloader() +m = mega.login() + +output_path = m.download_url( + url="https://mega.nz/file/0kgCWZZB#7u....", + dest_path=".\\prova.mp4" +) +``` + +See [MEGA example](./Test/Downloads/MEGA.py) for complete usage. + --- ## Configuration diff --git a/StreamingCommunity/Api/Player/Helper/Vixcloud/util.py b/StreamingCommunity/Api/Player/Helper/Vixcloud/util.py index fa050b98a..9a7621418 100644 --- a/StreamingCommunity/Api/Player/Helper/Vixcloud/util.py +++ b/StreamingCommunity/Api/Player/Helper/Vixcloud/util.py @@ -14,6 +14,7 @@ def __init__(self, data: Dict[str, Any]): self.url: str = data.get('url', '') self.mpd_id: str = data.get('mpd_id', '') self.channel: str = data.get('channel', '') + self.category: str = data.get('category', '') def __str__(self): return f"Episode(id={self.id}, number={self.number}, name='{self.name}', duration={self.duration} sec)" diff --git a/StreamingCommunity/Api/Site/crunchyroll/site.py b/StreamingCommunity/Api/Site/crunchyroll/site.py index c87df4895..ceee7c7d1 100644 --- a/StreamingCommunity/Api/Site/crunchyroll/site.py +++ b/StreamingCommunity/Api/Site/crunchyroll/site.py @@ -102,9 +102,9 @@ def title_search(query: str) -> int: title = item.get("title", "") media_search_manager.add_media({ - 'url': url, 'name': title, - 'type': tipo 
def get_user_input(string_to_search: str = None):
    """
    Ask the user for a search term.

    If *string_to_search* is provided it is returned directly (stripped),
    bypassing the interactive prompt.
    """
    if string_to_search is not None:
        return string_to_search.strip()

    else:
        return msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()


def process_search_result(select_title, selections=None):
    """
    Handles the search result and initiates the download for a film.

    Parameters:
        select_title (MediaItem): The selected media item
        selections (dict, optional): Kept for interface parity with the other
            site modules; this site only hosts films, so it is unused here.

    Returns:
        bool: True if processing was successful, False otherwise
    """
    if not select_title:
        return False

    if select_title.type == 'film':
        download_film(select_title)
        table_show_manager.clear()
        return True

    # FIX: the original fell through and implicitly returned None for
    # non-film items, contradicting the documented bool contract.
    return False


# search("Game of Thrones", selections={"season": "1", "episode": "1-3"})
def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None, selections: dict = None):
    """
    Main function of the application for search.

    Parameters:
        string_to_search (str, optional): String to search for
        get_onlyDatabase (bool, optional): If True, return only the database object
        direct_item (dict, optional): Direct item to process (bypass search)
        selections (dict, optional): Dictionary containing selection inputs that bypass manual input
            {'season': season_selection, 'episode': episode_selection}
    """
    if direct_item:
        select_title = MediaItem(**direct_item)
        result = process_search_result(select_title, selections)
        return result

    # Get the user input for the search term
    actual_search_query = get_user_input(string_to_search)

    # Handle empty input
    if not actual_search_query:
        return False

    # Search on database
    len_database = title_search(actual_search_query)

    # If only the database is needed, return the manager
    if get_onlyDatabase:
        return media_search_manager

    if len_database > 0:
        select_title = get_select_title(table_show_manager, media_search_manager, len_database)
        result = process_search_result(select_title, selections)
        return result

    else:
        console.print(f"\n[red]Nothing matching was found for[white]: [purple]{actual_search_query}")
        return False
def download_film(select_title: MediaItem) -> str:
    """
    Downloads a film from its hd4me page via the Mega downloader.

    Parameters:
        - select_title (MediaItem): The selected media item.

    Return:
        - str: output path if successful, otherwise None
    """
    start_message()
    console.print(f"\n[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")

    mega_link = None
    try:
        response = create_client_curl(headers=get_headers()).get(select_title.url)
        response.raise_for_status()

        # Parse HTML looking for an anchor that embeds a Mega file id in
        # either of the two formats the site uses.
        soup = BeautifulSoup(response.text, 'html.parser')
        for a in soup.find_all("a", href=True):
            href = a["href"].lower().strip()

            if "?!" in href:
                mega_link = "https://mega.nz/file/" + a["href"].split("/")[-1].replace('?!', '')
                break

            if "/?file/" in href:
                mega_link = "https://mega.nz/file/" + a["href"].split("/")[-1].replace('/?file/', '')
                break

    except Exception as e:
        # FIX: the original message said "get mostraguarda" (copy-paste from
        # another site module); report the actual failing step instead.
        console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, unable to fetch film page")
        return None

    # FIX: bail out BEFORE creating an anonymous Mega session when no link
    # was found (the original logged in first, then checked).
    if mega_link is None:
        console.print(f"[red]Site: {site_constant.SITE_NAME}, error: Mega link not found for url: {select_title.url}[/red]")
        return None

    # Sanitized output name and per-title folder for the download
    title_name = os_manager.get_sanitize_file(select_title.name, select_title.date) + extension_output
    mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(extension_output, ""))

    # Download the film using the mega downloader
    mega = Mega_Downloader()
    m = mega.login()

    output_path = m.download_url(
        url=mega_link,
        dest_path=os.path.join(mp4_path, title_name)
    )
    return output_path
def title_search(query: str) -> int:
    """
    Search the site's full film list for titles matching a query.

    Parameters:
        - query (str): The query to search for.

    Returns:
        int: The number of titles found.
    """
    media_search_manager.clear()
    table_show_manager.clear()

    search_url = "https://hd4me.net/lista-film"
    console.print(f"[cyan]Search url: [yellow]{search_url}")

    try:
        response = create_client(headers={'user-agent': get_userAgent()}).get(search_url)
        response.raise_for_status()

    except Exception as e:
        console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
        return 0

    soup = BeautifulSoup(response.text, "html.parser")
    needle = query.lower()

    # Every film entry is an <li> wrapping an <a> that carries both href and id.
    for entry in soup.find_all("li"):
        anchor = entry.find("a", href=True, id=True)
        if anchor is None:
            continue

        film_title = anchor.get_text().split("–")[0].strip()
        if needle not in film_title.lower():
            continue

        media_search_manager.add_media({
            'id': anchor.get("id"),
            'name': film_title,
            'type': 'film',
            'url': 'https://hd4me.net' + anchor["href"].strip(),
            'image': None
        })

    # Return the number of titles found
    return media_search_manager.get_length()
date, + "url": item.get("cardLink", {}).get("value", "") }) return media_search_manager.get_length() \ No newline at end of file diff --git a/StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py index 46e9b6eec..7daa70c95 100644 --- a/StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py +++ b/StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py @@ -15,15 +15,17 @@ class GetSerieInfo: - def __init__(self, url): + def __init__(self, url, min_duration=10): """ Initialize the GetSerieInfo class for scraping TV series information. Args: - url (str): The URL of the streaming site. + - min_duration (int): Minimum duration in minutes for episodes to be included """ self.headers = get_headers() self.url = url + self.min_duration = min_duration self.seasons_manager = SeasonManager() self.serie_id = None self.public_id = None @@ -71,6 +73,7 @@ def _process_available_seasons(self, data): if season: stagioni_disponibili.append({ 'tvSeasonNumber': season['tvSeasonNumber'], + 'title': season.get('title', ''), 'url': url, 'id': str(url).split("/")[-1], 'guid': season['guid'] @@ -104,33 +107,33 @@ def _extract_season_sb_ids(self, stagioni_disponibili): print("Response for _extract_season_sb_ids:", response_page.status_code, " season index:", season['tvSeasonNumber']) soup = BeautifulSoup(response_page.text, 'html.parser') - # Try first with 'Episodi', then with 'Puntate intere' - link = soup.find('a', string='Episodi') - if not link: - #print("Using word: Puntate intere") - link = soup.find('a', string='Puntate intere') - - if link is None: - link = soup.find('a', class_ = 'titleCarousel') + # Check for titleCarousel links (multiple categories) + carousel_links = soup.find_all('a', class_='titleCarousel') - if link and link.has_attr('href'): - if not link.string == 'Puntate intere': - print("Using word: Episodi") - - season['sb'] = link['href'].split(',')[-1] + if carousel_links: 
+ print(f"Found {len(carousel_links)} titleCarousel categories") + season['categories'] = [] + + for carousel_link in carousel_links: + if carousel_link.has_attr('href'): + category_title = carousel_link.find('h2') + category_name = category_title.text.strip() if category_title else 'Unnamed' + sb_id = carousel_link['href'].split(',')[-1] + + season['categories'].append({ + 'name': category_name, + 'sb': sb_id + }) else: - logging.warning(f"Link 'Episodi' or 'Puntate intere' not found for season {season['tvSeasonNumber']}") + logging.warning(f"No titleCarousel categories found for season {season['tvSeasonNumber']}") - def _get_season_episodes(self, season): + def _get_season_episodes(self, season, sb_id, category_name): """Get episodes for a specific season""" - if not season.get('sb'): - return - episode_headers = { 'user-agent': get_userAgent(), } params = { - 'byCustomValue': "{subBrandId}{" + str(season["sb"].replace('sb', '')) + "}", + 'byCustomValue': "{subBrandId}{" + str(sb_id.replace('sb', '')) + "}", 'sort': ':publishInfo_lastPublished|asc,tvSeasonEpisodeNumber|asc', 'range': '0-100', } @@ -141,22 +144,33 @@ def _get_season_episodes(self, season): episode_response.raise_for_status() episode_data = episode_response.json() - season['episodes'] = [] + episodes = [] + filtered_count = 0 for entry in episode_data.get('entries', []): + duration = int(entry.get('mediasetprogram$duration', 0) / 60) if entry.get('mediasetprogram$duration') else 0 + + # Filter episodes by minimum duration + if duration < self.min_duration: + filtered_count += 1 + continue + episode_info = { 'id': entry.get('guid'), 'title': entry.get('title'), - 'duration': int(entry.get('mediasetprogram$duration', 0) / 60) if entry.get('mediasetprogram$duration') else 0, + 'duration': duration, 'url': entry.get('media', [{}])[0].get('publicUrl') if entry.get('media') else None, - 'name': entry.get('title') + 'name': entry.get('title'), + 'category': category_name } - 
season['episodes'].append(episode_info) + episodes.append(episode_info) - print(f"Found {len(season['episodes'])} episodes for season {season['tvSeasonNumber']}") + print(f"Found {len(episodes)} episodes for season {season['tvSeasonNumber']} ({category_name})") + return episodes except Exception as e: logging.error(f"Failed to get episodes for season {season['tvSeasonNumber']} with error: {e}") + return [] def collect_season(self) -> None: """ @@ -191,7 +205,12 @@ def collect_season(self) -> None: # Step 7: Get episodes for each season for season in self.stagioni_disponibili: - self._get_season_episodes(season) + if 'categories' in season: + season['episodes'] = [] + for category in season['categories']: + episodes = self._get_season_episodes(season, category['sb'], category['name']) + if episodes: + season['episodes'].extend(episodes) # Step 8: Populate seasons manager self._populate_seasons_manager() @@ -209,14 +228,15 @@ def _populate_seasons_manager(self): if season_data.get('episodes') and len(season_data['episodes']) > 0: season_obj = self.seasons_manager.add_season({ 'number': season_data['tvSeasonNumber'], - 'name': f"Season {season_data['tvSeasonNumber']}" + 'name': f"Season {season_data['tvSeasonNumber']}", + 'id': season_data.get('title', '') }) if season_obj: for episode in season_data['episodes']: season_obj.episodes.add(episode) seasons_with_episodes += 1 - + # ------------- FOR GUI ------------- def getNumberSeason(self) -> int: """ diff --git a/StreamingCommunity/Api/Site/raiplay/site.py b/StreamingCommunity/Api/Site/raiplay/site.py index aee9d09b3..a6599a17f 100644 --- a/StreamingCommunity/Api/Site/raiplay/site.py +++ b/StreamingCommunity/Api/Site/raiplay/site.py @@ -86,9 +86,9 @@ def title_search(query: str) -> int: media_search_manager.add_media({ 'id': item.get('id', ''), - 'name': item.get('titolo', 'Unknown'), - 'type': "tv", 'path_id': path_id, + 'name': item.get('titolo', 'Unknown'), + 'type': 'tv', 'url': url, 'image': image, 'year': 
image.split("/")[5] diff --git a/StreamingCommunity/Api/Template/Util/manage_ep.py b/StreamingCommunity/Api/Template/Util/manage_ep.py index 915553d5b..15869ce57 100644 --- a/StreamingCommunity/Api/Template/Util/manage_ep.py +++ b/StreamingCommunity/Api/Template/Util/manage_ep.py @@ -291,24 +291,41 @@ def display_episodes_list(episodes_manager) -> str: # Set up table for displaying episodes table_show_manager = TVShowManager() + # Check if any episode has a non-empty category + has_category = False + for media in episodes_manager: + category = media.get('category') if isinstance(media, dict) else getattr(media, 'category', None) + if category is not None and str(category).strip() != '': + has_category = True + break + # Add columns to the table column_info = { "Index": {'color': 'red'}, "Name": {'color': 'magenta'}, - "Duration": {'color': 'blue'} } + + if has_category: + column_info["Category"] = {'color': 'green'} + + column_info["Duration"] = {'color': 'blue'} + table_show_manager.add_column(column_info) # Populate the table with episodes information for i, media in enumerate(episodes_manager): name = media.get('name') if isinstance(media, dict) else getattr(media, 'name', None) duration = media.get('duration') if isinstance(media, dict) else getattr(media, 'duration', None) + category = media.get('category') if isinstance(media, dict) else getattr(media, 'category', None) episode_info = { 'Index': str(i + 1), 'Name': name, - 'Duration': duration + 'Duration': duration, } + + if has_category: + episode_info['Category'] = category table_show_manager.add_tv_show(episode_info) diff --git a/StreamingCommunity/Lib/Downloader/DASH/downloader.py b/StreamingCommunity/Lib/Downloader/DASH/downloader.py index f45300b09..0ef9b5302 100644 --- a/StreamingCommunity/Lib/Downloader/DASH/downloader.py +++ b/StreamingCommunity/Lib/Downloader/DASH/downloader.py @@ -588,14 +588,11 @@ def finalize_output(self): self.output_file = new_filename # Display file information - if 
os.path.exists(output_file): - file_size = internet_manager.format_file_size(os.path.getsize(output_file)) - duration = print_duration_table(output_file, description=False, return_string=True) - console.print(f"[yellow]Output [red]{os.path.abspath(output_file)} [cyan]with size [red]{file_size} [cyan]and duration [red]{duration}") - else: - console.print(f"[red]Output file not found: {output_file}") - self.error = f"Output file not found: {output_file}" - return None + file_size = internet_manager.format_file_size(os.path.getsize(output_file)) + duration = print_duration_table(output_file, description=False, return_string=True) + console.print(f"[yellow]Output[white]: [red]{os.path.abspath(output_file)} \n" + f" [cyan]with size[white]: [red]{file_size} \n" + f" [cyan]and duration[white]: [red]{duration}") if CLEANUP_TMP: @@ -613,7 +610,7 @@ def finalize_output(self): try: os.rmdir(self.out_path) - except Exception as e: + except Exception: pass # Verify the final file exists before returning diff --git a/StreamingCommunity/Lib/Downloader/HLS/downloader.py b/StreamingCommunity/Lib/Downloader/HLS/downloader.py index a3b8f11b5..b5c04d93d 100644 --- a/StreamingCommunity/Lib/Downloader/HLS/downloader.py +++ b/StreamingCommunity/Lib/Downloader/HLS/downloader.py @@ -706,7 +706,10 @@ def _print_summary(self, use_shortest: bool): os.rename(self.path_manager.output_path, new_filename) self.path_manager.output_path = new_filename - console.print(f"[yellow]Output [red]{os.path.abspath(self.path_manager.output_path)} [cyan]with size [red]{file_size} [cyan]and duration [red]{duration}") + # Display file information + console.print(f"[yellow]Output[white]: [red]{os.path.abspath(self.path_manager.output_path)} \n" + f" [cyan]with size[white]: [red]{file_size} \n" + f" [cyan]and duration[white]: [red]{duration}") def get_progress_data(self) -> Optional[Dict]: """Get current download progress data.""" @@ -721,4 +724,4 @@ def get_progress_data(self) -> Optional[Dict]: except Exception 
def makebyte(x):
    """Encode a str to bytes using latin-1 (MEGA's byte-string convention)."""
    return codecs.latin_1_encode(x)[0]

def makestring(x):
    """Decode latin-1 bytes back to a str."""
    return codecs.latin_1_decode(x)[0]

def aes_cbc_encrypt(data, key):
    """AES-CBC encrypt *data* with *key* and an all-zero IV (MEGA protocol)."""
    aes_cipher = AES.new(key, AES.MODE_CBC, makebyte('\0' * 16))
    return aes_cipher.encrypt(data)

def aes_cbc_decrypt(data, key):
    """AES-CBC decrypt *data* with *key* and an all-zero IV (MEGA protocol)."""
    aes_cipher = AES.new(key, AES.MODE_CBC, makebyte('\0' * 16))
    return aes_cipher.decrypt(data)

def aes_cbc_encrypt_a32(data, key):
    """AES-CBC encrypt where data/key are tuples of 32-bit words."""
    return str_to_a32(aes_cbc_encrypt(a32_to_str(data), a32_to_str(key)))

def aes_cbc_decrypt_a32(data, key):
    """AES-CBC decrypt where data/key are tuples of 32-bit words."""
    return str_to_a32(aes_cbc_decrypt(a32_to_str(data), a32_to_str(key)))

def encrypt_key(a, key):
    """Encrypt an a32 key in 128-bit (4-word) chunks; returns one flat tuple."""
    return sum((aes_cbc_encrypt_a32(a[i:i + 4], key)
                for i in range(0, len(a), 4)), ())

def decrypt_key(a, key):
    """Decrypt an a32 key in 128-bit (4-word) chunks; returns one flat tuple."""
    return sum((aes_cbc_decrypt_a32(a[i:i + 4], key)
                for i in range(0, len(a), 4)), ())

def decrypt_attr(attr, key):
    """
    Decrypt a node attribute block and parse its JSON payload.

    Returns the parsed dict when the plaintext carries the expected
    'MEGA{"' prefix; otherwise returns None (implicit), which callers
    treat as a decryption failure.
    """
    attr = aes_cbc_decrypt(attr, a32_to_str(key))
    attr = makestring(attr)
    attr = attr.rstrip('\0')

    if attr[:6] == 'MEGA{"':
        json_start = attr.index('{')
        json_end = attr.rfind('}') + 1
        return json.loads(attr[json_start:json_end])

def a32_to_str(a):
    """Pack a tuple of 32-bit words into big-endian bytes."""
    return struct.pack('>%dI' % len(a), *a)

def str_to_a32(b):
    """Unpack bytes (or a latin-1 str) into a tuple of big-endian 32-bit words."""
    if isinstance(b, str):
        b = makebyte(b)
    if len(b) % 4:
        # Zero-pad to a multiple of 4 bytes
        b += b'\0' * (4 - len(b) % 4)
    # FIX: use integer division; the original passed a float to the %d
    # format specifier, which only worked by accident.
    return struct.unpack('>%dI' % (len(b) // 4), b)


def mpi_to_int(s):
    """Convert an MPI (2-byte bit-length header + big-endian body) to int."""
    return int(binascii.hexlify(s[2:]), 16)

def extended_gcd(a, b):
    """Return (g, x, y) such that a*x + b*y == g == gcd(a, b)."""
    if a == 0:
        return (b, 0, 1)
    else:
        g, y, x = extended_gcd(b % a, a)
        return (g, x - (b // a) * y, y)

def modular_inverse(a, m):
    """Return the inverse of *a* modulo *m*; raises if it does not exist."""
    g, x, _ = extended_gcd(a, m)
    if g != 1:
        raise Exception('modular inverse does not exist')
    else:
        return x % m

def base64_url_decode(data):
    """Decode MEGA's URL-safe base64 variant (no padding, -_ alphabet)."""
    data += '=='[(2 - len(data) * 3) % 4:]
    for search, replace in (('-', '+'), ('_', '/'), (',', '')):
        data = data.replace(search, replace)
    return base64.b64decode(data)

def base64_to_a32(s):
    """Decode URL-safe base64 straight into a tuple of 32-bit words."""
    return str_to_a32(base64_url_decode(s))

def base64_url_encode(data):
    """Encode bytes as MEGA's URL-safe base64 variant (no padding)."""
    data = base64.b64encode(data)
    data = makestring(data)
    for search, replace in (('+', '-'), ('/', '_'), ('=', '')):
        data = data.replace(search, replace)

    return data

def a32_to_base64(a):
    """Encode a tuple of 32-bit words as URL-safe base64."""
    return base64_url_encode(a32_to_str(a))

def get_chunks(size):
    """
    Yield (offset, length) download chunks for a file of *size* bytes.

    Chunk length starts at 128 KiB and grows by 128 KiB per chunk up to
    1 MiB, matching MEGA's chunking scheme.
    """
    p = 0
    s = 0x20000
    while p + s < size:
        yield (p, s)
        p += s
        if s < 0x100000:
            s += 0x20000

    yield (p, size - p)

def make_id(length):
    """Return a random alphanumeric string of *length* characters."""
    possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    # FIX: join a generator instead of quadratic string += in a loop.
    return ''.join(random.choice(possible) for _ in range(length))
class RequestError(Exception):
    """
    Error in API request.

    Wraps a numeric MEGA API status code and resolves it to a
    human-readable message via _CODE_TO_DESCRIPTIONS.
    """
    def __init__(self, message):
        # The API hands us the numeric status code directly.
        code = message
        self.code = code
        # FIX: an unknown/undocumented code used to raise KeyError inside
        # the constructor, masking the original API error; fall back to a
        # generic description instead.
        code_desc, long_desc = _CODE_TO_DESCRIPTIONS.get(
            code, ('EUNKNOWN', f'Unknown error code: {code}')
        )
        self.message = f'{code_desc}, {long_desc}'

    def __str__(self):
        return self.message
+++ b/StreamingCommunity/Lib/Downloader/MEGA/mega.py @@ -0,0 +1,321 @@ +# 25-06-2020 By @rodwyer "https://pypi.org/project/mega.py/" + +import os +import math +import re +import random +import binascii +import sys +import time +from pathlib import Path + + +# External libraries +import httpx +from tqdm import tqdm +from Crypto.Cipher import AES +from Crypto.PublicKey import RSA +from Crypto.Util import Counter +from rich.console import Console + + +# Internal utilities +from .errors import RequestError +from .crypto import ( + a32_to_base64, encrypt_key, base64_url_encode, + base64_to_a32, base64_url_decode, + decrypt_attr, a32_to_str, get_chunks, str_to_a32, + decrypt_key, mpi_to_int, make_id, + modular_inverse +) + +from StreamingCommunity.Util.color import Colors +from StreamingCommunity.Util.config_json import config_manager +from StreamingCommunity.Util.os import internet_manager, os_manager +from StreamingCommunity.Util.headers import get_userAgent +from ...FFmpeg import print_duration_table + + +# Config +EXTENSION_OUTPUT = config_manager.get("M3U8_CONVERSION", "extension") + + +# Variable +console = Console() + + +class Mega_Downloader: + def __init__(self, options=None): + self.schema = 'https' + self.domain = 'mega.co.nz' + self.timeout = 160 + self.sid = None + self.sequence_num = random.randint(0, 0xFFFFFFFF) + self.request_id = make_id(10) + self._trash_folder_node_id = None + self.options = options or {} + + def login(self): + self.login_anonymous() + self._trash_folder_node_id = self.get_node_by_type(4)[0] + return self + + def login_anonymous(self): + master_key = [random.randint(0, 0xFFFFFFFF)] * 4 + password_key = [random.randint(0, 0xFFFFFFFF)] * 4 + session_self_challenge = [random.randint(0, 0xFFFFFFFF)] * 4 + + user = self._api_request({ + 'a': 'up', + 'k': a32_to_base64(encrypt_key(master_key, password_key)), + 'ts': base64_url_encode( + a32_to_str(session_self_challenge) + + a32_to_str(encrypt_key(session_self_challenge, master_key)) + ) + 
}) + + resp = self._api_request({'a': 'us', 'user': user}) + if isinstance(resp, int): + raise RequestError(resp) + self._login_process(resp, password_key) + + def _login_process(self, resp, password): + encrypted_master_key = base64_to_a32(resp['k']) + self.master_key = decrypt_key(encrypted_master_key, password) + + if 'tsid' in resp: + tsid = base64_url_decode(resp['tsid']) + key_encrypted = a32_to_str( + encrypt_key(str_to_a32(tsid[:16]), self.master_key) + ) + + if key_encrypted == tsid[-16:]: + self.sid = resp['tsid'] + + elif 'csid' in resp: + encrypted_rsa_private_key = base64_to_a32(resp['privk']) + rsa_private_key = decrypt_key(encrypted_rsa_private_key, self.master_key) + + private_key = a32_to_str(rsa_private_key) + rsa_private_key = [0, 0, 0, 0] + + for i in range(4): + bitlength = (private_key[0] * 256) + private_key[1] + bytelength = math.ceil(bitlength / 8) + 2 + rsa_private_key[i] = mpi_to_int(private_key[:bytelength]) + private_key = private_key[bytelength:] + + first_factor_p = rsa_private_key[0] + second_factor_q = rsa_private_key[1] + private_exponent_d = rsa_private_key[2] + rsa_modulus_n = first_factor_p * second_factor_q + phi = (first_factor_p - 1) * (second_factor_q - 1) + public_exponent_e = modular_inverse(private_exponent_d, phi) + + rsa_components = ( + rsa_modulus_n, + public_exponent_e, + private_exponent_d, + first_factor_p, + second_factor_q, + ) + rsa_decrypter = RSA.construct(rsa_components) + encrypted_sid = mpi_to_int(base64_url_decode(resp['csid'])) + sid = '%x' % rsa_decrypter._decrypt(encrypted_sid) + sid = binascii.unhexlify('0' + sid if len(sid) % 2 else sid) + self.sid = base64_url_encode(sid[:43]) + + def _api_request(self, data): + params = {'id': self.sequence_num} + self.sequence_num += 1 + + if self.sid: + params['sid'] = self.sid + + if not isinstance(data, list): + data = [data] + + url = f'{self.schema}://g.api.{self.domain}/cs' + + with httpx.Client(timeout=self.timeout) as client: + response = client.post(url, 
params=params, json=data) + json_resp = response.json() + + int_resp = None + try: + if isinstance(json_resp, list): + int_resp = json_resp[0] if isinstance(json_resp[0], int) else None + elif isinstance(json_resp, int): + int_resp = json_resp + except IndexError: + pass + + if int_resp is not None: + if int_resp == 0: + return int_resp + if int_resp == -3: + raise RuntimeError('Request failed, retrying') + raise RequestError(int_resp) + + return json_resp[0] + + def _parse_url(self, url): + """Parse file id and key from url.""" + if '/file/' in url: + url = url.replace(' ', '') + file_id = re.findall(r'\W\w{8}\W', url)[0][1:-1] + id_index = re.search(file_id, url).end() + key = url[id_index + 1:] + return f'{file_id}!{key}' + + elif '!' in url: + match = re.findall(r'/#!(.*)', url) + return match[0] + + else: + raise RequestError('Url key missing') + + def get_node_by_type(self, node_type): + """Get node by type (2=root, 3=inbox, 4=trash)""" + files = self._api_request({'a': 'f', 'c': 1, 'r': 1}) + for file in files['f']: + if file['t'] == node_type: + return (file['h'], file) + + return None + + def download_url(self, url, dest_path=None): + """Download a file by its public url""" + path_obj = Path(dest_path) + folder = str(path_obj.parent) + name = path_obj.name.replace(EXTENSION_OUTPUT, f".{EXTENSION_OUTPUT}") + os_manager.create_path(folder) + + path = self._parse_url(url).split('!') + file_id = path[0] + file_key = path[1] + + return self._download_file( + file_handle=file_id, + file_key=file_key, + dest_path=os.path.join(folder, name) + ) + + def _download_file(self, file_handle, file_key, dest_path=None): + file_key = base64_to_a32(file_key) + file_data = self._api_request({ + 'a': 'g', + 'g': 1, + 'p': file_handle + }) + + k = (file_key[0] ^ file_key[4], file_key[1] ^ file_key[5], + file_key[2] ^ file_key[6], file_key[3] ^ file_key[7]) + iv = file_key[4:6] + (0, 0) + meta_mac = file_key[6:8] + + if 'g' not in file_data: + raise RequestError('File not 
accessible anymore') + + file_url = file_data['g'] + file_size = file_data['s'] + attribs = base64_url_decode(file_data['at']) + attribs = decrypt_attr(attribs, k) + + file_name = os_manager.get_sanitize_file(attribs['n']) + output_path = Path(dest_path) if dest_path else Path(file_name) + os_manager.create_path(output_path.parent) + + k_str = a32_to_str(k) + counter = Counter.new( + 128, + initial_value=((iv[0] << 32) + iv[1]) << 64 + ) + aes = AES.new(k_str, AES.MODE_CTR, counter=counter) + + mac_str = '\0' * 16 + mac_encryptor = AES.new(k_str, AES.MODE_CBC, mac_str.encode("utf8")) + iv_str = a32_to_str([iv[0], iv[1], iv[0], iv[1]]) + + start_time = time.time() + downloaded = 0 + + console.print("[cyan]You can safely stop the download with [bold]Ctrl+c[bold] [cyan]") + with open(output_path, 'wb') as output_file: + with httpx.Client(timeout=None, headers={'User-Agent': get_userAgent()}) as client: + with client.stream('GET', file_url, headers={'User-Agent': get_userAgent()}) as response: + response.raise_for_status() + + progress_bar = tqdm( + total=file_size, + ascii='โ–‘โ–’โ–ˆ', + bar_format=f"{Colors.YELLOW}MEGA{Colors.CYAN} Downloading{Colors.WHITE}: " + f"{Colors.MAGENTA}{{bar:40}} " + f"{Colors.LIGHT_GREEN}{{n_fmt}}{Colors.WHITE}/{Colors.CYAN}{{total_fmt}}" + f" {Colors.DARK_GRAY}[{Colors.YELLOW}{{elapsed}}{Colors.WHITE} < {Colors.CYAN}{{remaining}}{Colors.DARK_GRAY}]" + f"{Colors.WHITE}{{postfix}} ", + unit='B', + unit_scale=True, + unit_divisor=1024, + mininterval=0.05, + file=sys.stdout + ) + + with progress_bar: + chunks_data = list(get_chunks(file_size)) + stream_iter = response.iter_bytes(chunk_size=8192) + + for chunk_start, chunk_size in chunks_data: + chunk = b'' + remaining = chunk_size + + while remaining > 0: + try: + data = next(stream_iter) + to_read = min(len(data), remaining) + chunk += data[:to_read] + remaining -= to_read + except StopIteration: + break + + chunk = aes.decrypt(chunk) + output_file.write(chunk) + + downloaded += len(chunk) 
+ progress_bar.update(len(chunk)) + + # Update postfix with speed + elapsed = time.time() - start_time + if elapsed > 0: + speed = downloaded / elapsed + speed_str = internet_manager.format_transfer_speed(speed) + postfix_str = f"{Colors.LIGHT_MAGENTA}@ {Colors.LIGHT_CYAN}{speed_str}" + progress_bar.set_postfix_str(postfix_str) + + encryptor = AES.new(k_str, AES.MODE_CBC, iv_str) + for i in range(0, len(chunk) - 16, 16): + block = chunk[i:i + 16] + encryptor.encrypt(block) + + if file_size > 16: + i += 16 + else: + i = 0 + + block = chunk[i:i + 16] + if len(block) % 16: + block += b'\0' * (16 - (len(block) % 16)) + mac_str = mac_encryptor.encrypt(encryptor.encrypt(block)) + + file_mac = str_to_a32(mac_str) + if (file_mac[0] ^ file_mac[1], file_mac[2] ^ file_mac[3]) != meta_mac: + if output_path.exists(): + output_path.unlink() + raise ValueError('Mismatched mac') + + # Display file information + file_size = internet_manager.format_file_size(os.path.getsize(output_path)) + duration = print_duration_table(output_path, description=False, return_string=True) + console.print(f"[yellow]Output[white]: [red]{os.path.abspath(output_path)} \n" + f" [cyan]with size[white]: [red]{file_size} \n" + f" [cyan]and duration[white]: [red]{duration}") \ No newline at end of file diff --git a/StreamingCommunity/Lib/Downloader/MP4/downloader.py b/StreamingCommunity/Lib/Downloader/MP4/downloader.py index bd52091cf..d045b20cb 100644 --- a/StreamingCommunity/Lib/Downloader/MP4/downloader.py +++ b/StreamingCommunity/Lib/Downloader/MP4/downloader.py @@ -14,7 +14,6 @@ from tqdm import tqdm from rich.console import Console from rich.prompt import Prompt -from rich.panel import Panel # Internal utilities @@ -182,14 +181,11 @@ def MP4_downloader(url: str, path: str, referer: str = None, headers_: dict = No os.rename(temp_path, path) if os.path.exists(path): - print("") - console.print(Panel( - f"[bold green]Download completed{' (Partial)' if interrupt_handler.force_quit else ''}![/bold green]\n" 
- f"[cyan]File size: [bold red]{internet_manager.format_file_size(os.path.getsize(path))}[/bold red]\n" - f"[cyan]Duration: [bold]{print_duration_table(path, description=False, return_string=True)}[/bold]", - title=f"{os.path.basename(path.replace(f'.{extension_output}', ''))}", - border_style="green" - )) + file_size = internet_manager.format_file_size(os.path.getsize(path)) + duration = print_duration_table(path, description=False, return_string=True) + console.print(f"[yellow]Output[white]: [red]{os.path.abspath(path)} \n" + f" [cyan]with size[white]: [red]{file_size} \n" + f" [cyan]and duration[white]: [red]{duration}") if TELEGRAM_BOT: message = f"Download completato{'(Parziale)' if interrupt_handler.force_quit else ''}\nDimensione: {internet_manager.format_file_size(os.path.getsize(path))}\nDurata: {print_duration_table(path, description=False, return_string=True)}\nTitolo: {os.path.basename(path.replace(f'.{extension_output}', ''))}" diff --git a/StreamingCommunity/Lib/Downloader/__init__.py b/StreamingCommunity/Lib/Downloader/__init__.py index c851fd6f5..779fbc2eb 100644 --- a/StreamingCommunity/Lib/Downloader/__init__.py +++ b/StreamingCommunity/Lib/Downloader/__init__.py @@ -4,10 +4,12 @@ from .MP4.downloader import MP4_downloader from .TOR.downloader import TOR_downloader from .DASH.downloader import DASH_Downloader +from .MEGA.mega import Mega_Downloader __all__ = [ "HLS_Downloader", "MP4_downloader", "TOR_downloader", - "DASH_Downloader" + "DASH_Downloader", + "Mega_Downloader" ] \ No newline at end of file diff --git a/StreamingCommunity/Upload/update.py b/StreamingCommunity/Upload/update.py index 13d4b6488..1682c9481 100644 --- a/StreamingCommunity/Upload/update.py +++ b/StreamingCommunity/Upload/update.py @@ -40,7 +40,6 @@ async def async_github_requests(): """Make concurrent GitHub API requests""" async with httpx.AsyncClient() as client: tasks = [ - fetch_github_data(client, f"https://api.github.com/repos/{__author__}/{__title__}"), 
fetch_github_data(client, f"https://api.github.com/repos/{__author__}/{__title__}/releases"), fetch_github_data(client, f"https://api.github.com/repos/{__author__}/{__title__}/commits") ] @@ -65,15 +64,12 @@ def update(): """Check for updates on GitHub and display relevant information.""" try: # Run async requests concurrently - response_reposity, response_releases, response_commits = asyncio.run(async_github_requests()) + response_releases, response_commits = asyncio.run(async_github_requests()) except Exception as e: console.print(f"[red]Error accessing GitHub API: {e}") return - # Get stargazers count from the repository - stargazers_count = response_reposity.get('stargazers_count', 0) - # Calculate total download count from all releases total_download_count = sum(asset['download_count'] for release in response_releases for asset in release.get('assets', [])) @@ -83,12 +79,6 @@ def update(): else: last_version = 'Unknown' - # Calculate percentual of stars based on download count - if total_download_count > 0 and stargazers_count > 0: - percentual_stars = round(stargazers_count / total_download_count * 100, 2) - else: - percentual_stars = 0 - # Get the current version (installed version) try: current_version = importlib.metadata.version(__title__) @@ -105,8 +95,14 @@ def update(): if str(current_version).replace('v', '') != str(last_version).replace('v', ''): console.print(f"\n[cyan]New version available: [yellow]{last_version}") - console.print(f"\n[red]{__title__} has been downloaded [yellow]{total_download_count} [red]times, but only [yellow]{percentual_stars}% [red]of users have starred it.\n\ - [yellow]{get_execution_mode()} - [green]Current installed version: [yellow]{current_version} [green]last commit: [white]'[yellow]{latest_commit_message.splitlines()[0]}[white]'\n\ - [cyan]Help the repository grow today by leaving a [yellow]star [cyan]and [yellow]sharing [cyan]it with others online!") + console.print( + f"\n[red]{__title__} has been downloaded 
[yellow]{total_download_count}" + f"\n[yellow]{get_execution_mode()} - [green]Current installed version: [yellow]{current_version} " + f"[green]last commit: [white]'[yellow]{latest_commit_message.splitlines()[0]}[white]'\n" + f" [cyan]Help the repository grow today by leaving a [yellow]star [cyan]and [yellow]sharing " + f"[cyan]it with others online!\n" + f" [magenta]If you'd like to support development and keep the program updated, consider leaving a " + f"[yellow]donation[magenta]. Thank you!" + ) time.sleep(1) \ No newline at end of file diff --git a/StreamingCommunity/Upload/version.py b/StreamingCommunity/Upload/version.py index 9615e5859..89b90f610 100644 --- a/StreamingCommunity/Upload/version.py +++ b/StreamingCommunity/Upload/version.py @@ -1,5 +1,5 @@ __title__ = 'StreamingCommunity' -__version__ = '3.4.7' +__version__ = '3.4.8' __author__ = 'Arrowar' __description__ = 'A command-line program to download film' __copyright__ = 'Copyright 2025' \ No newline at end of file diff --git a/StreamingCommunity/Util/installer/device_install.py b/StreamingCommunity/Util/installer/device_install.py index ed503977c..808093c01 100644 --- a/StreamingCommunity/Util/installer/device_install.py +++ b/StreamingCommunity/Util/installer/device_install.py @@ -7,6 +7,7 @@ # External library +import httpx from rich.console import Console @@ -21,6 +22,7 @@ class DeviceDownloader: def __init__(self): self.base_dir = binary_paths.ensure_binary_directory() + self.github_png_url = "https://github.com/Arrowar/StreamingCommunity/raw/main/.github/.site/img/crunchyroll_etp_rt.png" def extract_png_chunk(self, png_with_wvd: str, out_wvd_path: str) -> bool: """Extract WVD data""" @@ -85,19 +87,64 @@ def _find_png_recursively(self, start_dir: str = ".") -> Optional[str]: logging.error(f"Error during recursive PNG search: {e}") return None + def _download_png_from_github(self, output_path: str) -> bool: + """Download PNG file from GitHub repository.""" + try: + logging.info(f"Downloading PNG 
from GitHub: {self.github_png_url}") + + with httpx.Client(timeout=30.0, follow_redirects=True) as client: + response = client.get(self.github_png_url) + response.raise_for_status() + + with open(output_path, "wb") as f: + f.write(response.content) + + logging.info(f"Successfully downloaded PNG to: {output_path}") + return True + + except httpx.HTTPError as e: + logging.error(f"HTTP error downloading PNG from GitHub: {e}") + return False + except Exception as e: + logging.error(f"Error downloading PNG from GitHub: {e}") + return False + def download(self) -> Optional[str]: """ Main method to extract WVD file from PNG. + Downloads PNG from GitHub if not found locally. """ try: + # Try to find PNG locally first png_path = self._find_png_recursively() + temp_png_path = None + + # If not found locally, download from GitHub if not png_path: - logging.error("PNG file not found, cannot extract device.wvd") - return None + logging.info("PNG not found locally, downloading from GitHub") + temp_png_path = os.path.join(self.base_dir, 'crunchyroll_etp_rt.png') + + if not self._download_png_from_github(temp_png_path): + logging.error("Failed to download PNG from GitHub") + return None + + png_path = temp_png_path device_wvd_path = os.path.join(self.base_dir, 'device.wvd') - if self.extract_png_chunk(png_path, device_wvd_path): + # Extract WVD from PNG + extraction_success = self.extract_png_chunk(png_path, device_wvd_path) + + # Clean up temporary PNG file if it was downloaded + if temp_png_path and os.path.exists(temp_png_path): + try: + os.remove(temp_png_path) + logging.info("Removed temporary PNG file") + except Exception as e: + logging.warning(f"Could not remove temporary PNG file: {e}") + + # Check extraction result + if extraction_success: if os.path.exists(device_wvd_path) and os.path.getsize(device_wvd_path) > 0: logging.info("Successfully extracted device.wvd from PNG") return device_wvd_path diff --git a/StreamingCommunity/Util/message.py 
b/StreamingCommunity/Util/message.py index 166cdfd6a..0d760c8de 100644 --- a/StreamingCommunity/Util/message.py +++ b/StreamingCommunity/Util/message.py @@ -22,13 +22,11 @@ def start_message(): """Display a stylized start message in the console.""" msg = r''' -[red]+[cyan]=======================================================================================[red]+[purple] -| ___ ______ _ | -| / _ | ___________ _ _____ _____ __ __ / __/ /________ ___ ___ _ (_)__ ___ _ | -| / __ |/ __/ __/ _ \ |/|/ / _ `/ __/ \ \ / _\ \/ __/ __/ -_) _ `/ ' \/ / _ \/ _ `/ | -| /_/ |_/_/ /_/ \___/__,__/\_,_/_/ /_\_\ /___/\__/_/ \__/\_,_/_/_/_/_/_//_/\_, / | -| /___/ | -[red]+[cyan]=======================================================================================[red]+ + ___ ______ _ + / _ | ___________ _ _____ _____ __ __ / __/ /________ ___ ___ _ (_)__ ___ _ + / __ |/ __/ __/ _ \ |/|/ / _ `/ __/ \ \ / _\ \/ __/ __/ -_) _ `/ ' \/ / _ \/ _ `/ + /_/ |_/_/ /_/ \___/__,__/\_,_/_/ /_\_\ /___/\__/_/ \__/\_,_/_/_/_/_/_//_/\_, / + /___/ '''.rstrip() if CLEAN: diff --git a/StreamingCommunity/Util/os.py b/StreamingCommunity/Util/os.py index 2915bf50f..86ec0072d 100644 --- a/StreamingCommunity/Util/os.py +++ b/StreamingCommunity/Util/os.py @@ -169,6 +169,7 @@ def create_path(self, path: str, mode: int = 0o755) -> bool: bool: True if path created successfully, False otherwise. 
""" try: + path = str(path) sanitized_path = self.get_sanitize_path(path) os.makedirs(sanitized_path, mode=mode, exist_ok=True) return True diff --git a/StreamingCommunity/__init__.py b/StreamingCommunity/__init__.py index 69fe86ec8..17d977133 100644 --- a/StreamingCommunity/__init__.py +++ b/StreamingCommunity/__init__.py @@ -5,11 +5,13 @@ from .Lib.Downloader.MP4.downloader import MP4_downloader from .Lib.Downloader.TOR.downloader import TOR_downloader from .Lib.Downloader.DASH.downloader import DASH_Downloader +from .Lib.Downloader.MEGA.mega import Mega_Downloader __all__ = [ "main", "HLS_Downloader", "MP4_downloader", "TOR_downloader", - "DASH_Downloader" + "DASH_Downloader", + "Mega_Downloader", ] \ No newline at end of file diff --git a/Test/Downloads/MEGA.py b/Test/Downloads/MEGA.py new file mode 100644 index 000000000..ce80a372b --- /dev/null +++ b/Test/Downloads/MEGA.py @@ -0,0 +1,26 @@ +# 25-06-2020 +# ruff: noqa: E402 + +import os +import sys + + +# Fix import +src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) +sys.path.append(src_path) + + +from StreamingCommunity.Util.message import start_message +from StreamingCommunity.Util.logger import Logger +from StreamingCommunity import Mega_Downloader + + +start_message() +Logger() +mega = Mega_Downloader() +m = mega.login() + +output_path = m.download_url( + url="https://mega.nz/file/0kgCWZZB#7u....", + dest_path=".\\prova.mp4" +) \ No newline at end of file