diff --git a/.github/.site/css/style.css b/.github/.site/css/style.css
deleted file mode 100644
index 33d50fc84..000000000
--- a/.github/.site/css/style.css
+++ /dev/null
@@ -1,560 +0,0 @@
-@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
-
-:root {
- --primary-color: #8c52ff;
- --secondary-color: #6930c3;
- --accent-color: #00e5ff;
- --background-color: #121212;
- --card-background: #1e1e1e;
- --text-color: #f8f9fa;
- --shadow-color: rgba(0, 0, 0, 0.25);
- --card-hover: #2a2a2a;
- --border-color: #333333;
-}
-
-[data-theme="light"] {
- --background-color: #ffffff;
- --card-background: #f8f9fa;
- --text-color: #212529;
- --shadow-color: rgba(0, 0, 0, 0.1);
- --card-hover: #e9ecef;
- --border-color: #dee2e6;
-}
-
-* {
- margin: 0;
- padding: 0;
- box-sizing: border-box;
- transition: all 0.2s ease;
-}
-
-body {
- font-family: 'Inter', 'Segoe UI', sans-serif;
- background-color: var(--background-color);
- color: var(--text-color);
- line-height: 1.6;
- min-height: 100vh;
- display: flex;
- flex-direction: column;
-}
-
-.container {
- max-width: 1400px;
- margin: 0 auto;
- padding: 20px;
- flex: 1;
-}
-
-.header-container {
- display: flex;
- justify-content: space-between;
- align-items: center;
- padding: 15px 20px;
- background: var(--card-background);
- border-radius: 12px;
- border: 1px solid var(--border-color);
- margin-bottom: 20px;
-}
-
-.sites-stats {
- display: flex;
- gap: 20px;
- align-items: center;
-}
-
-.total-sites, .last-update-global {
- display: flex;
- align-items: center;
- gap: 8px;
- color: var(--text-color);
- font-size: 0.95rem;
- background: var(--background-color);
- padding: 8px 16px;
- border-radius: 8px;
- border: 1px solid var(--border-color);
- transition: all 0.3s ease;
-}
-
-.total-sites:hover, .last-update-global:hover {
- border-color: var(--primary-color);
- transform: translateY(-2px);
-}
-
-.total-sites i, .last-update-global i {
- color: var(--primary-color);
- font-size: 1.1rem;
-}
-
-.site-grid {
- display: grid;
- grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
- gap: 24px;
- padding: 2rem 0;
-}
-
-.site-item {
- min-height: 220px;
- background-color: var(--card-background);
- border-radius: 16px;
- padding: 30px;
- box-shadow: 0 6px 20px var(--shadow-color);
- transition: all 0.3s ease;
- display: flex;
- flex-direction: column;
- align-items: center;
- border: 1px solid var(--border-color);
- position: relative;
- overflow: hidden;
- cursor: pointer;
-}
-
-.site-item::before {
- content: '';
- position: absolute;
- top: 0;
- left: 0;
- width: 100%;
- height: 4px;
- background: linear-gradient(90deg, var(--primary-color), var(--accent-color));
- transition: height 0.3s ease;
-}
-
-.site-item:hover {
- transform: translateY(-5px);
- box-shadow: 0 12px 30px var(--shadow-color);
- border-color: var(--primary-color);
-}
-
-.site-item:hover::before {
- height: 6px;
-}
-
-.site-item img {
- width: 80px;
- height: 80px;
- margin-bottom: 1.5rem;
- border-radius: 16px;
- object-fit: cover;
- border: 2px solid var(--border-color);
- transition: transform 0.3s ease;
-}
-
-.site-item:hover img {
- transform: scale(1.05);
-}
-
-.site-item h3 {
- font-size: 1.4rem;
- font-weight: 600;
- margin-bottom: 0.5rem;
- color: var(--primary-color);
- text-align: center;
- transition: color 0.3s ease;
-}
-
-.site-item:hover h3 {
- color: var(--accent-color);
-}
-
-.site-info {
- display: flex;
- flex-direction: column;
- align-items: center;
- gap: 8px;
- margin-top: 10px;
- text-align: center;
- font-size: 0.85rem;
- color: var(--text-color);
- opacity: 0.8;
-}
-
-.last-update, .old-domain {
- display: flex;
- align-items: center;
- gap: 6px;
-}
-
-.last-update i, .old-domain i {
- color: var(--primary-color);
-}
-
-.site-item:hover .site-info {
- opacity: 1;
-}
-
-.site-status {
- position: absolute;
- top: 10px;
- right: 10px;
- width: 12px;
- height: 12px;
- border-radius: 50%;
- background: #4CAF50;
-}
-
-.site-status.offline {
- background: #f44336;
-}
-
-.status-indicator {
- position: fixed;
- top: 20px;
- right: 20px;
- background: var(--card-background);
- border: 1px solid var(--border-color);
- border-radius: 12px;
- padding: 15px 20px;
- box-shadow: 0 4px 20px var(--shadow-color);
- z-index: 1001;
- min-width: 280px;
- max-width: 400px;
- transition: all 0.3s ease;
-}
-
-.status-indicator.hidden {
- opacity: 0;
- transform: translateY(-20px);
- pointer-events: none;
-}
-
-.status-header {
- display: flex;
- align-items: center;
- gap: 10px;
- margin-bottom: 15px;
- font-weight: 600;
- color: var(--primary-color);
-}
-
-.status-icon {
- width: 20px;
- height: 20px;
- border: 2px solid var(--primary-color);
- border-radius: 50%;
- border-top-color: transparent;
- animation: spin 1s linear infinite;
-}
-
-.status-icon.ready {
- border: none;
- background: #4CAF50;
- animation: none;
- position: relative;
-}
-
-.status-icon.ready::after {
- content: '✓';
- position: absolute;
- top: 50%;
- left: 50%;
- transform: translate(-50%, -50%);
- color: white;
- font-size: 12px;
- font-weight: bold;
-}
-
-@keyframes spin {
- 0% { transform: rotate(0deg); }
- 100% { transform: rotate(360deg); }
-}
-
-.status-text {
- color: var(--text-color);
- font-size: 0.9rem;
- margin-bottom: 10px;
-}
-
-.checking-sites {
- max-height: 200px;
- overflow-y: auto;
- background: var(--background-color);
- border-radius: 8px;
- padding: 10px;
- border: 1px solid var(--border-color);
-}
-
-.checking-site {
- display: flex;
- align-items: center;
- justify-content: between;
- gap: 10px;
- padding: 6px 8px;
- margin-bottom: 4px;
- border-radius: 6px;
- background: var(--card-background);
- font-size: 0.8rem;
- color: var(--text-color);
- transition: all 0.2s ease;
-}
-
-.checking-site.completed {
- opacity: 0.6;
- background: var(--card-hover);
-}
-
-.checking-site.online {
- border-left: 3px solid #4CAF50;
-}
-
-.checking-site.offline {
- border-left: 3px solid #f44336;
-}
-
-.checking-site .site-name {
- flex: 1;
- font-weight: 500;
- overflow: hidden;
- text-overflow: ellipsis;
- white-space: nowrap;
-}
-
-.checking-site .site-status-icon {
- width: 12px;
- height: 12px;
- border-radius: 50%;
- flex-shrink: 0;
-}
-
-.checking-site .site-status-icon.checking {
- background: var(--primary-color);
- animation: pulse 1s infinite;
-}
-
-.checking-site .site-status-icon.online {
- background: #4CAF50;
-}
-
-.checking-site .site-status-icon.offline {
- background: #f44336;
-}
-
-@keyframes pulse {
- 0%, 100% { opacity: 1; }
- 50% { opacity: 0.5; }
-}
-
-.progress-bar {
- width: 100%;
- height: 6px;
- background: var(--background-color);
- border-radius: 3px;
- overflow: hidden;
- margin-top: 10px;
-}
-
-.progress-fill {
- height: 100%;
- background: linear-gradient(90deg, var(--primary-color), var(--accent-color));
- width: 0%;
- transition: width 0.3s ease;
- border-radius: 3px;
-}
-
-.loader {
- width: 48px;
- height: 48px;
- border: 3px solid var(--primary-color);
- border-bottom-color: transparent;
- border-radius: 50%;
- display: inline-block;
- position: relative;
- box-sizing: border-box;
- animation: rotation 1s linear infinite;
-}
-
-.loader::after {
- content: '';
- position: absolute;
- box-sizing: border-box;
- left: 0;
- top: 0;
- width: 48px;
- height: 48px;
- border-radius: 50%;
- border: 3px solid transparent;
- border-bottom-color: var(--accent-color);
- animation: rotationBack 0.5s linear infinite;
- transform: rotate(45deg);
-}
-
-@keyframes rotation {
- 0% { transform: rotate(0deg) }
- 100% { transform: rotate(360deg) }
-}
-
-@keyframes rotationBack {
- 0% { transform: rotate(0deg) }
- 100% { transform: rotate(-360deg) }
-}
-
-footer {
- background: var(--card-background);
- border-top: 1px solid var(--border-color);
- margin-top: auto;
- padding: 40px 20px;
- position: relative;
-}
-
-.footer-content {
- max-width: 1200px;
- margin: 0 auto;
- display: grid;
- grid-template-columns: repeat(3, 1fr);
- gap: 30px;
- position: relative;
- padding: 20px;
-}
-
-.footer-section {
- padding: 20px;
- border-radius: 12px;
- transition: transform 0.3s ease, background-color 0.3s ease;
- background-color: var(--card-background);
- border: 1px solid var(--border-color);
-}
-
-.footer-section:hover {
- transform: translateY(-5px);
- background-color: var(--card-hover);
-}
-
-.footer-title {
- color: var(--accent-color);
- font-size: 1.3rem;
- margin-bottom: 1.5rem;
- padding-bottom: 0.5rem;
- position: relative;
- letter-spacing: 0.5px;
-}
-
-.footer-title::after {
- content: '';
- position: absolute;
- bottom: 0;
- left: 0;
- width: 60px;
- height: 3px;
- border-radius: 2px;
- background: linear-gradient(90deg, var(--primary-color), var(--accent-color));
-}
-
-.footer-links {
- list-style: none;
-}
-
-.footer-links li {
- margin-bottom: 0.8rem;
-}
-
-.footer-links a {
- color: var(--text-color);
- text-decoration: none;
- display: flex;
- align-items: center;
- gap: 8px;
- opacity: 0.8;
- transition: all 0.3s ease;
- padding: 8px 12px;
- border-radius: 8px;
- background-color: transparent;
-}
-
-.footer-links a:hover {
- opacity: 1;
- color: var(--accent-color);
- transform: translateX(8px);
- background-color: rgba(140, 82, 255, 0.1);
-}
-
-.footer-links i {
- width: 20px;
- text-align: center;
- font-size: 1.2rem;
- color: var(--primary-color);
- transition: transform 0.3s ease;
-}
-
-.footer-links a:hover i {
- transform: scale(1.2);
-}
-
-.footer-description {
- margin-top: 15px;
- font-size: 0.9rem;
- color: var(--text-color);
- opacity: 0.8;
- line-height: 1.5;
-}
-
-.update-note {
- color: var(--accent-color);
- font-size: 0.9rem;
- opacity: 0.9;
-}
-
-/* Responsiveness */
-@media (max-width: 768px) {
- .site-grid {
- grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
- gap: 15px;
- padding: 1rem;
- }
-
- .site-item {
- min-height: 250px;
- padding: 20px;
- }
-
- .footer-content {
- grid-template-columns: 1fr;
- gap: 20px;
- padding: 15px;
- text-align: center;
- }
-
- .header-container {
- flex-direction: column;
- gap: 15px;
- }
-
- .sites-stats {
- flex-direction: column;
- width: 100%;
- }
-
- .total-sites, .last-update-global {
- width: 100%;
- justify-content: center;
- }
-
- .footer-title::after {
- left: 50%;
- transform: translateX(-50%);
- }
-
- .footer-links a {
- justify-content: center;
- }
-
- .footer-links a:hover {
- transform: translateY(-5px);
- }
-
- .footer-section {
- margin-bottom: 20px;
- }
-}
-
-@media (max-width: 480px) {
- .site-grid {
- grid-template-columns: 1fr;
- }
-
- .site-item {
- min-height: 220px;
- }
-
- .container {
- padding: 10px;
- }
-}
\ No newline at end of file
diff --git a/.github/.site/img/crunchyroll_etp_rt.png b/.github/.site/img/crunchyroll_etp_rt.png
deleted file mode 100644
index 109764821..000000000
Binary files a/.github/.site/img/crunchyroll_etp_rt.png and /dev/null differ
diff --git a/.github/.site/img/crunchyroll_x_cr_tab_id.png b/.github/.site/img/crunchyroll_x_cr_tab_id.png
deleted file mode 100644
index 38a522588..000000000
Binary files a/.github/.site/img/crunchyroll_x_cr_tab_id.png and /dev/null differ
diff --git a/.github/.site/index.html b/.github/.site/index.html
deleted file mode 100644
index 268b3cc2e..000000000
--- a/.github/.site/index.html
+++ /dev/null
@@ -1,68 +0,0 @@
-<!-- "Streaming Directory" page (68 lines; markup lost in extraction): head loading
-css/style.css, header stats (#sites-count, #last-update-time), the #site-list grid
-populated by js/script.js, and the footer. -->
\ No newline at end of file
diff --git a/.github/.site/js/script.js b/.github/.site/js/script.js
deleted file mode 100644
index 727e2977c..000000000
--- a/.github/.site/js/script.js
+++ /dev/null
@@ -1,245 +0,0 @@
-document.documentElement.setAttribute('data-theme', 'dark');
-
-let statusIndicator = null;
-let checkingSites = new Map();
-let totalSites = 0;
-let completedSites = 0;
-
-function createStatusIndicator() {
- statusIndicator = document.createElement('div');
- statusIndicator.className = 'status-indicator';
- statusIndicator.innerHTML = `
-     <div class="status-header">
-         <div class="status-icon"></div>
-         <span class="status-title">Initializing</span>
-     </div>
-     <div class="status-text">Initializing site checks...</div>
-     <div class="checking-sites"></div>
-     <div class="progress-bar"><div class="progress-fill"></div></div>
- `;
- document.body.appendChild(statusIndicator);
- return statusIndicator;
-}
-
-function updateStatusIndicator(status, text, progress = 0) {
- if (!statusIndicator) return;
-
- const statusIcon = statusIndicator.querySelector('.status-icon');
- const statusTitle = statusIndicator.querySelector('.status-title');
- const statusText = statusIndicator.querySelector('.status-text');
- const progressFill = statusIndicator.querySelector('.progress-fill');
-
- statusTitle.textContent = status;
- statusText.textContent = text;
- progressFill.style.width = `${progress}%`;
-
- if (status === 'Ready') {
- statusIcon.classList.add('ready');
- setTimeout(() => {
- statusIndicator.classList.add('hidden');
- setTimeout(() => statusIndicator.remove(), 300);
- }, 2000);
- }
-}
-
-function addSiteToCheck(siteName, siteUrl) {
- if (!statusIndicator) return;
-
- const checkingSitesContainer = statusIndicator.querySelector('.checking-sites');
- const siteElement = document.createElement('div');
- siteElement.className = 'checking-site';
- siteElement.innerHTML = `
-     <span class="site-name">${siteName}</span>
-     <div class="site-status-icon checking"></div>
- `;
- checkingSitesContainer.appendChild(siteElement);
- checkingSites.set(siteName, siteElement);
-}
-
-function updateSiteStatus(siteName, isOnline) {
- const siteElement = checkingSites.get(siteName);
- if (!siteElement) return;
-
- const statusIcon = siteElement.querySelector('.site-status-icon');
- statusIcon.classList.remove('checking');
- statusIcon.classList.add(isOnline ? 'online' : 'offline');
- siteElement.classList.add('completed', isOnline ? 'online' : 'offline');
-
- completedSites++;
- const progress = (completedSites / totalSites) * 100;
- updateStatusIndicator(
- 'Checking Sites...',
- `Checked ${completedSites}/${totalSites} sites`,
- progress
- );
-}
-
-async function checkSiteStatus(url, siteName) {
- try {
- console.log(`Checking status for: ${url}`);
- const controller = new AbortController();
- const timeoutId = setTimeout(() => controller.abort(), 3000);
-
- const response = await fetch(url, {
- method: 'HEAD',
- mode: 'no-cors',
- signal: controller.signal,
- headers: {
- 'Accept': 'text/html',
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) Chrome/133.0.0.0'
- }
- });
-
- clearTimeout(timeoutId);
-
- const isOnline = response.type === 'opaque';
- console.log(`Site ${url} is ${isOnline ? 'online' : 'offline'} (Type: ${response.type})`);
-
- if (siteName) {
- updateSiteStatus(siteName, isOnline);
- }
-
- return isOnline;
- } catch (error) {
- console.log(`Error checking ${url}:`, error.message);
-
- if (siteName) {
- updateSiteStatus(siteName, false);
- }
-
- return false;
- }
-}
-
-const domainsJsonUrl = 'https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json';
-
-async function loadSiteData() {
- try {
- console.log('Starting to load site data from GitHub...');
-
- createStatusIndicator();
- updateStatusIndicator('Loading...', 'Fetching site data from GitHub repository...', 0);
-
- const siteList = document.getElementById('site-list');
-
- console.log(`Fetching from GitHub: ${domainsJsonUrl}`);
- const response = await fetch(domainsJsonUrl);
-
- if (!response.ok) throw new Error(`HTTP error! Status: ${response.status}`);
-
- const configSite = await response.json(); // Directly get the site data object
-
- siteList.innerHTML = '';
-
- if (configSite && Object.keys(configSite).length > 0) { // Check if configSite is a non-empty object
- totalSites = Object.keys(configSite).length;
- completedSites = 0;
- let latestUpdate = new Date(0);
-
- document.getElementById('sites-count').textContent = totalSites;
-
- updateStatusIndicator('Checking Sites...', `Starting checks for ${totalSites} sites...`, 0);
-
- Object.entries(configSite).forEach(([siteName, site]) => {
- addSiteToCheck(siteName, site.full_url);
- });
-
- const statusChecks = Object.entries(configSite).map(async ([siteName, site]) => {
- const isOnline = await checkSiteStatus(site.full_url, siteName);
- return { siteName, site, isOnline };
- });
-
- const results = await Promise.all(statusChecks);
-
- updateStatusIndicator('Ready', 'All sites checked successfully!', 100);
-
- results.forEach(({ siteName, site, isOnline }) => {
- const siteItem = document.createElement('div');
- siteItem.className = 'site-item';
- siteItem.style.cursor = 'pointer';
-
- const statusDot = document.createElement('div');
- statusDot.className = 'site-status';
- if (!isOnline) statusDot.classList.add('offline');
- siteItem.appendChild(statusDot);
-
- const updateTime = new Date(site.time_change);
- if (updateTime > latestUpdate) {
- latestUpdate = updateTime;
- }
-
- const siteInfo = document.createElement('div');
- siteInfo.className = 'site-info';
- if (site.time_change) {
- const updateDate = new Date(site.time_change);
- const formattedDate = updateDate.toLocaleDateString('it-IT', {
- year: 'numeric',
- month: '2-digit',
- day: '2-digit',
- hour: '2-digit',
- minute: '2-digit'
- });
- const lastUpdate = document.createElement('span');
- lastUpdate.className = 'last-update';
- lastUpdate.innerHTML = `<i class="fas fa-clock"></i> ${formattedDate}`;
- siteInfo.appendChild(lastUpdate);
- }
-
- if (site.old_domain) {
- const oldDomain = document.createElement('span');
- oldDomain.className = 'old-domain';
- oldDomain.innerHTML = `<i class="fas fa-globe"></i> ${site.old_domain}`;
- siteInfo.appendChild(oldDomain);
- }
-
- siteItem.addEventListener('click', function() {
- window.open(site.full_url, '_blank', 'noopener,noreferrer');
- });
-
- const siteIcon = document.createElement('img');
- siteIcon.src = `https://t2.gstatic.com/faviconV2?client=SOCIAL&type=FAVICON&fallback_opts=TYPE,SIZE,URL&url=${site.full_url}&size=128`;
- siteIcon.alt = `${siteName} icon`;
- siteIcon.onerror = function() {
- this.src = 'data:image/svg+xml;utf8,<svg xmlns="http://www.w3.org/2000/svg" width="80" height="80"></svg>';
- };
-
- const siteTitle = document.createElement('h3');
- siteTitle.textContent = siteName;
- siteItem.appendChild(siteIcon);
- siteItem.appendChild(siteTitle);
- siteItem.appendChild(siteInfo);
- siteList.appendChild(siteItem);
- });
-
- const formattedDate = latestUpdate.toLocaleDateString('it-IT', {
- year: 'numeric',
- month: '2-digit',
- day: '2-digit',
- hour: '2-digit',
- minute: '2-digit'
- });
- document.getElementById('last-update-time').textContent = formattedDate;
- } else {
- siteList.innerHTML = 'No sites available';
- updateStatusIndicator('Ready', 'No sites found in the JSON file.', 100);
- }
- } catch (error) {
- console.error('Errore:', error);
- siteList.innerHTML = `Errore nel caricamento`;
- if (statusIndicator) {
- updateStatusIndicator('Error', `Failed to load: ${error.message}`, 0);
- statusIndicator.querySelector('.status-icon').style.background = '#f44336';
- }
- }
-}
-
-document.addEventListener('DOMContentLoaded', () => {
- loadSiteData();
-});
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 791ce62e0..9d02ab977 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -13,6 +13,7 @@ Please make sure to check the following:
- [ ] You have the latest commit installed.
- [ ] The issue relates to a website or a specific functionality.
- [ ] If the issue is related to a website, you have verified that the URL works correctly in your browser.
+- [ ] If the issue is related to audio or subtitles, you have tested the downloaded file with VLC media player first.
- [ ] You have searched through closed issues for similar problems or potential solutions. Issues that can be resolved by already closed topics may be automatically closed.
## Describe the issue
diff --git a/.github/.site/login.md b/.github/doc/login.md
similarity index 100%
rename from .github/.site/login.md
rename to .github/doc/login.md
diff --git a/.github/doc/site.md b/.github/doc/site.md
new file mode 100644
index 000000000..5e68620b0
--- /dev/null
+++ b/.github/doc/site.md
@@ -0,0 +1,23 @@
+# Services Overview
+
+| Site Name | Stream Type | DRM | Max Resolution | Region |
+|--------------------|-------------|-----|----------------|--------|
+| Altadefinizione | HLS | ❌ | 1080p | IT |
+| Animeunity | MP4 | ❌ | 1080p | IT |
+| Animeworld | MP4 | ❌ | 1080p | IT |
+| Crunchyroll | DASH | ✅ | 1080p | IT |
+| Discovery | DASH | ✅ | 720p | US |
+| Dmax | HLS | ❌ | 1080p | IT |
+| Guardaserie | HLS | ❌ | 1080p | IT |
+| Hd4Me | MEGA | ❌ | 720p | IT |
+| Ipersphera | MEGA | ❌ | 1080p | IT |
+| Mediasetinfinity | DASH | ✅ | 1080p | IT |
+| Nove | HLS | ❌ | 1080p | IT |
+| Raiplay | DASH | ✅ | 1080p | IT |
+| Realtime | HLS | ✅ | 1080p | IT |
+| Streamingcommunity | HLS | ❌ | 1080p | IT |
+| Tubitv | HLS | ✅ | 1080p | US |
+
+---
+
+*Last updated: 2025-12-24 14:23:56*
diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml
deleted file mode 100644
index 915fd3b4c..000000000
--- a/.github/workflows/pages.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-on:
- push:
- branches: ["main"]
- workflow_dispatch:
-
-permissions:
- contents: read
- pages: write
- id-token: write
-
-concurrency:
- group: "pages"
- cancel-in-progress: false
-
-jobs:
- build:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout
- uses: actions/checkout@v4
-
- - name: Setup Pages
- uses: actions/configure-pages@v5
-
- - name: Copy site files
- run: |
- mkdir -p _site
- cp -r .github/.site/* _site/
- ls -la _site/
-
- - name: Upload artifact
- uses: actions/upload-pages-artifact@v3
- with:
- path: _site
-
- deploy:
- environment:
- name: github-pages
- url: ${{ steps.deployment.outputs.page_url }}
- runs-on: ubuntu-latest
- needs: build
- steps:
- - name: Deploy to GitHub Pages
- id: deployment
- uses: actions/deploy-pages@v4
\ No newline at end of file
diff --git a/.github/.domain/domain_update.py b/.github/workflows/script/domain_update.py
similarity index 100%
rename from .github/.domain/domain_update.py
rename to .github/workflows/script/domain_update.py
diff --git a/.github/.domain/domains.json b/.github/workflows/script/domains.json
similarity index 100%
rename from .github/.domain/domains.json
rename to .github/workflows/script/domains.json
diff --git a/.github/workflows/script/generate_services_table.py b/.github/workflows/script/generate_services_table.py
new file mode 100644
index 000000000..41f444960
--- /dev/null
+++ b/.github/workflows/script/generate_services_table.py
@@ -0,0 +1,194 @@
+# 23.12.25
+
+import re
+from pathlib import Path
+from typing import List, Tuple
+from datetime import datetime
+
+
+def extract_service_info(init_file: Path) -> Tuple[str, str, bool, bool, str, str]:
+ """
+ Extract _stream_type, _drm, _deprecate, _maxResolution, and _region from a service __init__.py file
+
+ Args:
+ init_file: Path to the __init__.py file
+
+ Returns:
+ Tuple of (service_name, stream_type, drm, deprecate, max_resolution, region)
+ """
+ service_name = init_file.parent.name
+ stream_type = "N/A"
+ drm = False
+ deprecate = False
+ max_resolution = "N/A"
+ region = "N/A" # Default value for _region
+
+ try:
+ with open(init_file, 'r', encoding='utf-8') as f:
+ content = f.read()
+
+ # Extract _stream_type
+ stream_match = re.search(r'_stream_type\s*=\s*["\'](\w+)["\']', content)
+ if stream_match:
+ stream_type = stream_match.group(1)
+
+ # Extract _drm
+ drm_match = re.search(r'_drm\s*=\s*(True|False)', content)
+ if drm_match:
+ drm = drm_match.group(1) == 'True'
+
+ # Extract _deprecate
+ deprecate_match = re.search(r'_deprecate\s*=\s*(True|False)', content)
+ if deprecate_match:
+ deprecate = deprecate_match.group(1) == 'True'
+
+ # Extract _maxResolution
+ resolution_match = re.search(r'_maxResolution\s*=\s*["\']([\w\s]+)["\']', content)
+ if resolution_match:
+ max_resolution = resolution_match.group(1)
+
+ # Extract _region
+ region_match = re.search(r'_region\s*=\s*["\']([\w\s]+)["\']', content)
+ if region_match:
+ region = region_match.group(1)
+
+ except Exception as e:
+ print(f"Error reading {init_file}: {e}")
+
+ return service_name, stream_type, drm, deprecate, max_resolution, region
+
+
+def find_service_files(base_path: Path) -> List[Path]:
+ """
+ Find all service __init__.py files
+
+ Args:
+ base_path: Base path of the project
+
+ Returns:
+ List of paths to service __init__.py files
+ """
+ services_path = base_path / "StreamingCommunity" / "Api" / "Service"
+ init_files = []
+
+ if not services_path.exists():
+ print(f"Services path not found: {services_path}")
+ return init_files
+
+ # Iterate through service directories
+ for service_dir in services_path.iterdir():
+ if service_dir.is_dir() and not service_dir.name.startswith('__'):
+ init_file = service_dir / "__init__.py"
+ if init_file.exists():
+ init_files.append(init_file)
+
+ return sorted(init_files)
+
+
+def generate_markdown_table(services: List[Tuple[str, str, bool, str, str]]) -> str:
+ """
+ Generate a markdown table from services data with dynamic column widths.
+ Deprecated services are expected to have been filtered out by the caller.
+
+ Args:
+ services: List of (service_name, stream_type, drm, max_resolution, region) tuples
+
+ Returns:
+ Markdown formatted table
+ """
+ services = sorted(services, key=lambda x: x[0].lower())
+
+ # Prepare data with display names
+ table_data = []
+ for service_name, stream_type, drm, max_resolution, region in services:
+ display_name = service_name.replace('_', ' ').title()
+ drm_icon = "✅" if drm else "❌"
+ table_data.append((display_name, stream_type, drm_icon, max_resolution, region))
+
+ # Calculate maximum width for each column
+ col1_header = "Site Name"
+ col2_header = "Stream Type"
+ col3_header = "DRM"
+ col4_header = "Max Resolution"
+ col5_header = "Region"
+
+ # Start with header widths
+ max_col1 = len(col1_header)
+ max_col2 = len(col2_header)
+ max_col3 = len(col3_header)
+ max_col4 = len(col4_header)
+ max_col5 = len(col5_header)
+
+ # Check all data rows
+ for display_name, stream_type, drm_icon, max_resolution, region in table_data:
+ max_col1 = max(max_col1, len(display_name))
+ max_col2 = max(max_col2, len(stream_type))
+ max_col3 = max(max_col3, len(drm_icon))
+ max_col4 = max(max_col4, len(max_resolution))
+ max_col5 = max(max_col5, len(region))
+
+ # Build table with dynamic widths
+ lines = ["# Services Overview", ""]
+
+ # Header row
+ header = f"| {col1_header.ljust(max_col1)} | {col2_header.ljust(max_col2)} | {col3_header.ljust(max_col3)} | {col4_header.ljust(max_col4)} | {col5_header.ljust(max_col5)} |"
+ lines.append(header)
+
+ # Separator row
+ separator = f"|{'-' * (max_col1 + 2)}|{'-' * (max_col2 + 2)}|{'-' * (max_col3 + 2)}|{'-' * (max_col4 + 2)}|{'-' * (max_col5 + 2)}|"
+ lines.append(separator)
+
+ # Data rows
+ for display_name, stream_type, drm_icon, max_resolution, region in table_data:
+ row = f"| {display_name.ljust(max_col1)} | {stream_type.ljust(max_col2)} | {drm_icon.ljust(max_col3)} | {max_resolution.ljust(max_col4)} | {region.ljust(max_col5)} |"
+ lines.append(row)
+
+ lines.append("")
+ lines.append("---")
+ lines.append("")
+ lines.append(f"*Last updated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*")
+ lines.append("")
+
+ return "\n".join(lines)
+
+
+def main():
+ script_dir = Path(__file__).parent
+ base_path = script_dir.parent.parent.parent
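+ # the script lives in .github/workflows/script/, so three .parent hops reach the repo root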
+ print(f"Base path: {base_path}")
+
+ # Find all service __init__.py files
+ init_files = find_service_files(base_path)
+ print(f"Found {len(init_files)} service files")
+
+ if not init_files:
+ print("No service files found!")
+ return
+
+ # Extract information from each service
+ services = []
+ deprecated_count = 0
+ for init_file in init_files:
+ service_name, stream_type, drm, deprecate, max_resolution, region = extract_service_info(init_file)
+
+ # Only include services that are not deprecated
+ if not deprecate:
+ services.append((service_name, stream_type, drm, max_resolution, region))
+ else:
+ deprecated_count += 1
+
+ print(f"Deprecated services: {deprecated_count}")
+
+ # Generate markdown table
+ markdown_content = generate_markdown_table(services)
+
+ # Write to site.md
+ output_file = base_path / ".github" / "doc" / "site.md"
+ output_file.parent.mkdir(parents=True, exist_ok=True)
+
+ with open(output_file, 'w', encoding='utf-8') as f:
+ f.write(markdown_content)
+
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/.github/workflows/update_domain.yml b/.github/workflows/update_domain.yml
index d9b0515b5..67dbd2cea 100644
--- a/.github/workflows/update_domain.yml
+++ b/.github/workflows/update_domain.yml
@@ -35,16 +35,16 @@ jobs:
sudo sh -c 'echo "nameserver 77.88.8.8" >> /etc/resolv.conf'
- name: Execute domain update script
- run: python .github/.domain/domain_update.py
+ run: python .github/workflows/script/domain_update.py
- name: Always amend last commit
run: |
git config --global user.name 'github-actions[bot]'
git config --global user.email 'github-actions[bot]@users.noreply.github.com'
- if ! git diff --quiet .github/.domain/domains.json; then
+ if ! git diff --quiet .github/workflows/script/domains.json; then
echo "📝 Changes detected - amending last commit"
- git add .github/.domain/domains.json
+ git add .github/workflows/script/domains.json
git commit --amend --no-edit
git push --force-with-lease origin main
else
diff --git a/.gitignore b/.gitignore
index cc089b665..6a39012a5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -55,4 +55,5 @@ active_requests.json
working_proxies.json
start.sh
.DS_Store
-GUI/db.sqlite3
\ No newline at end of file
+GUI/db.sqlite3
+console.log
\ No newline at end of file
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index e884cf9b3..000000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,4 +0,0 @@
-recursive-include StreamingCommunity *
-recursive-include StreamingCommunity/Api *
-recursive-include StreamingCommunity/Lib *
-include requirements.txt
\ No newline at end of file
diff --git a/README.md b/README.md
index be334f20c..ed3ec1a5e 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,7 @@
*⚡ **Quick Start:** `pip install StreamingCommunity && StreamingCommunity`*
+📺 **[Services](.github/doc/site.md)** - See all supported streaming platforms
---
@@ -24,8 +25,7 @@
- 🚀 [Quick Start](#quick-start)
- 📥 [Downloaders](#downloaders)
- 🛠️ [Configuration](#configuration)
-- 🔐 [Login](.github/.site/login.md)
-- 🌐 [Domain](https://arrowar.github.io/StreamingCommunity)
+- 🔐 [Login](.github/doc/login.md)
- 💡 [Usage Examples](#usage-examples)
- 🔍 [Global Search](#global-search)
- 🧩 [Advanced Features](#advanced-options)
diff --git a/StreamingCommunity/Api/Service/altadefinizione/__init__.py b/StreamingCommunity/Api/Service/altadefinizione/__init__.py
index b1c401946..c3151bd43 100644
--- a/StreamingCommunity/Api/Service/altadefinizione/__init__.py
+++ b/StreamingCommunity/Api/Service/altadefinizione/__init__.py
@@ -18,7 +18,12 @@
# Variable
indice = 2
_useFor = "Film_&_Serie"
+_region = "IT"
_deprecate = False
+_stream_type = "HLS"
+_maxResolution = "1080p"
+_drm = False
+
msg = Prompt()
console = Console()
diff --git a/StreamingCommunity/Api/Service/animeunity/__init__.py b/StreamingCommunity/Api/Service/animeunity/__init__.py
index 1485faa56..fe65f36ec 100644
--- a/StreamingCommunity/Api/Service/animeunity/__init__.py
+++ b/StreamingCommunity/Api/Service/animeunity/__init__.py
@@ -18,7 +18,12 @@
# Variable
indice = 1
_useFor = "Anime"
+_region = "IT"
_deprecate = False
+_stream_type = "MP4"
+_maxResolution = "1080p"
+_drm = False
+
msg = Prompt()
console = Console()
diff --git a/StreamingCommunity/Api/Service/animeworld/__init__.py b/StreamingCommunity/Api/Service/animeworld/__init__.py
index 84ece6798..5b8181b7d 100644
--- a/StreamingCommunity/Api/Service/animeworld/__init__.py
+++ b/StreamingCommunity/Api/Service/animeworld/__init__.py
@@ -18,7 +18,12 @@
# Variable
indice = 6
_useFor = "Anime"
+_region = "IT"
_deprecate = False
+_stream_type = "MP4"
+_maxResolution = "1080p"
+_drm = False
+
msg = Prompt()
console = Console()
diff --git a/StreamingCommunity/Api/Service/crunchyroll/__init__.py b/StreamingCommunity/Api/Service/crunchyroll/__init__.py
index bd5345929..6efca6125 100644
--- a/StreamingCommunity/Api/Service/crunchyroll/__init__.py
+++ b/StreamingCommunity/Api/Service/crunchyroll/__init__.py
@@ -18,7 +18,12 @@
# Variable
indice = 7
_useFor = "Anime"
+_region = "IT"
_deprecate = False
+_stream_type = "DASH"
+_maxResolution = "1080p"
+_drm = True
+
msg = Prompt()
console = Console()
diff --git a/StreamingCommunity/Api/Service/crunchyroll/film.py b/StreamingCommunity/Api/Service/crunchyroll/film.py
index b25f02414..d4ab3e8d9 100644
--- a/StreamingCommunity/Api/Service/crunchyroll/film.py
+++ b/StreamingCommunity/Api/Service/crunchyroll/film.py
@@ -46,24 +46,17 @@ def download_film(select_title: MediaItem) -> str:
mp4_name = f"{os_manager.get_sanitize_file(select_title.name, select_title.date)}.{extension_output}"
mp4_path = os.path.join(site_constants.MOVIE_FOLDER, mp4_name.replace(f".{extension_output}", ""))
- # Generate mpd and license URLs
+ # Get playback session
url_id = select_title.get('url').split('/')[-1]
+ playback_result = get_playback_session(client, url_id)
- # Get playback session
- try:
- playback_result = get_playback_session(client, url_id)
-
- # Check if access was denied (403)
- if playback_result is None:
- console.print("[red]✗ Access denied: This content requires a premium subscription")
- return None, False
-
- mpd_url, mpd_headers, mpd_list_sub, token, audio_locale = playback_result
-
- except Exception as e:
- console.print(f"[red]✗ Error getting playback session: {str(e)}")
+ # Check if access was denied (403)
+ if playback_result is None:
+ console.print("[red]✗ Access denied: This content requires a premium subscription")
return None, False
+ mpd_url, mpd_headers, mpd_list_sub, token, _ = playback_result
+
# Parse playback token from mpd_url
parsed_url = urlparse(mpd_url)
query_params = parse_qs(parsed_url.query)
@@ -100,6 +93,6 @@ def download_film(select_title: MediaItem) -> str:
playback_token = token or query_params.get('playbackGuid', [None])[0]
if playback_token:
client.delete_active_stream(url_id, playback_token)
- console.print("[dim]✓ Playback session closed[/dim]")
+ console.print("[dim]Playback session closed")
return status['path'], status['stopped']
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/crunchyroll/series.py b/StreamingCommunity/Api/Service/crunchyroll/series.py
index 9eee5a145..92eec317f 100644
--- a/StreamingCommunity/Api/Service/crunchyroll/series.py
+++ b/StreamingCommunity/Api/Service/crunchyroll/series.py
@@ -53,31 +53,22 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
- console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.get('name')}[/magenta] ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
+ console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.get('name')} ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
# Define filename and path for the downloaded video
mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.get('name'))}.{extension_output}"
mp4_path = os_manager.get_sanitize_path(os.path.join(site_constants.SERIES_FOLDER, scrape_serie.series_name, f"S{index_season_selected}"))
- # Generate mpd and license URLs
- url_id = obj_episode.get('url').split('/')[-1]
-
# Get playback session
- try:
- # Get playback session with token for cleanup
- playback_result = get_playback_session(client, url_id)
-
- # Check if access was denied (403)
- if playback_result is None:
- console.print("[red]✗ Access denied: This episode requires a premium subscription")
- return None, False
-
- mpd_url, mpd_headers, mpd_list_sub, token, audio_locale = playback_result
-
- except Exception as e:
- console.print(f"[red]✗ Error getting playback session: {str(e)}")
+ url_id = obj_episode.get('url').split('/')[-1]
+ playback_result = get_playback_session(client, url_id)
+
+ # Check if access was denied (403)
+ if playback_result is None:
+ console.print("[red]✗ Access denied: This episode requires a premium subscription")
return None, False
+ mpd_url, mpd_headers, mpd_list_sub, token, _ = playback_result
parsed_url = urlparse(mpd_url)
query_params = parse_qs(parsed_url.query)
@@ -113,7 +104,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
playback_token = token or query_params.get('playbackGuid', [None])[0]
if playback_token:
client.delete_active_stream(url_id, playback_token)
- console.print("[dim]✓ Playback session closed[/dim]")
+ console.print("[dim]Playback session closed")
return status['path'], status['stopped']
diff --git a/StreamingCommunity/Api/Service/crunchyroll/util/ScrapeSerie.py b/StreamingCommunity/Api/Service/crunchyroll/util/ScrapeSerie.py
index 54d64d00d..e2cefeadb 100644
--- a/StreamingCommunity/Api/Service/crunchyroll/util/ScrapeSerie.py
+++ b/StreamingCommunity/Api/Service/crunchyroll/util/ScrapeSerie.py
@@ -1,16 +1,18 @@
# 16.03.25
import logging
-from typing import List, Dict, Tuple
+from typing import List, Dict
# Internal utilities
from StreamingCommunity.Api.Template.object import SeasonManager
+from StreamingCommunity.Util.config_json import config_manager
from .get_license import CrunchyrollClient
# Variable
NORMALIZE_SEASON_NUMBERS = False # Set to True to remap seasons to 1..N range
+DOWNLOAD_SPECIFIC_AUDIO = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_audio')
def get_series_seasons(series_id, client: CrunchyrollClient, params):
@@ -135,16 +137,25 @@ def _fetch_episodes_for_season(self, season_number: int) -> List[Dict]:
self._episodes_cache[season_number] = episode_list
return episode_list
- def _get_episode_audio_locales_and_urls(self, episode_id: str) -> Tuple[List[str], Dict[str, str]]:
- """
- Fetch available audio locales and their URLs for an episode.
- 1. Try 'versions' from CMS
- 2. Fallback to single audio_locale from metadata
- 3. Last resort: query playback and cleanup token
+ def _get_preferred_audio_locale(self) -> str:
+ lang_mapping = {
+ 'ita': 'it-IT',
+ 'eng': 'en-US',
+ 'jpn': 'ja-JP',
+ 'ger': 'de-DE',
+ 'fre': 'fr-FR',
+ 'spa': 'es-419',
+ 'por': 'pt-BR'
+ }
- Returns: (audio_locales, urls_by_locale)
- """
- url = f'https://www.crunchyroll.com/content/v2/cms/objects/{episode_id}'
+ preferred_lang = DOWNLOAD_SPECIFIC_AUDIO[0] if DOWNLOAD_SPECIFIC_AUDIO else 'ita'
+ preferred_locale = lang_mapping.get(preferred_lang.lower(), 'it-IT')
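+ # e.g. specific_list_audio = ["eng"] maps to "en-US"; unmapped codes fall back to "it-IT"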
+ return preferred_locale
+
+ def _get_episode_id_for_preferred_language(self, base_episode_id: str) -> str:
+ """Get the correct episode ID for the preferred audio language."""
+ preferred_locale = self._get_preferred_audio_locale()
+ url = f'https://www.crunchyroll.com/content/v2/cms/objects/{base_episode_id}'
params = {
'ratings': 'true',
'locale': 'it-IT',
@@ -154,65 +165,48 @@ def _get_episode_audio_locales_and_urls(self, episode_id: str) -> Tuple[List[str
response = self.client._request_with_retry('GET', url, params=params)
if response.status_code != 200:
- logging.warning(f"Failed to fetch audio locales for episode {episode_id}")
- return [], {}
+ logging.warning(f"Failed to fetch episode details for {base_episode_id}")
+ return base_episode_id
data = response.json()
item = (data.get("data") or [{}])[0] or {}
meta = item.get('episode_metadata', {}) or {}
-
- # Strategy 1: versions array
- versions = meta.get("versions") or item.get("versions") or []
- audio_locales = []
- urls_by_locale = {}
-
- if versions:
- for v in versions:
- if not isinstance(v, dict):
- continue
-
- locale = v.get("audio_locale")
- guid = v.get("guid")
- if locale and guid:
- audio_locales.append(locale)
- urls_by_locale[locale] = f"https://www.crunchyroll.com/watch/{guid}"
-
- if audio_locales:
- return sorted(set(audio_locales)), urls_by_locale
-
- # Strategy 2: single audio_locale from metadata
- base_audio = (
- meta.get("audio_locale")
- or item.get("audio_locale")
- or (meta.get("audio") or {}).get("locale")
- or (item.get("audio") or {}).get("locale")
- )
- if base_audio:
- return [base_audio], {base_audio: f"https://www.crunchyroll.com/watch/{episode_id}"}
+ versions = meta.get("versions") or []
- # Strategy 3: query playback as last resort
- try:
- from .get_license import get_playback_session
- _url, _hdrs, _subs, token, audio_loc = get_playback_session(self.client, episode_id)
-
- # Cleanup token immediately
- if token:
- try:
- self.client.delete_active_stream(episode_id, token)
- except Exception:
- pass
-
- if audio_loc:
- return [audio_loc], {audio_loc: f"https://www.crunchyroll.com/watch/{episode_id}"}
- except Exception as e:
- logging.warning(f"Playback fallback failed for {episode_id}: {e}")
-
- return [], {}
+ # Print all available audio locales
+ available_locales = []
+ for version in versions:
+ if isinstance(version, dict):
+ locale = version.get("audio_locale")
+ if locale:
+ available_locales.append(locale)
+ print(f"Available audio locales: {available_locales}")
+
+ # Find matching version by audio_locale
+ for i, version in enumerate(versions):
+ if isinstance(version, dict):
+ audio_locale = version.get("audio_locale")
+ guid = version.get("guid")
+
+ if audio_locale == preferred_locale:
+ print(f"Found matching locale! Selected: {audio_locale} -> {guid}")
+ return version.get("guid", base_episode_id)
+ # Fallback: try to find any available version if preferred not found
+ if versions and isinstance(versions[0], dict):
+ fallback_guid = versions[0].get("guid")
+ fallback_locale = versions[0].get("audio_locale")
+ if fallback_guid:
+ print(f"[DEBUG] Preferred locale {preferred_locale} not found, using fallback: {fallback_locale} -> {fallback_guid}")
+ logging.info(f"Preferred locale {preferred_locale} not found, using fallback: {fallback_locale}")
+ return fallback_guid
+
except Exception as e:
- logging.error(f"Error parsing audio locales for episode {episode_id}: {e}")
- return [], {}
+ logging.error(f"Error getting episode ID for preferred language: {e}")
+
+ print(f"[DEBUG] No suitable version found, returning original episode ID: {base_episode_id}")
+ return base_episode_id
# ------------- FOR GUI -------------
def getNumberSeason(self) -> int:
@@ -245,20 +239,13 @@ def selectEpisode(self, season_number: int, episode_index: int) -> dict:
return None
episode = episodes[episode_index]
- episode_id = episode.get("url", "").split("/")[-1] if "url" in episode else None
+ base_episode_id = episode.get("url", "").split("/")[-1] if "url" in episode else None
- if not episode_id:
+ if not base_episode_id:
return episode
- # Try to get best audio URL
- try:
- _, urls_by_locale = self._get_episode_audio_locales_and_urls(episode_id)
- new_url = urls_by_locale.get("it-IT") or urls_by_locale.get("en-US")
-
- if new_url:
- episode["url"] = new_url
-
- except Exception as e:
- logging.warning(f"Could not update episode URL: {e}")
+ preferred_episode_id = self._get_episode_id_for_preferred_language(base_episode_id)
+ episode["url"] = f"https://www.crunchyroll.com/watch/{preferred_episode_id}"
+ #print(f"Updated episode URL: {episode['url']}")
return episode
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/crunchyroll/util/get_license.py b/StreamingCommunity/Api/Service/crunchyroll/util/get_license.py
index 233f78e55..f651601fc 100644
--- a/StreamingCommunity/Api/Service/crunchyroll/util/get_license.py
+++ b/StreamingCommunity/Api/Service/crunchyroll/util/get_license.py
@@ -20,12 +20,10 @@
class PlaybackError(Exception):
- """Custom exception for playback-related errors that shouldn't crash the program"""
pass
class RateLimiter:
- """Simple token-bucket rate limiter to avoid server-side throttling."""
def __init__(self, qps: float):
self.qps = max(0.1, float(qps))
self._last = 0.0
@@ -51,7 +49,7 @@ def __init__(self) -> None:
self.access_token: Optional[str] = None
self.refresh_token: Optional[str] = None
self.account_id: Optional[str] = None
- self.expires_at: float = 0.0 # epoch timestamp
+ self.expires_at: float = 0.0
# Rate limiting configuration
self.rate_limiter = RateLimiter(qps=DEFAULT_QPS)
@@ -218,8 +216,7 @@ def get_streams(self, media_id: str) -> Optional[Dict]:
Get available streams for media_id.
"""
response = self._request_with_retry(
- 'GET',
- f'{BASE_URL}/playback/v3/{media_id}/web/chrome/play',
+ 'GET', f'{BASE_URL}/playback/v3/{media_id}/web/chrome/play',
params={'locale': self.locale}
)
@@ -231,7 +228,6 @@ def get_streams(self, media_id: str) -> Optional[Dict]:
raise PlaybackError("TOO_MANY_ACTIVE_STREAMS. Wait a few minutes and try again.")
response.raise_for_status()
-
data = response.json()
if data.get('error') == 'Playback is Rejected':
@@ -281,40 +277,30 @@ def _find_token_anywhere(obj) -> Optional[str]:
def get_playback_session(client: CrunchyrollClient, url_id: str) -> Optional[Tuple[str, Dict, List[Dict], Optional[str], Optional[str]]]:
"""
Return the playback session details.
-
+
Returns:
Tuple with (mpd_url, headers, subtitles, token, audio_locale) or None if access denied
"""
data = client.get_streams(url_id)
-
- # If get_streams returns None, it means access was denied (403)
- if data is None:
- return None
-
- url = data.get('url')
- audio_locale_current = data.get('audio_locale') or data.get('audio', {}).get('locale')
-
- # Collect subtitles with metadata
- subtitles = []
- subs_obj = data.get('subtitles') or {}
- if isinstance(subs_obj, dict):
- for lang, info in subs_obj.items():
- if not info:
- continue
- sub_url = info.get('url')
- if not sub_url:
- continue
-
- subtitles.append({
- 'language': lang,
- 'url': sub_url,
- 'format': info.get('format'),
- 'type': info.get('type'), # "subtitles" | "captions"
- 'closed_caption': bool(info.get('closed_caption')),
- 'label': info.get('display') or info.get('title') or info.get('language')
- })
-
- token = _find_token_anywhere(data)
- headers = client._get_headers()
-
- return url, headers, subtitles, token, audio_locale_current
\ No newline at end of file
+ try:
+ url = data.get('url')
+ audio_locale_current = data.get('audio_locale') or data.get('audio', {}).get('locale')
+
+ # Collect subtitles with metadata
+ subtitles = []
+ subtitles_data = data.get('subtitles', {})
+ for lang_code, sub_info in subtitles_data.items():
+ if sub_info.get('url'):
+ subtitles.append({
+ 'language': sub_info.get('language'),
+ 'format': sub_info.get('format'),
+ 'url': sub_info.get('url'),
+ })
+
+ token = _find_token_anywhere(data)
+ headers = client._get_headers()
+ return url, headers, subtitles, token, audio_locale_current
+
+ except Exception as e:
+ logging.error(f"Failed to parse playback session: {e}, Premium subscription may be required.")
+ return None
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/discovery/__init__.py b/StreamingCommunity/Api/Service/discovery/__init__.py
new file mode 100644
index 000000000..1ac4ee954
--- /dev/null
+++ b/StreamingCommunity/Api/Service/discovery/__init__.py
@@ -0,0 +1,102 @@
+# 22.12.25
+
+# External library
+from rich.console import Console
+from rich.prompt import Prompt
+
+
+# Internal utilities
+from StreamingCommunity.Api.Template import site_constants, MediaItem, get_select_title
+
+
+# Logic
+from .site import title_search, table_show_manager, media_search_manager
+from .series import download_series
+
+
+# Variables
+indice = 13
+_useFor = "Film_&_Serie"
+_region = "US"
+_deprecate = False
+_stream_type = "DASH"
+_maxResolution = "720p"
+_drm = True
+
+
+msg = Prompt()
+console = Console()
+
+
+def process_search_result(select_title, selections=None):
+ """
+ Handles the search result and initiates download for film or series
+
+ Parameters:
+ select_title (MediaItem): The selected media item
+ selections (dict, optional): Dictionary containing selection inputs
+ {'season': season_selection, 'episode': episode_selection}
+
+ Returns:
+ bool: True if processing was successful, False otherwise
+ """
+ if not select_title:
+ console.print("[yellow]No title selected or selection cancelled.")
+ return False
+
+ if select_title.type == 'tv':
+ season_selection = None
+ episode_selection = None
+
+ if selections:
+ season_selection = selections.get('season')
+ episode_selection = selections.get('episode')
+
+ download_series(select_title, season_selection, episode_selection)
+ media_search_manager.clear()
+ table_show_manager.clear()
+ return True
+
+
+def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None, selections: dict = None):
+ """
+ Main function for searching and downloading content
+
+ Parameters:
+ string_to_search (str, optional): Search query string
+ get_onlyDatabase (bool, optional): If True, return only the database object
+ direct_item (dict, optional): Direct item to process (bypass search)
+ selections (dict, optional): Dictionary containing selection inputs
+ {'season': season_selection, 'episode': episode_selection}
+ """
+ if direct_item:
+ select_title = MediaItem(**direct_item)
+ result = process_search_result(select_title, selections)
+ return result
+
+ # Get search query from user
+ actual_search_query = None
+ if string_to_search is not None:
+ actual_search_query = string_to_search.strip()
+ else:
+ actual_search_query = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constants.SITE_NAME}").strip()
+
+ # Handle empty input
+ if not actual_search_query:
+ return False
+
+ # Search on database
+ len_database = title_search(actual_search_query)
+
+ # If only database is needed, return the manager
+ if get_onlyDatabase:
+ return media_search_manager
+
+ if len_database > 0:
+ select_title = get_select_title(table_show_manager, media_search_manager, len_database)
+ result = process_search_result(select_title, selections)
+ return result
+
+ else:
+ console.print(f"\n[red]Nothing matching was found for[white]: [purple]{actual_search_query}")
+ return False
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/discovery/series.py b/StreamingCommunity/Api/Service/discovery/series.py
new file mode 100644
index 000000000..14c8e1fb1
--- /dev/null
+++ b/StreamingCommunity/Api/Service/discovery/series.py
@@ -0,0 +1,170 @@
+# 22.12.25
+
+import os
+from typing import Tuple
+
+
+# External library
+from rich.console import Console
+from rich.prompt import Prompt
+
+
+# Internal utilities
+from StreamingCommunity.Util import os_manager, config_manager, start_message
+from StreamingCommunity.Api.Template import site_constants, MediaItem
+from StreamingCommunity.Api.Template.episode_manager import (
+ manage_selection,
+ map_episode_title,
+ validate_selection,
+ validate_episode_selection,
+ display_episodes_list,
+ display_seasons_list
+)
+from StreamingCommunity.Lib.DASH.downloader import DASH_Downloader
+
+
+# Logic
+from .util.ScrapeSerie import GetSerieInfo
+from .util.get_license import get_playback_info, generate_license_headers
+
+
+# Variables
+msg = Prompt()
+console = Console()
+extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+
+
+def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str, bool]:
+ """
+ Download a specific episode
+
+ Parameters:
+ index_season_selected (int): Season number
+ index_episode_selected (int): Episode index
+ scrape_serie (GetSerieInfo): Series scraper instance
+
+ Returns:
+ Tuple[str, bool]: (output_path, stopped_status)
+ """
+ start_message()
+
+ # Get episode information
+ obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected - 1)
+ console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name} ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
+
+ # Define output path
+ mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.{extension_output}"
+ mp4_path = os_manager.get_sanitize_path(
+ os.path.join(site_constants.SERIES_FOLDER, scrape_serie.series_name, f"S{index_season_selected}")
+ )
+
+ # Get playback information using video_id
+ playback_info = get_playback_info(obj_episode.video_id)
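+ # playback_info is expected to provide 'type', 'mpd_url', 'license_url' and 'license_token'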
+ if str(playback_info['type']).strip().lower() != 'dash' or playback_info['license_url'] is None:
+ console.print(f"[red]Unsupported streaming type. Playbackk info: {playback_info}")
+ return None, False
+
+ # Generate license headers
+ license_headers = generate_license_headers(playback_info['license_token'])
+
+ # Download the episode
+ dash_process = DASH_Downloader(
+ license_url=playback_info['license_url'],
+ mpd_url=playback_info['mpd_url'],
+ output_path=os.path.join(mp4_path, mp4_name),
+ )
+
+ dash_process.parse_manifest(custom_headers=license_headers)
+
+ if dash_process.download_and_decrypt(custom_headers=license_headers):
+ dash_process.finalize_output()
+
+ # Get final status
+ status = dash_process.get_status()
+
+ if status['error'] is not None and status['path']:
+ try:
+ os.remove(status['path'])
+ except Exception:
+ pass
+
+ return status['path'], status['stopped']
+
+
+def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, download_all: bool = False, episode_selection: str = None) -> None:
+ """
+ Handle downloading episodes for a specific season
+
+ Parameters:
+ index_season_selected (int): Season number
+ scrape_serie (GetSerieInfo): Series scraper instance
+ download_all (bool): Whether to download all episodes
+ episode_selection (str, optional): Pre-defined episode selection
+ """
+ # Get episodes for the selected season
+ episodes = scrape_serie.getEpisodeSeasons(index_season_selected)
+ episodes_count = len(episodes)
+
+ if episodes_count == 0:
+ console.print(f"[red]No episodes found for season {index_season_selected}")
+ return
+
+ if download_all:
+ for i_episode in range(1, episodes_count + 1):
+ path, stopped = download_video(index_season_selected, i_episode, scrape_serie)
+ if stopped:
+ break
+ else:
+ if episode_selection is not None:
+ last_command = episode_selection
+ console.print(f"\n[cyan]Using provided episode selection: [yellow]{episode_selection}")
+ else:
+ last_command = display_episodes_list(episodes)
+
+ # Prompt user for episode selection
+ list_episode_select = manage_selection(last_command, episodes_count)
+ list_episode_select = validate_episode_selection(list_episode_select, episodes_count)
+
+ # Download selected episodes
+ for i_episode in list_episode_select:
+ path, stopped = download_video(index_season_selected, i_episode, scrape_serie)
+ if stopped:
+ break
+
+
+def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None) -> None:
+ """
+ Handle downloading a complete series
+
+ Parameters:
+ select_season (MediaItem): Series metadata from search
+ season_selection (str, optional): Pre-defined season selection
+ episode_selection (str, optional): Pre-defined episode selection
+ """
+ id_parts = select_season.id.split('|')
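+ # the id was packed as "<show_id>|<alternate_id>" by title_search() in site.py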
+
+ # Initialize series scraper
+ scrape_serie = GetSerieInfo(id_parts[1], id_parts[0])
+ seasons_count = scrape_serie.getNumberSeason()
+
+ if seasons_count == 0:
+ console.print("[red]No seasons found for this series")
+ return
+
+ # Handle season selection
+ if season_selection is None:
+ index_season_selected = display_seasons_list(scrape_serie.seasons_manager)
+ else:
+ index_season_selected = season_selection
+ console.print(f"\n[cyan]Using provided season selection: [yellow]{season_selection}")
+
+ # Validate the selection
+ list_season_select = manage_selection(index_season_selected, seasons_count)
+ list_season_select = validate_selection(list_season_select, seasons_count)
+
+ # Loop through selected seasons and download episodes
+ for i_season in list_season_select:
+ if len(list_season_select) > 1 or index_season_selected == "*":
+ download_episode(i_season, scrape_serie, download_all=True)
+ else:
+ download_episode(i_season, scrape_serie, download_all=False, episode_selection=episode_selection)
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/discovery/site.py b/StreamingCommunity/Api/Service/discovery/site.py
new file mode 100644
index 000000000..e3950c507
--- /dev/null
+++ b/StreamingCommunity/Api/Service/discovery/site.py
@@ -0,0 +1,82 @@
+# 22.12.25
+
+# External libraries
+from rich.console import Console
+
+
+# Internal utilities
+from StreamingCommunity.Util.http_client import create_client
+from StreamingCommunity.Api.Template import site_constants, MediaManager
+from StreamingCommunity.Util.table import TVShowManager
+
+
+# Logic
+from .util.get_license import get_api
+
+
+# Variables
+console = Console()
+media_search_manager = MediaManager()
+table_show_manager = TVShowManager()
+
+
+def title_search(query: str) -> int:
+ """
+ Search for titles on Discovery+
+
+ Parameters:
+ query (str): Search query
+
+ Returns:
+ int: Number of results found
+ """
+ media_search_manager.clear()
+ table_show_manager.clear()
+
+ api = get_api()
+ search_url = 'https://us1-prod-direct.go.discovery.com/cms/routes/search/result'
+ console.print(f"[cyan]Search url: [yellow]{search_url}")
+
+ params = {
+ 'include': 'default',
+ 'decorators': 'viewingHistory,isFavorite,playbackAllowed',
+ 'contentFilter[query]': query
+ }
+
+ try:
+ response = create_client(headers=api.get_request_headers()).get(
+ search_url,
+ params=params,
+ cookies=api.get_cookies()
+ )
+ response.raise_for_status()
+
+ except Exception as e:
+ console.print(f"[red]Site: {site_constants.SITE_NAME}, request search error: {e}")
+ return 0
+
+ # Parse response
+ data = response.json()
+ for element in data.get('included', []):
+ element_type = element.get('type')
+
+ # Handle both shows and movies
+ if element_type in ['show', 'movie']:
+ attributes = element.get('attributes', {})
+
+ if 'name' in attributes:
+ if element_type == 'show':
+                    date = (attributes.get('newestEpisodeDate') or '').split("T")[0]
+                else:
+                    date = (attributes.get('airDate') or '').split("T")[0]
+
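+                # Pack both identifiers into one id; download_series splits on '|'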
+ combined_id = f"{element.get('id')}|{attributes.get('alternateId')}"
+ media_search_manager.add_media({
+ 'id': combined_id,
+ 'name': attributes.get('name', 'No Title'),
+ 'type': 'tv' if element_type == 'show' else 'movie',
+ 'image': None,
+ 'date': date
+ })
+
+ return media_search_manager.get_length()
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/discovery/util/ScrapeSerie.py b/StreamingCommunity/Api/Service/discovery/util/ScrapeSerie.py
new file mode 100644
index 000000000..4662055be
--- /dev/null
+++ b/StreamingCommunity/Api/Service/discovery/util/ScrapeSerie.py
@@ -0,0 +1,156 @@
+# 22.12.25
+
+import logging
+
+
+# Internal utilities
+from StreamingCommunity.Util.http_client import create_client
+from StreamingCommunity.Api.Template.object import SeasonManager
+from .get_license import get_api
+
+
+class GetSerieInfo:
+ def __init__(self, show_alternate_id, show_id):
+ """
+ Initialize series scraper for Discovery+
+
+ Args:
+ show_alternate_id (str): The alternate ID of the show (e.g., 'homestead-rescue-discovery')
+ show_id (str): The numeric ID of the show
+ """
+ self.api = get_api()
+ self.show_alternate_id = show_alternate_id
+ self.show_id = show_id
+ self.series_name = ""
+ self.seasons_manager = SeasonManager()
+ self.n_seasons = 0
+ self.collection_id = None
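+        # Resolve series name, season count and collection id before any episode queries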
+ self._get_show_info()
+
+ def _get_show_info(self):
+ """Get show information including number of seasons and collection ID"""
+ try:
+ response = create_client(headers=self.api.get_request_headers()).get(
+ f'https://us1-prod-direct.go.discovery.com/cms/routes/show/{self.show_alternate_id}',
+ params={
+ 'include': 'default',
+ 'decorators': 'viewingHistory,isFavorite,playbackAllowed'
+ },
+ cookies=self.api.get_cookies()
+ )
+ response.raise_for_status()
+ data = response.json()
+
+ # Get series name from first show element
+ for element in data.get('included', []):
+ if element.get('type') == 'show':
+ self.series_name = element.get('attributes', {}).get('name', '')
+ break
+
+            # Get number of seasons (the component with the season filter is
+            # expected at index 4 of 'included'; guard against short responses)
+            included = data.get('included', [])
+            if len(included) > 4:
+                filters = included[4].get('attributes', {}).get('component', {}).get('filters', [])
+                if filters:
+                    self.n_seasons = int(filters[0].get('initiallySelectedOptionIds', [0])[0])
+
+ # Get collection ID
+ for element in data.get('included', []):
+ if element.get('type') == 'collection':
+ self.collection_id = element.get('id')
+ #print(f"Collection ID: {self.collection_id}")
+ #break
+
+ return True
+
+ except Exception as e:
+ logging.error(f"Failed to get show info: {e}")
+ return False
+
+ def _get_season_episodes(self, season_number):
+ """
+ Get episodes for a specific season
+
+ Args:
+ season_number (int): Season number
+ """
+ try:
+ response = create_client(headers=self.api.get_request_headers()).get(
+ f'https://us1-prod-direct.go.discovery.com/cms/collections/{self.collection_id}',
+ params={
+ 'include': 'default',
+ 'decorators': 'viewingHistory,isFavorite,playbackAllowed',
+ 'pf[seasonNumber]': season_number,
+ 'pf[show.id]': self.show_id
+ },
+ cookies=self.api.get_cookies()
+ )
+ response.raise_for_status()
+
+ data = response.json()
+ episodes = []
+
+ for element in data.get('included', []):
+ if element.get('type') == 'video':
+ attributes = element.get('attributes', {})
+ if 'episodeNumber' in attributes:
+ episodes.append({
+ 'id': attributes.get('alternateId', ''),
+ 'video_id': element.get('id', ''),
+ 'name': attributes.get('name', ''),
+ 'episode_number': attributes.get('episodeNumber', 0),
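+                            # videoDuration is reported in milliseconds; convert to whole minutes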
+ 'duration': attributes.get('videoDuration', 0) // 60000
+ })
+
+ # Sort by episode number
+ episodes.sort(key=lambda x: x['episode_number'])
+ print("Add n_episodes:", len(episodes), "for season:", season_number)
+ return episodes
+
+ except Exception as e:
+ logging.error(f"Failed to get episodes for season {season_number}: {e}")
+ return []
+
+ def collect_season(self):
+ """Collect all seasons and episodes"""
+ try:
+ for season_num in range(1, self.n_seasons + 1):
+ episodes = self._get_season_episodes(season_num)
+
+ if episodes:
+ season_obj = self.seasons_manager.add_season({
+ 'number': season_num,
+ 'name': f"Season {season_num}",
+ 'id': f"season_{season_num}"
+ })
+
+ if season_obj:
+ for episode in episodes:
+ season_obj.episodes.add(episode)
+
+ except Exception as e:
+ logging.error(f"Error in collect_season: {e}")
+
+
+ # ------------- FOR GUI -------------
+ def getNumberSeason(self) -> int:
+ """Get total number of seasons"""
+ if not self.seasons_manager.seasons:
+ self.collect_season()
+ return len(self.seasons_manager.seasons)
+
+    def getEpisodeSeasons(self, season_number: int) -> list:
+        """Get all episodes for a specific season"""
+        if not self.seasons_manager.seasons:
+            self.collect_season()
+
+        # Seasons without episodes are never added, so look the season up by
+        # number instead of assuming a contiguous list index
+        for season in self.seasons_manager.seasons:
+            if season.number == season_number:
+                return season.episodes.episodes
+
+        logging.error(f"Season {season_number} not found")
+        return []
+
+ def selectEpisode(self, season_number: int, episode_index: int) -> dict:
+ """Get information for a specific episode"""
+ episodes = self.getEpisodeSeasons(season_number)
+ if not episodes or episode_index < 0 or episode_index >= len(episodes):
+ logging.error(f"Episode index {episode_index} out of range for season {season_number}")
+ return None
+
+ return episodes[episode_index]
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/discovery/util/get_license.py b/StreamingCommunity/Api/Service/discovery/util/get_license.py
new file mode 100644
index 000000000..638742202
--- /dev/null
+++ b/StreamingCommunity/Api/Service/discovery/util/get_license.py
@@ -0,0 +1,179 @@
+# 22.12.25
+
+import uuid
+import random
+
+
+# External library
+from ua_generator import generate
+
+
+# Internal utilities
+from StreamingCommunity.Util.http_client import create_client_curl
+
+
+# Variable
+_discovery_api = None
+
+
+class DiscoveryAPI:
+ def __init__(self):
+ self.device_id = str(uuid.uuid4())
+ self.device_info = self._generate_device_info()
+ self.user_agent = self.device_info['user_agent']
+ self.bearer_token = None
+ self._initialize()
+
+ def _generate_device_info(self):
+ ua = generate(device='desktop', browser=random.choice(['chrome', 'firefox', 'edge', 'safari']))
+
+        # ua_generator is asked for one of these four browsers; fall back to
+        # chrome if it ever reports something unexpected
+        browser_name = ua.browser.lower()
+        if browser_name not in ('chrome', 'firefox', 'edge', 'safari'):
+            browser_name = 'chrome'
+ browser_version = ua.ch.browser_full_version if hasattr(ua.ch, 'browser_full_version') else '125.0.0.0'
+ os_version = ua.ch.platform_version if hasattr(ua.ch, 'platform_version') else 'NT 10.0'
+
+ device_info = {
+ 'user_agent': ua.text,
+ 'device': {
+ 'browser': {
+ 'name': browser_name,
+ 'version': browser_version,
+ },
+ 'id': '',
+ 'language': random.choice(['en', 'en-US', 'en-GB']),
+ 'make': '',
+ 'model': '',
+ 'name': browser_name,
+ 'os': ua.ch.platform if hasattr(ua.ch, 'platform') else 'Windows',
+ 'osVersion': os_version,
+ 'player': {
+ 'name': 'Discovery Player Web',
+ 'version': '3.1.0',
+ },
+ 'type': 'desktop',
+ }
+ }
+
+ return device_info
+
+ def _initialize(self):
+ headers = {
+ 'user-agent': self.user_agent,
+ 'x-device-info': f'dsc/4.4.1 (desktop/desktop; Windows/NT 10.0; {self.device_id})',
+ 'x-disco-client': 'WEB:UNKNOWN:dsc:4.4.1'
+ }
+ params = {
+ 'deviceId': self.device_id,
+ 'realm': 'go',
+ 'shortlived': 'true'
+ }
+
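+        # Request an anonymous, short-lived bearer token tied to this device id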
+ try:
+ response = create_client_curl(headers=headers).get('https://us1-prod-direct.go.discovery.com/token', params=params)
+ response.raise_for_status()
+ self.bearer_token = response.json()['data']['attributes']['token']
+
+ except Exception as e:
+ raise RuntimeError(f"Failed to get bearer token: {e}")
+
+ def get_request_headers(self):
+ return {
+ 'accept': '*/*',
+ 'user-agent': self.user_agent,
+ 'x-disco-client': 'WEB:UNKNOWN:dsc:4.4.1',
+ 'x-disco-params': 'realm=go,siteLookupKey=dsc,bid=dsc,hn=go.discovery.com,hth=us,features=ar',
+ }
+
+ def get_cookies(self):
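+        """Return request cookies; the 'st' cookie carries the bearer token."""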
+ return {'st': self.bearer_token}
+
+
+def get_api():
+ """Get or create Discovery API instance"""
+ global _discovery_api
+ if _discovery_api is None:
+ _discovery_api = DiscoveryAPI()
+ return _discovery_api
+
+
+def get_playback_info(video_id):
+ """
+ Get playback information for a video including MPD URL and license token
+
+ Args:
+ video_id (str): The video ID
+ """
+ api = get_api()
+
+ cookies = api.get_cookies()
+ headers = {
+ 'user-agent': api.user_agent,
+ 'x-disco-client': 'WEB:UNKNOWN:dsc:4.4.1',
+ }
+
+ json_data = {
+ 'videoId': video_id,
+ 'wisteriaProperties': {
+ 'advertiser': {},
+ 'appBundle': '',
+ 'device': api.device_info['device'],
+ 'gdpr': 0,
+ 'platform': 'desktop',
+ 'product': 'dsc',
+ 'siteId': 'dsc'
+ },
+ 'deviceInfo': {
+ 'adBlocker': False,
+ 'deviceId': '',
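+            # Declare which DRM systems the client supports; the widevine
+            # scheme is read from the response below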
+ 'drmTypes': {
+ 'widevine': True,
+ 'playready': False,
+ 'fairplay': False,
+ 'clearkey': True,
+ },
+ 'drmSupported': True
+ },
+ }
+
+ response = create_client_curl().post('https://us1-prod-direct.go.discovery.com/playback/v3/videoPlaybackInfo', cookies=cookies, headers=headers, json=json_data)
+
+ if response.status_code == 403:
+ json_response = response.json()
+ errors = json_response.get('errors', [])
+ if errors and errors[0].get('code') == 'access.denied.missingpackage':
+ raise RuntimeError("Content requires a subscription/account to view")
+ else:
+ raise RuntimeError("Content is geo-restricted")
+
+ response.raise_for_status()
+ json_response = response.json()
+
+ streaming_data = json_response['data']['attributes']['streaming']
+ widevine_scheme = streaming_data[0]['protection']['schemes'].get('widevine')
+
+ return {
+ 'mpd_url': streaming_data[0]['url'],
+ 'license_url': widevine_scheme['licenseUrl'] if widevine_scheme else None,
+ 'license_token': streaming_data[0]['protection']['drmToken'] if widevine_scheme else None,
+ 'type': streaming_data[0]['type']
+ }
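+
+# Usage sketch (the video id below is hypothetical):
+#   info = get_playback_info("12345")
+#   headers = generate_license_headers(info['license_token'])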
+
+
+def generate_license_headers(license_token):
+ """
+ Generate headers for license requests
+
+ Args:
+ license_token (str): The DRM token from playback info
+ """
+ return {
+ 'preauthorization': license_token,
+ 'user-agent': get_api().user_agent,
+ }
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/dmax/__init__.py b/StreamingCommunity/Api/Service/dmax/__init__.py
index 65fed939c..4bd108c65 100644
--- a/StreamingCommunity/Api/Service/dmax/__init__.py
+++ b/StreamingCommunity/Api/Service/dmax/__init__.py
@@ -17,7 +17,12 @@
# Variable
indice = 9
_useFor = "Serie"
+_region = "IT"
_deprecate = False
+_stream_type = "HLS"
+_maxResolution = "1080p"
+_drm = False
+
msg = Prompt()
console = Console()
@@ -57,7 +62,6 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
Parameters:
string_to_search (str, optional): String to search for. Can be passed from run.py.
- If 'back', special handling might occur in get_user_input.
get_onlyDatabase (bool, optional): If True, return only the database search manager object.
direct_item (dict, optional): Direct item to process (bypasses search).
selections (dict, optional): Dictionary containing selection inputs that bypass manual input
diff --git a/StreamingCommunity/Api/Service/dmax/series.py b/StreamingCommunity/Api/Service/dmax/series.py
index f77288716..5372d9e71 100644
--- a/StreamingCommunity/Api/Service/dmax/series.py
+++ b/StreamingCommunity/Api/Service/dmax/series.py
@@ -51,7 +51,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
- console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name}[/magenta] ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
+ console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name} ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
# Define filename and path for the downloaded video
mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.{extension_output}"
diff --git a/StreamingCommunity/Api/Service/guardaserie/__init__.py b/StreamingCommunity/Api/Service/guardaserie/__init__.py
index 7caeddc51..7b8b23ba3 100644
--- a/StreamingCommunity/Api/Service/guardaserie/__init__.py
+++ b/StreamingCommunity/Api/Service/guardaserie/__init__.py
@@ -20,7 +20,12 @@
# Variable
indice = 4
_useFor = "Serie"
+_region = "IT"
_deprecate = False
+_stream_type = "HLS"
+_maxResolution = "1080p"
+_drm = False
+
msg = Prompt()
console = Console()
diff --git a/StreamingCommunity/Api/Service/guardaserie/series.py b/StreamingCommunity/Api/Service/guardaserie/series.py
index 95046e694..460d5a979 100644
--- a/StreamingCommunity/Api/Service/guardaserie/series.py
+++ b/StreamingCommunity/Api/Service/guardaserie/series.py
@@ -53,7 +53,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
index_season_selected_formatted = dynamic_format_number(str(index_season_selected))
- console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.tv_name} \\ [magenta]{obj_episode.get('name')}[/magenta] ([cyan]S{index_season_selected_formatted}E{index_episode_selected}) \n")
+ console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.tv_name} \\ [magenta]{obj_episode.get('name')} ([cyan]S{index_season_selected_formatted}E{index_episode_selected}) \n")
# Define filename and path for the downloaded video
mp4_name = f"{map_episode_title(scrape_serie.tv_name, index_season_selected_formatted, index_episode_selected, obj_episode.get('name'))}.{extension_output}"
diff --git a/StreamingCommunity/Api/Service/hd4me/__init__.py b/StreamingCommunity/Api/Service/hd4me/__init__.py
index fdbbe626f..540567ab1 100644
--- a/StreamingCommunity/Api/Service/hd4me/__init__.py
+++ b/StreamingCommunity/Api/Service/hd4me/__init__.py
@@ -19,7 +19,12 @@
# Variable
indice = 10
_useFor = "Film_&_Serie"
+_region = "IT"
_deprecate = False
+_stream_type = "MEGA"
+_maxResolution = "720p"
+_drm = False
+
msg = Prompt()
console = Console()
diff --git a/StreamingCommunity/Api/Service/ipersphera/__init__.py b/StreamingCommunity/Api/Service/ipersphera/__init__.py
index a776e0374..20969fbf6 100644
--- a/StreamingCommunity/Api/Service/ipersphera/__init__.py
+++ b/StreamingCommunity/Api/Service/ipersphera/__init__.py
@@ -18,7 +18,12 @@
# Variable
indice = 12
_useFor = "Film_&_Serie"
+_region = "IT"
_deprecate = False
+_stream_type = "MEGA"
+_maxResolution = "1080p"
+_drm = False
+
msg = Prompt()
console = Console()
diff --git a/StreamingCommunity/Api/Service/mediasetinfinity/__init__.py b/StreamingCommunity/Api/Service/mediasetinfinity/__init__.py
index 60e2f7542..38e981ef5 100644
--- a/StreamingCommunity/Api/Service/mediasetinfinity/__init__.py
+++ b/StreamingCommunity/Api/Service/mediasetinfinity/__init__.py
@@ -18,7 +18,12 @@
# Variable
indice = 3
_useFor = "Film_&_Serie"
+_region = "IT"
_deprecate = False
+_stream_type = "DASH"
+_maxResolution = "1080p"
+_drm = True
+
msg = Prompt()
console = Console()
diff --git a/StreamingCommunity/Api/Service/mediasetinfinity/series.py b/StreamingCommunity/Api/Service/mediasetinfinity/series.py
index 71f9b2553..9ff5f859e 100644
--- a/StreamingCommunity/Api/Service/mediasetinfinity/series.py
+++ b/StreamingCommunity/Api/Service/mediasetinfinity/series.py
@@ -53,7 +53,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
- console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name}[/magenta] ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
+ console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name} ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
# Define filename and path for the downloaded video
mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.{extension_output}"
diff --git a/StreamingCommunity/Api/Service/nove/__init__.py b/StreamingCommunity/Api/Service/nove/__init__.py
new file mode 100644
index 000000000..829f90c12
--- /dev/null
+++ b/StreamingCommunity/Api/Service/nove/__init__.py
@@ -0,0 +1,100 @@
+# 26.11.2025
+
+# External library
+from rich.console import Console
+from rich.prompt import Prompt
+
+
+# Internal utilities
+from StreamingCommunity.Api.Template import site_constants, MediaItem, get_select_title
+
+
+# Logic
+from .site import title_search, table_show_manager, media_search_manager
+from .series import download_series
+
+
+# Variable
+indice = 14
+_useFor = "Serie"
+_region = "IT"
+_deprecate = False
+_stream_type = "HLS"
+_maxResolution = "1080p"
+_drm = False
+
+
+msg = Prompt()
+console = Console()
+
+
+def process_search_result(select_title, selections=None):
+ """
+ Handles the search result and initiates the download for either a film or series.
+
+ Parameters:
+ select_title (MediaItem): The selected media item. Can be None if selection fails.
+ selections (dict, optional): Dictionary containing selection inputs that bypass manual input
+ e.g., {'season': season_selection, 'episode': episode_selection}
+ Returns:
+ bool: True if processing was successful, False otherwise
+ """
+ if not select_title:
+ return False
+
+ if select_title.type == 'tv':
+ season_selection = None
+ episode_selection = None
+
+ if selections:
+ season_selection = selections.get('season')
+ episode_selection = selections.get('episode')
+
+ download_series(select_title, season_selection, episode_selection)
+ media_search_manager.clear()
+ table_show_manager.clear()
+ return True
+
+
+def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None, selections: dict = None):
+ """
+ Main function of the application for search.
+
+ Parameters:
+ string_to_search (str, optional): String to search for. Can be passed from run.py.
+ get_onlyDatabase (bool, optional): If True, return only the database search manager object.
+ direct_item (dict, optional): Direct item to process (bypasses search).
+ selections (dict, optional): Dictionary containing selection inputs that bypass manual input
+ for series (season/episode).
+ """
+ if direct_item:
+ select_title = MediaItem(**direct_item)
+ result = process_search_result(select_title, selections)
+ return result
+
+ # Get the user input for the search term
+ actual_search_query = None
+ if string_to_search is not None:
+ actual_search_query = string_to_search.strip()
+ else:
+ actual_search_query = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constants.SITE_NAME}").strip()
+
+ # Handle empty input
+ if not actual_search_query:
+ return False
+
+ # Search on database
+ len_database = title_search(actual_search_query)
+
+ # If only the database is needed, return the manager
+ if get_onlyDatabase:
+ return media_search_manager
+
+ if len_database > 0:
+ select_title = get_select_title(table_show_manager, media_search_manager, len_database)
+ result = process_search_result(select_title, selections)
+ return result
+
+ else:
+ console.print(f"\n[red]Nothing matching was found for[white]: [purple]{actual_search_query}")
+ return False
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/nove/series.py b/StreamingCommunity/Api/Service/nove/series.py
new file mode 100644
index 000000000..5372d9e71
--- /dev/null
+++ b/StreamingCommunity/Api/Service/nove/series.py
@@ -0,0 +1,169 @@
+# 26.11.2025
+
+import os
+from typing import Tuple
+
+
+# External library
+from rich.console import Console
+from rich.prompt import Prompt
+
+
+# Internal utilities
+from StreamingCommunity.Util import config_manager, start_message
+from StreamingCommunity.Api.Template import site_constants, MediaItem
+from StreamingCommunity.Api.Template.episode_manager import (
+ manage_selection,
+ map_episode_title,
+ validate_selection,
+ validate_episode_selection,
+ display_episodes_list,
+ display_seasons_list
+)
+from StreamingCommunity.Lib.HLS import HLS_Downloader
+
+
+# Logic
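+# nove runs on the same Aurora backend as realtime, so the realtime scraper
+# and license helpers are reused here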
+from ..realtime.util.ScrapeSerie import GetSerieInfo
+from ..realtime.util.get_license import get_bearer_token, get_playback_url
+
+
+# Variable
+msg = Prompt()
+console = Console()
+extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+
+
+def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str,bool]:
+ """
+ Downloads a specific episode from the specified season.
+
+ Parameters:
+ - index_season_selected (int): Season number
+ - index_episode_selected (int): Episode index
+ - scrape_serie (GetSerieInfo): Scraper object with series information
+
+ Returns:
+ - str: Path to downloaded file
+ - bool: Whether download was stopped
+ """
+ start_message()
+
+ # Get episode information
+ obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
+ console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name} ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
+
+ # Define filename and path for the downloaded video
+ mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.{extension_output}"
+ mp4_path = os.path.join(site_constants.SERIES_FOLDER, scrape_serie.series_name, f"S{index_season_selected}")
+
+ # Get m3u8 playlist
+ bearer_token = get_bearer_token()
+ master_playlist = get_playback_url(obj_episode.id, bearer_token, False, obj_episode.channel)
+
+ # Download the episode
+ hls_process = HLS_Downloader(
+ m3u8_url=master_playlist,
+ output_path=os.path.join(mp4_path, mp4_name)
+ ).start()
+
+ if hls_process['error'] is not None:
+ try:
+ os.remove(hls_process['path'])
+ except Exception:
+ pass
+
+ return hls_process['path'], hls_process['stopped']
+
+
+def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, download_all: bool = False, episode_selection: str = None) -> None:
+ """
+ Handle downloading episodes for a specific season.
+
+ Parameters:
+ - index_season_selected (int): Season number
+ - scrape_serie (GetSerieInfo): Scraper object with series information
+ - download_all (bool): Whether to download all episodes
+ - episode_selection (str, optional): Pre-defined episode selection that bypasses manual input
+ """
+ # Get episodes for the selected season
+ episodes = scrape_serie.getEpisodeSeasons(index_season_selected)
+ episodes_count = len(episodes)
+
+ if episodes_count == 0:
+ console.print(f"[red]No episodes found for season {index_season_selected}")
+ return
+
+ if download_all:
+ # Download all episodes in the season
+ for i_episode in range(1, episodes_count + 1):
+ path, stopped = download_video(index_season_selected, i_episode, scrape_serie)
+
+ if stopped:
+ break
+
+ console.print(f"\n[red]End downloaded [yellow]season: [red]{index_season_selected}.")
+
+ else:
+ # Display episodes list and manage user selection
+ if episode_selection is None:
+ last_command = display_episodes_list(episodes)
+ else:
+ last_command = episode_selection
+ console.print(f"\n[cyan]Using provided episode selection: [yellow]{episode_selection}")
+
+ # Validate the selection
+ list_episode_select = manage_selection(last_command, episodes_count)
+ list_episode_select = validate_episode_selection(list_episode_select, episodes_count)
+
+ # Download selected episodes if not stopped
+ for i_episode in list_episode_select:
+ path, stopped = download_video(index_season_selected, i_episode, scrape_serie)
+
+ if stopped:
+ break
+
+
+def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None) -> None:
+ """
+ Handle downloading a complete series.
+
+ Parameters:
+ - select_season (MediaItem): Series metadata from search
+ - season_selection (str, optional): Pre-defined season selection that bypasses manual input
+ - episode_selection (str, optional): Pre-defined episode selection that bypasses manual input
+ """
+ start_message()
+
+ # Init class
+ scrape_serie = GetSerieInfo(select_season.url)
+
+ # Collect information about season
+ scrape_serie.getNumberSeason()
+ seasons_count = len(scrape_serie.seasons_manager)
+
+ # If season_selection is provided, use it instead of asking for input
+ if season_selection is None:
+ index_season_selected = display_seasons_list(scrape_serie.seasons_manager)
+ else:
+ index_season_selected = season_selection
+ console.print(f"\n[cyan]Using provided season selection: [yellow]{season_selection}")
+
+ # Validate the selection
+ list_season_select = manage_selection(index_season_selected, seasons_count)
+ list_season_select = validate_selection(list_season_select, seasons_count)
+
+ # Loop through the selected seasons and download episodes
+ for i_season in list_season_select:
+ try:
+ season = scrape_serie.seasons_manager.seasons[i_season - 1]
+ except IndexError:
+ console.print(f"[red]Season index {i_season} not found! Available seasons: {[s.number for s in scrape_serie.seasons_manager.seasons]}")
+ continue
+
+ season_number = season.number
+
+ if len(list_season_select) > 1 or index_season_selected == "*":
+ download_episode(season_number, scrape_serie, download_all=True)
+ else:
+ download_episode(season_number, scrape_serie, download_all=False, episode_selection=episode_selection)
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/nove/site.py b/StreamingCommunity/Api/Service/nove/site.py
new file mode 100644
index 000000000..4dbf516a9
--- /dev/null
+++ b/StreamingCommunity/Api/Service/nove/site.py
@@ -0,0 +1,69 @@
+# 26.11.2025
+
+
+# External libraries
+from rich.console import Console
+
+
+# Internal utilities
+from StreamingCommunity.Util.http_client import create_client, get_userAgent
+from StreamingCommunity.Util.table import TVShowManager
+from StreamingCommunity.Api.Template import site_constants, MediaManager
+
+
+# Variable
+console = Console()
+media_search_manager = MediaManager()
+table_show_manager = TVShowManager()
+
+
+def title_search(query: str) -> int:
+ """
+ Search for titles based on a search query.
+
+ Parameters:
+ - query (str): The query to search for.
+
+ Returns:
+ int: The number of titles found.
+ """
+ media_search_manager.clear()
+ table_show_manager.clear()
+
+ search_url = f"https://public.aurora.enhanced.live/site/search/page/?include=default&filter[environment]=nove&v=2&q={query}&page[number]=1&page[size]=20"
+ console.print(f"[cyan]Search url: [yellow]{search_url}")
+
+ try:
+        response = create_client(headers={'user-agent': get_userAgent()}).get(search_url, params=params)
+ response.raise_for_status()
+
+ except Exception as e:
+ console.print(f"[red]Site: {site_constants.SITE_NAME}, request search error: {e}")
+ return 0
+
+ # Collect json data
+ try:
+ data = response.json().get('data')
+ except Exception as e:
+ console.log(f"Error parsing JSON response: {e}")
+ return 0
+
+ for dict_title in data:
+ try:
+ # Skip non-showpage entries
+ if dict_title.get('type') != 'showpage':
+ continue
+
+ media_search_manager.add_media({
+ 'name': dict_title.get('title'),
+ 'type': 'tv',
+                'date': (dict_title.get('dateLastModified') or '').split('T')[0],
+                'image': (dict_title.get('image') or {}).get('url'),
+ 'url': f'https://public.aurora.enhanced.live/site/page/{str(dict_title.get("slug")).lower().replace(" ", "-")}/?include=default&filter[environment]=nove&v=2&parent_slug={dict_title.get("parentSlug")}',
+ })
+
+ except Exception as e:
+ print(f"Error parsing a film entry: {e}")
+
+ # Return the number of titles found
+ return media_search_manager.get_length()
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/raiplay/__init__.py b/StreamingCommunity/Api/Service/raiplay/__init__.py
index 027a8ccdb..d00eb39f1 100644
--- a/StreamingCommunity/Api/Service/raiplay/__init__.py
+++ b/StreamingCommunity/Api/Service/raiplay/__init__.py
@@ -18,7 +18,12 @@
# Variable
indice = 5
_useFor = "Film_&_Serie"
+_region = "IT"
_deprecate = False
+_stream_type = "DASH"
+_maxResolution = "1080p"
+_drm = True
+
msg = Prompt()
console = Console()
diff --git a/StreamingCommunity/Api/Service/raiplay/series.py b/StreamingCommunity/Api/Service/raiplay/series.py
index 030c51eef..06fee7eb7 100644
--- a/StreamingCommunity/Api/Service/raiplay/series.py
+++ b/StreamingCommunity/Api/Service/raiplay/series.py
@@ -56,7 +56,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
- console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name}[/magenta] ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
+ console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name} ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
# Define filename and path
mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.{extension_output}"
diff --git a/StreamingCommunity/Api/Service/realtime/__init__.py b/StreamingCommunity/Api/Service/realtime/__init__.py
index ed4f1143c..bdf649710 100644
--- a/StreamingCommunity/Api/Service/realtime/__init__.py
+++ b/StreamingCommunity/Api/Service/realtime/__init__.py
@@ -18,7 +18,12 @@
# Variable
indice = 8
_useFor = "Serie"
+_region = "IT"
_deprecate = False
+_stream_type = "HLS"
+_maxResolution = "1080p"
+_drm = True
+
msg = Prompt()
console = Console()
@@ -58,7 +63,6 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
Parameters:
string_to_search (str, optional): String to search for. Can be passed from run.py.
- If 'back', special handling might occur in get_user_input.
get_onlyDatabase (bool, optional): If True, return only the database search manager object.
direct_item (dict, optional): Direct item to process (bypasses search).
selections (dict, optional): Dictionary containing selection inputs that bypass manual input
diff --git a/StreamingCommunity/Api/Service/realtime/series.py b/StreamingCommunity/Api/Service/realtime/series.py
index 84b2c4f8c..160d2b554 100644
--- a/StreamingCommunity/Api/Service/realtime/series.py
+++ b/StreamingCommunity/Api/Service/realtime/series.py
@@ -51,7 +51,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
- console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name}[/magenta] ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
+ console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name} ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
# Define filename and path for the downloaded video
mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.{extension_output}"
diff --git a/StreamingCommunity/Api/Service/streamingcommunity/__init__.py b/StreamingCommunity/Api/Service/streamingcommunity/__init__.py
index f2ae7a1dd..bb7903d35 100644
--- a/StreamingCommunity/Api/Service/streamingcommunity/__init__.py
+++ b/StreamingCommunity/Api/Service/streamingcommunity/__init__.py
@@ -18,7 +18,12 @@
# Variable
indice = 0
_useFor = "Film_&_Serie"
+_region = "IT"
_deprecate = False
+_stream_type = "HLS"
+_maxResolution = "1080p"
+_drm = False
+
msg = Prompt()
console = Console()
@@ -64,7 +69,6 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
Parameters:
string_to_search (str, optional): String to search for. Can be passed from run.py.
- If 'back', special handling might occur in get_user_input.
get_onlyDatabase (bool, optional): If True, return only the database search manager object.
direct_item (dict, optional): Direct item to process (bypasses search).
selections (dict, optional): Dictionary containing selection inputs that bypass manual input
diff --git a/StreamingCommunity/Api/Service/streamingcommunity/series.py b/StreamingCommunity/Api/Service/streamingcommunity/series.py
index 5a3611433..0e9ac2a06 100644
--- a/StreamingCommunity/Api/Service/streamingcommunity/series.py
+++ b/StreamingCommunity/Api/Service/streamingcommunity/series.py
@@ -52,7 +52,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
- console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name}[/magenta] ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
+ console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name} ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
# Define filename and path for the downloaded video
mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.{extension_output}"
diff --git a/StreamingCommunity/Api/Service/tubitv/__init__.py b/StreamingCommunity/Api/Service/tubitv/__init__.py
index d4bda8649..09e3b63a0 100644
--- a/StreamingCommunity/Api/Service/tubitv/__init__.py
+++ b/StreamingCommunity/Api/Service/tubitv/__init__.py
@@ -19,7 +19,12 @@
# Variable
indice = 11
_useFor = "Serie"
+_region = "US"
_deprecate = False
+_stream_type = "HLS"
+_maxResolution = "1080p"
+_drm = True
+
msg = Prompt()
console = Console()
@@ -64,7 +69,6 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
Parameters:
string_to_search (str, optional): String to search for. Can be passed from run.py.
- If 'back', special handling might occur in get_user_input.
get_onlyDatabase (bool, optional): If True, return only the database search manager object.
direct_item (dict, optional): Direct item to process (bypasses search).
selections (dict, optional): Dictionary containing selection inputs that bypass manual input
diff --git a/StreamingCommunity/Api/Service/tubitv/series.py b/StreamingCommunity/Api/Service/tubitv/series.py
index f718a7a21..1eb11a10c 100644
--- a/StreamingCommunity/Api/Service/tubitv/series.py
+++ b/StreamingCommunity/Api/Service/tubitv/series.py
@@ -52,7 +52,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
- console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name}[/magenta] ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
+ console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name} ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
# Define filename and path for the downloaded video
mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.{extension_output}"
diff --git a/StreamingCommunity/Api/Template/object.py b/StreamingCommunity/Api/Template/object.py
index 6fdf34c61..71fcb77de 100644
--- a/StreamingCommunity/Api/Template/object.py
+++ b/StreamingCommunity/Api/Template/object.py
@@ -8,6 +8,7 @@ def __init__(self, data: Dict[str, Any]):
self.data = data
self.id: int = data.get('id', 0)
+ self.video_id : str = data.get('video_id', '')
self.number: int = data.get('number', 1)
self.name: str = data.get('name', '')
self.duration: int = data.get('duration', 0)
diff --git a/StreamingCommunity/Lib/DASH/__init__.py b/StreamingCommunity/Lib/DASH/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/StreamingCommunity/Lib/DASH/cdm_helpher.py b/StreamingCommunity/Lib/DASH/cdm_helpher.py
index ac132a72c..91bd51a7a 100644
--- a/StreamingCommunity/Lib/DASH/cdm_helpher.py
+++ b/StreamingCommunity/Lib/DASH/cdm_helpher.py
@@ -1,6 +1,5 @@
# 25.07.25
-import sys
import base64
from urllib.parse import urlencode
@@ -17,6 +16,7 @@
console = Console()
+
def get_widevine_keys(pssh: str, license_url: str, cdm_device_path: str, headers: dict = None, query_params: dict =None, key: str=None):
"""
Extract Widevine CONTENT keys (KID/KEY) from a license using pywidevine.
@@ -61,10 +61,6 @@ def get_widevine_keys(pssh: str, license_url: str, cdm_device_path: str, headers
if 'Content-Type' not in req_headers:
req_headers['Content-Type'] = 'application/octet-stream'
- # Send license request
- if request_url is None:
- console.print("[red]License URL is None.")
- sys.exit(0)
response = requests.post(request_url, headers=req_headers, impersonate="chrome124", **request_kwargs)
if response.status_code != 200:
@@ -111,23 +107,20 @@ def get_widevine_keys(pssh: str, license_url: str, cdm_device_path: str, headers
'key': key_val.replace('-', '').strip()
})
- if not content_keys:
- console.print("[yellow]⚠️ No CONTENT keys found in license.")
- return None
-
- console.log(f"[cyan]KID: [green]{content_keys[0]['kid']} [white]| [cyan]KEY: [green]{content_keys[0]['key']}")
+ # Return keys
+ console.log(f"[cyan]Extracted {len(content_keys)} CONTENT keys from license.")
return content_keys
-
+
else:
content_keys = []
raw_kid = key.split(":")[0]
raw_key = key.split(":")[1]
+
content_keys.append({
'kid': raw_kid.replace('-', '').strip(),
'key': raw_key.replace('-', '').strip()
})
- # Return keys
console.log(f"[cyan]KID: [green]{content_keys[0]['kid']} [white]| [cyan]KEY: [green]{content_keys[0]['key']}")
return content_keys
@@ -135,6 +128,40 @@ def get_widevine_keys(pssh: str, license_url: str, cdm_device_path: str, headers
cdm.close(session_id)
+def map_keys_to_representations(keys: list, representations: list) -> dict:
+ """
+ Map decryption keys to representations based on their default_KID.
+
+ Args:
+ keys (list): List of key dictionaries with 'kid' and 'key' fields
+ representations (list): List of representation dictionaries with 'default_kid' field
+
+ Returns:
+ dict: Mapping of representation type to key info
+ """
+ key_mapping = {}
+
+ for rep in representations:
+ rep_type = rep.get('type', 'unknown')
+ default_kid = rep.get('default_kid')
+
+ if default_kid is None:
+ console.log(f"[yellow]Representation [yellow]{rep.get('id')} [yellow]has no default_kid, maybe problem with parser.")
+ continue
+
+ for key_info in keys:
+ if key_info['kid'].lower() == default_kid.lower():
+ key_mapping[rep_type] = {
+ 'kid': key_info['kid'],
+ 'key': key_info['key'],
+ 'representation_id': rep.get('id'),
+ 'default_kid': default_kid
+ }
+ break
+
+ return key_mapping
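+
+# Usage sketch: 'keys' comes from get_widevine_keys and 'representations'
+# from the MPD parser (see download_and_decrypt in downloader.py):
+#   mapping = map_keys_to_representations(keys, parser.representations)
+#   video_key = mapping.get('video')  # {'kid': ..., 'key': ..., ...} or None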
+
+
def get_info_wvd(cdm_device_path):
"""
Extract device information from a Widevine CDM device file (.wvd).
diff --git a/StreamingCommunity/Lib/DASH/downloader.py b/StreamingCommunity/Lib/DASH/downloader.py
index 601aa7b0e..bc4789a85 100644
--- a/StreamingCommunity/Lib/DASH/downloader.py
+++ b/StreamingCommunity/Lib/DASH/downloader.py
@@ -8,7 +8,6 @@
# External libraries
from rich.console import Console
-from rich.table import Table
# Internal utilities
@@ -21,7 +20,7 @@
from .parser import MPD_Parser
from .segments import MPD_Segments
from .decrypt import decrypt_with_mp4decrypt
-from .cdm_helpher import get_widevine_keys
+from .cdm_helpher import get_widevine_keys, map_keys_to_representations
# FFmpeg functions
@@ -30,12 +29,9 @@
# Config
-DOWNLOAD_SPECIFIC_AUDIO = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_audio')
DOWNLOAD_SPECIFIC_SUBTITLE = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_subtitles')
MERGE_SUBTITLE = config_manager.get_bool('M3U8_DOWNLOAD', 'merge_subs')
-FILTER_CUSTOM_REOLUTION = str(config_manager.get('M3U8_CONVERSION', 'force_resolution')).strip().lower()
CLEANUP_TMP = config_manager.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
-RETRY_LIMIT = config_manager.get_int('REQUESTS', 'max_retry')
EXTENSION_OUTPUT = config_manager.get("M3U8_CONVERSION", "extension")
@@ -58,7 +54,7 @@ def __init__(self, license_url, mpd_url, mpd_sub_list: list = None, output_path:
self.cdm_device = get_wvd_path()
self.license_url = str(license_url).strip() if license_url else None
self.mpd_url = str(mpd_url).strip()
- self.mpd_sub_list = mpd_sub_list or []
+ self.mpd_sub_list = mpd_sub_list
# Sanitize the output path to remove invalid characters
sanitized_output_path = os_manager.get_sanitize_path(output_path)
@@ -67,8 +63,7 @@ def __init__(self, license_url, mpd_url, mpd_sub_list: list = None, output_path:
self.file_already_exists = os.path.exists(self.original_output_path)
self.parser = None
- # Added defaults to avoid AttributeError when no subtitles/audio/video are present
- # Non la soluzione migliore ma evita crash in assenza di audio/video/subs
+ # Pre-selected representations (set by parse_manifest)
self.selected_subs = []
self.selected_video = None
self.selected_audio = None
@@ -103,88 +98,53 @@ def _setup_temp_dirs(self):
def parse_manifest(self, custom_headers):
"""
- Parse the MPD manifest file and extract relevant information.
+ Parse the MPD manifest file and select representations based on configuration.
"""
if self.file_already_exists:
return
- self.parser = MPD_Parser(self.mpd_url)
+ # Initialize parser with tmp directory for auto-save and subtitle list
+ self.parser = MPD_Parser(self.mpd_url, auto_save=True, save_dir=self.tmp_dir, mpd_sub_list=self.mpd_sub_list)
self.parser.parse(custom_headers)
- def calculate_column_widths():
- """Calculate optimal column widths based on content."""
- data_rows = []
-
- # Video info
- selected_video, list_available_resolution, filter_custom_resolution, downloadable_video = self.parser.select_video(FILTER_CUSTOM_REOLUTION)
- self.selected_video = selected_video
-
- available_video = ', '.join(list_available_resolution) if list_available_resolution else "Nothing"
- set_video = str(filter_custom_resolution) if filter_custom_resolution else "Nothing"
- downloadable_video_str = str(downloadable_video) if downloadable_video else "Nothing"
-
- data_rows.append(["Video", available_video, set_video, downloadable_video_str])
+ # Select representations based on configuration
+ self.selected_video, _, _, _ = self.parser.select_video()
+ self.selected_audio, _, _, _ = self.parser.select_audio()
- # Audio info
- selected_audio, list_available_audio_langs, filter_custom_audio, downloadable_audio = self.parser.select_audio(DOWNLOAD_SPECIFIC_AUDIO)
- self.selected_audio = selected_audio
-
- if list_available_audio_langs:
- available_audio = ', '.join(list_available_audio_langs)
- set_audio = str(filter_custom_audio) if filter_custom_audio else "Nothing"
- downloadable_audio_str = str(downloadable_audio) if downloadable_audio else "Nothing"
-
- data_rows.append(["Audio", available_audio, set_audio, downloadable_audio_str])
-
- # Subtitle info
- available_sub_languages = [sub.get('language') for sub in self.mpd_sub_list]
-
- if available_sub_languages:
- available_subs = ', '.join(available_sub_languages)
-
- # Filter subtitles based on configuration
- if "*" in DOWNLOAD_SPECIFIC_SUBTITLE:
- self.selected_subs = self.mpd_sub_list
- downloadable_sub_languages = available_sub_languages
- else:
- self.selected_subs = [
- sub for sub in self.mpd_sub_list
- if sub.get('language') in DOWNLOAD_SPECIFIC_SUBTITLE
- ]
- downloadable_sub_languages = [sub.get('language') for sub in self.selected_subs]
-
- downloadable_subs = ', '.join(downloadable_sub_languages) if downloadable_sub_languages else "Nothing"
- set_subs = ', '.join(DOWNLOAD_SPECIFIC_SUBTITLE) if DOWNLOAD_SPECIFIC_SUBTITLE else "Nothing"
-
- data_rows.append(["Subtitle", available_subs, set_subs, downloadable_subs])
-
- # Calculate max width for each column
- headers = ["Type", "Available", "Set", "Downloadable"]
- max_widths = [len(header) for header in headers]
-
- for row in data_rows:
- for i, cell in enumerate(row):
- max_widths[i] = max(max_widths[i], len(str(cell)))
-
- # Add some padding
- max_widths = [w + 2 for w in max_widths]
-
- return data_rows, max_widths
+ # Auto-select subtitles based on selected audio language
+ selected_audio_language = self.selected_audio.get('language') if self.selected_audio else None
- data_rows, column_widths = calculate_column_widths()
-
- # Create table with dynamic widths
- table = Table(show_header=True, header_style="cyan", border_style="blue")
- table.add_column("Type", style="cyan bold", width=column_widths[0])
- table.add_column("Available", style="green", width=column_widths[1])
- table.add_column("Set", style="red", width=column_widths[2])
- table.add_column("Downloadable", style="yellow", width=column_widths[3])
-
- # Add all rows to the table
- for row in data_rows:
- table.add_row(*row)
+ # Only process subtitles if mpd_sub_list is not None
+ if self.mpd_sub_list is not None:
+ if "*" in DOWNLOAD_SPECIFIC_SUBTITLE:
+ self.selected_subs = self.mpd_sub_list
+ elif selected_audio_language and selected_audio_language in DOWNLOAD_SPECIFIC_SUBTITLE:
+ # If audio language is in the specific list, prioritize it
+ self.selected_subs = [
+ sub for sub in self.mpd_sub_list
+ if sub.get('language') == selected_audio_language
+ ]
+ else:
+ # Fallback to configured languages
+ self.selected_subs = [
+ sub for sub in self.mpd_sub_list
+ if sub.get('language') in DOWNLOAD_SPECIFIC_SUBTITLE
+ ]
+
+ # If no subtitles match configuration but we have audio language, auto-select matching subtitle
+ if not self.selected_subs and selected_audio_language:
+ matching_subs = [
+ sub for sub in self.mpd_sub_list
+ if sub.get('language') == selected_audio_language
+ ]
+ if matching_subs:
+ console.print(f"[yellow]Auto-selecting subtitle for audio language: {selected_audio_language}")
+ self.selected_subs = matching_subs
+ else:
+ self.selected_subs = []
- console.print(table)
+ # Print table with selections (only once here)
+ self.parser.print_tracks_table(self.selected_video, self.selected_audio, self.selected_subs)
console.print("")
def get_representation_by_type(self, typ):
@@ -199,17 +159,21 @@ def get_representation_by_type(self, typ):
def download_subtitles(self) -> bool:
"""
- Download subtitle files based on configuration with retry mechanism.
+ Download subtitle files based on parser's selected subtitles.
Returns True if successful or if no subtitles to download, False on critical error.
"""
+ if not self.selected_subs or self.mpd_sub_list is None:
+ return True
+
client = create_client(headers={'User-Agent': get_userAgent()})
for sub in self.selected_subs:
try:
- language = sub.get('language', 'unknown')
- fmt = sub.get('format', 'vtt')
+ language = sub.get('language')
+ fmt = sub.get('format')
# Download subtitle
+ console.log(f"[cyan]Downloading subtitle[white]: [red]{language} ({fmt})")
response = client.get(sub.get('url'))
response.raise_for_status()
@@ -228,12 +192,11 @@ def download_subtitles(self) -> bool:
def download_and_decrypt(self, custom_headers=None, query_params=None, key=None) -> bool:
"""
- Download and decrypt video/audio streams. Skips download if file already exists.
+ Download and decrypt video/audio streams using automatic key mapping based on default_KID.
Args:
- custom_headers (dict): Optional HTTP headers for the license request.
- query_params (dict): Optional query parameters to append to the license URL.
- - license_data (str/bytes): Optional raw license data to bypass HTTP request.
- key (str): Optional raw license data to bypass HTTP request.
"""
if self.file_already_exists:
@@ -259,31 +222,44 @@ def download_and_decrypt(self, custom_headers=None, query_params=None, key=None)
key=key
)
- if not keys:
- console.print("[red]No keys found, cannot proceed with download.")
- return False
+ # Map keys to representations based on default_KID
+ key_mapping = map_keys_to_representations(keys, self.parser.representations)
+
+ # Fallback: if only one key is available, use it even if mapping fails/partial
+ single_key = keys[0] if keys and len(keys) == 1 else None
- # Extract the first key for decryption
- KID = keys[0]['kid']
- KEY = keys[0]['key']
+ if not key_mapping:
+ if single_key:
+ console.print("[yellow]Warning: key mapping failed, but only 1 CONTENT key is available. Falling back to the single key for video/audio.")
+ key_mapping = {
+ "video": {"kid": single_key["kid"], "key": single_key["key"], "representation_id": None, "default_kid": None},
+ "audio": {"kid": single_key["kid"], "key": single_key["key"], "representation_id": None, "default_kid": None},
+ }
+ else:
+ console.print("[red]Could not map any keys to representations.")
+ return False
# Download subtitles
self.download_subtitles()
- # Download the video to get segment count
+ # Download and decrypt video
video_rep = self.get_representation_by_type("video")
if video_rep:
- encrypted_path = os.path.join(self.encrypted_dir, f"{video_rep['id']}_encrypted.m4s")
+ video_key_info = key_mapping.get("video")
+ if not video_key_info and single_key:
+ console.print("[yellow]Warning: no mapped key found for video; using the single available key.")
+ video_key_info = {"kid": single_key["kid"], "key": single_key["key"], "representation_id": None, "default_kid": None}
+ if not video_key_info:
+ self.error = "No key found for video representation"
+ return False
+
+ console.log(f"[cyan]Using video key: [red]{video_key_info['kid']} [cyan]for representation [yellow]{video_key_info.get('representation_id')}")
+
+ video_downloader = MPD_Segments(tmp_folder=self.encrypted_dir, representation=video_rep, pssh=self.parser.pssh, custom_headers=custom_headers)
+ encrypted_path = video_downloader.get_concat_path(self.encrypted_dir)
# If m4s file doesn't exist, start downloading
if not os.path.exists(encrypted_path):
- video_downloader = MPD_Segments(
- tmp_folder=self.encrypted_dir,
- representation=video_rep,
- pssh=self.parser.pssh
- )
-
- # Set current downloader for progress tracking
self.current_downloader = video_downloader
self.current_download_type = 'video'
@@ -311,35 +287,37 @@ def download_and_decrypt(self, custom_headers=None, query_params=None, key=None)
self.current_downloader = None
self.current_download_type = None
- # Decrypt video
- decrypted_path = os.path.join(self.decrypted_dir, f"video.{extension_output}")
- result_path = decrypt_with_mp4decrypt(
- "Video", encrypted_path, KID, KEY, output_path=decrypted_path
- )
+ # Decrypt video using the mapped key
+ decrypted_path = os.path.join(self.decrypted_dir, f"video.{EXTENSION_OUTPUT}")
+ result_path = decrypt_with_mp4decrypt("Video", encrypted_path, video_key_info['kid'], video_key_info['key'], output_path=decrypted_path)
if not result_path:
- self.error = "Decryption of video failed"
+ self.error = f"Video decryption failed with key {video_key_info['kid']}"
return False
else:
self.error = "No video found"
return False
- # Now download audio with segment limiting
+ # Download and decrypt audio
audio_rep = self.get_representation_by_type("audio")
if audio_rep:
- encrypted_path = os.path.join(self.encrypted_dir, f"{audio_rep['id']}_encrypted.m4s")
+ audio_key_info = key_mapping.get("audio")
+ if not audio_key_info and single_key:
+ console.print("[yellow]Warning: no mapped key found for audio; using the single available key.")
+ audio_key_info = {"kid": single_key["kid"], "key": single_key["key"], "representation_id": None, "default_kid": None}
+ if not audio_key_info:
+ self.error = "No key found for audio representation"
+ return False
+
+ console.log(f"[cyan]Using audio key: [red]{audio_key_info['kid']} [cyan]for representation [yellow]{audio_key_info.get('representation_id')}")
+
+ audio_language = audio_rep.get('language', 'Unknown')
+ audio_downloader = MPD_Segments(tmp_folder=self.encrypted_dir, representation=audio_rep, pssh=self.parser.pssh, limit_segments=video_segments_count if video_segments_count > 0 else None, custom_headers=custom_headers)
+ encrypted_path = audio_downloader.get_concat_path(self.encrypted_dir)
# If m4s file doesn't exist, start downloading
if not os.path.exists(encrypted_path):
- audio_language = audio_rep.get('language', 'Unknown')
-
- audio_downloader = MPD_Segments(
- tmp_folder=self.encrypted_dir,
- representation=audio_rep,
- pssh=self.parser.pssh,
- limit_segments=video_segments_count if video_segments_count > 0 else None
- )
# Set current downloader for progress tracking
self.current_downloader = audio_downloader
@@ -366,14 +344,12 @@ def download_and_decrypt(self, custom_headers=None, query_params=None, key=None)
self.current_downloader = None
self.current_download_type = None
- # Decrypt audio
- decrypted_path = os.path.join(self.decrypted_dir, f"audio.{extension_output}")
- result_path = decrypt_with_mp4decrypt(
- f"Audio {audio_language}", encrypted_path, KID, KEY, output_path=decrypted_path
- )
+ # Decrypt audio using the mapped key
+ decrypted_path = os.path.join(self.decrypted_dir, f"audio.{EXTENSION_OUTPUT}")
+ result_path = decrypt_with_mp4decrypt(f"Audio {audio_language}", encrypted_path, audio_key_info['kid'], audio_key_info['key'], output_path=decrypted_path)
if not result_path:
- self.error = "Decryption of audio failed"
+ self.error = f"Audio decryption failed with key {audio_key_info['kid']}"
return False
else:
@@ -401,17 +377,15 @@ def download_segments(self, clear=False):
# Download video
video_rep = self.get_representation_by_type("video")
if video_rep:
- encrypted_path = os.path.join(self.encrypted_dir, f"{video_rep['id']}_encrypted.m4s")
-
+ video_downloader = MPD_Segments(
+ tmp_folder=self.encrypted_dir,
+ representation=video_rep,
+ pssh=self.parser.pssh
+ )
+ encrypted_path = video_downloader.get_concat_path(self.encrypted_dir)
+
# If m4s file doesn't exist, start downloading
if not os.path.exists(encrypted_path):
- video_downloader = MPD_Segments(
- tmp_folder=self.encrypted_dir,
- representation=video_rep,
- pssh=self.parser.pssh
- )
-
- # Set current downloader for progress tracking
self.current_downloader = video_downloader
self.current_download_type = 'video'
@@ -441,7 +415,7 @@ def download_segments(self, clear=False):
self.current_download_type = None
# NO DECRYPTION: just copy/move to decrypted folder
- decrypted_path = os.path.join(self.decrypted_dir, f"video.{extension_output}")
+ decrypted_path = os.path.join(self.decrypted_dir, f"video.{EXTENSION_OUTPUT}")
if os.path.exists(encrypted_path) and not os.path.exists(decrypted_path):
shutil.copy2(encrypted_path, decrypted_path)
@@ -453,20 +427,12 @@ def download_segments(self, clear=False):
# Download audio with segment limiting
audio_rep = self.get_representation_by_type("audio")
if audio_rep:
- encrypted_path = os.path.join(self.encrypted_dir, f"{audio_rep['id']}_encrypted.m4s")
-
+ audio_language = audio_rep.get('language', 'Unknown')
+ audio_downloader = MPD_Segments(tmp_folder=self.encrypted_dir, representation=audio_rep, pssh=self.parser.pssh, limit_segments=video_segments_count if video_segments_count > 0 else None)
+ encrypted_path = audio_downloader.get_concat_path(self.encrypted_dir)
+
# If m4s file doesn't exist, start downloading
if not os.path.exists(encrypted_path):
- audio_language = audio_rep.get('language', 'Unknown')
-
- audio_downloader = MPD_Segments(
- tmp_folder=self.encrypted_dir,
- representation=audio_rep,
- pssh=self.parser.pssh,
- limit_segments=video_segments_count if video_segments_count > 0 else None
- )
-
- # Set current downloader for progress tracking
self.current_downloader = audio_downloader
self.current_download_type = f"audio_{audio_language}"
@@ -493,7 +459,7 @@ def download_segments(self, clear=False):
self.current_download_type = None
# NO DECRYPTION: just copy/move to decrypted folder
- decrypted_path = os.path.join(self.decrypted_dir, f"audio.{extension_output}")
+ decrypted_path = os.path.join(self.decrypted_dir, f"audio.{EXTENSION_OUTPUT}")
if os.path.exists(encrypted_path) and not os.path.exists(decrypted_path):
shutil.copy2(encrypted_path, decrypted_path)
@@ -514,8 +480,8 @@ def finalize_output(self):
return output_file
# Definition of decrypted files
- video_file = os.path.join(self.decrypted_dir, f"video.{extension_output}")
- audio_file = os.path.join(self.decrypted_dir, f"audio.{extension_output}")
+ video_file = os.path.join(self.decrypted_dir, f"video.{EXTENSION_OUTPUT}")
+ audio_file = os.path.join(self.decrypted_dir, f"audio.{EXTENSION_OUTPUT}")
output_file = self.original_output_path
# Set the output file path for status tracking
@@ -543,7 +509,7 @@ def finalize_output(self):
return None
# Merge subtitles if available
- if MERGE_SUBTITLE and self.selected_subs:
+ if MERGE_SUBTITLE and self.selected_subs and self.mpd_sub_list is not None:
# Check which subtitle files actually exist
existing_sub_tracks = []
diff --git a/StreamingCommunity/Lib/DASH/parser.py b/StreamingCommunity/Lib/DASH/parser.py
index ea6d0009a..bcddaec2d 100644
--- a/StreamingCommunity/Lib/DASH/parser.py
+++ b/StreamingCommunity/Lib/DASH/parser.py
@@ -1,53 +1,45 @@
# 25.07.25
-import re
+import json
import logging
-from urllib.parse import urljoin
-import xml.etree.ElementTree as ET
+from urllib.parse import urljoin, urlparse
from typing import List, Dict, Optional, Tuple, Any
+from pathlib import Path
+from datetime import datetime
+from isodate import parse_duration
-# External library
+# External libraries
+from lxml import etree
from curl_cffi import requests
from rich.console import Console
+from rich.table import Table
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
-# Variable
+# Variables
console = Console()
max_timeout = config_manager.get_int('REQUESTS', 'timeout')
-max_retry = config_manager.get_int('REQUESTS', 'max_retry')
-
+FILTER_CUSTOM_RESOLUTION = str(config_manager.get('M3U8_CONVERSION', 'force_resolution')).strip().lower()
+DOWNLOAD_SPECIFIC_AUDIO = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_audio')
class CodecQuality:
- """Utility class to rank codec quality"""
VIDEO_CODEC_RANK = {
- 'av01': 5, # AV1
- 'vp9': 4, # VP9
- 'vp09': 4, # VP9
- 'hev1': 3, # HEVC/H.265
- 'hvc1': 3, # HEVC/H.265
- 'avc1': 2, # H.264
- 'avc3': 2, # H.264
- 'mp4v': 1, # MPEG-4
+ 'av01': 5, 'vp9': 4, 'vp09': 4, 'hev1': 3,
+ 'hvc1': 3, 'avc1': 2, 'avc3': 2, 'mp4v': 1,
}
AUDIO_CODEC_RANK = {
- 'opus': 5, # Opus
- 'mp4a.40.2': 4, # AAC-LC
- 'mp4a.40.5': 3, # AAC-HE
- 'mp4a': 2, # Generic AAC
- 'ac-3': 2, # Dolby Digital
- 'ec-3': 3, # Dolby Digital Plus
+ 'opus': 5, 'mp4a.40.2': 4, 'mp4a.40.5': 3,
+ 'mp4a': 2, 'ac-3': 2, 'ec-3': 3,
}
@staticmethod
def get_video_codec_rank(codec: Optional[str]) -> int:
- """Get ranking for video codec"""
if not codec:
return 0
codec_lower = codec.lower()
@@ -58,7 +50,6 @@ def get_video_codec_rank(codec: Optional[str]) -> int:
@staticmethod
def get_audio_codec_rank(codec: Optional[str]) -> int:
- """Get ranking for audio codec"""
if not codec:
return 0
codec_lower = codec.lower()
@@ -66,603 +57,885 @@ def get_audio_codec_rank(codec: Optional[str]) -> int:
if codec_lower.startswith(key):
return rank
return 0
+
+
+class DurationUtils:
+ """Utilities for handling ISO-8601 durations"""
+ @staticmethod
+ def parse_duration(duration_str: Optional[str]) -> int:
+ """Parse ISO-8601 duration to seconds using isodate library"""
+ if not duration_str:
+ return 0
+ try:
+ duration = parse_duration(duration_str)
+ return int(duration.total_seconds())
+ except Exception:
+ return 0
+
+ @staticmethod
+ def format_duration(seconds: int) -> str:
+ """Format seconds like '~48m55s' or '~1h02m03s'"""
+ if not seconds or seconds < 0:
+ return ""
+
+ h = seconds // 3600
+ m = (seconds % 3600) // 60
+ s = seconds % 60
+
+ if h > 0:
+ return f"~{h}h{m:02d}m{s:02d}s"
+ return f"~{m}m{s:02d}s"
-class URLBuilder:
+class URLBuilder:
+ """Handles URL construction with template substitution"""
+
@staticmethod
- def build_url(base: str, template: str, rep_id: Optional[str] = None, number: Optional[int] = None, time: Optional[int] = None, bandwidth: Optional[int] = None) -> str:
- """Build absolute URL preserving query/hash"""
+ def build_url(base: str, template: str, rep_id: Optional[str] = None, number: Optional[int] = None, time: Optional[int] = None, bandwidth: Optional[int] = None) -> Optional[str]:
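+        """Build an absolute URL from a DASH template, preserving query and fragment.
+
+        e.g. build_url('https://cdn.example/v/', '$RepresentationID$/seg_$Number%05d$.m4s', rep_id='v1', number=3)
+        returns 'https://cdn.example/v/v1/seg_00003.m4s' (illustrative host).
+        """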
if not template:
return None
- # Substitute RepresentationID and Bandwidth first
+ # Substitute placeholders
if rep_id is not None:
template = template.replace('$RepresentationID$', rep_id)
if bandwidth is not None:
template = template.replace('$Bandwidth$', str(bandwidth))
-
- # Handle $Number$ with optional formatting
- template = URLBuilder._replace_number(template, number)
-
- # Replace $Time$ if present
- if '$Time$' in template and time is not None:
+ if time is not None:
template = template.replace('$Time$', str(time))
+
+        # Handle $Number$ with an optional printf-style width (e.g., $Number%05d$)
+        if '$Number' in template:
+            num = number if number is not None else 0
+
+            # Replace a width-formatted token such as $Number%05d$ with the zero-padded number,
+            # rebuilding the token from the parsed width so any width (not only 5) is handled
+            if '%0' in template and 'd$' in template:
+                start = template.find('%0')
+                end = template.find('d$', start)
+                if start != -1 and end != -1:
+                    width_str = template[start + 2:end]
+                    try:
+                        width = int(width_str)
+                        template = template.replace(f'$Number%0{width_str}d$', str(num).zfill(width))
+                    except ValueError:
+                        pass
+
+            template = template.replace('$Number$', str(num))
return URLBuilder._finalize_url(base, template)
- @staticmethod
- def _replace_number(template: str, number: Optional[int]) -> str:
- """Handle $Number$ placeholder with formatting"""
- def _replace_number_match(match):
- num = number if number is not None else 0
- fmt = match.group(1)
-
- if fmt:
- # fmt like %05d -> convert to python format
- m = re.match(r'%0(\d+)d', fmt)
- if m:
- width = int(m.group(1))
- return str(num).zfill(width)
-
- return str(num)
-
- return re.sub(r'\$Number(\%0\d+d)?\$', _replace_number_match, template)
-
@staticmethod
def _finalize_url(base: str, template: str) -> str:
"""Finalize URL construction preserving query and fragment"""
-
- # Split path/query/fragment to avoid urljoin mangling query
- split = template.split('#', 1)
- path_and_query = split[0]
- frag = ('#' + split[1]) if len(split) == 2 else ''
+ parts = template.split('#', 1)
+ path_and_query = parts[0]
+ fragment = ('#' + parts[1]) if len(parts) == 2 else ''
if '?' in path_and_query:
- path_part, query_part = path_and_query.split('?', 1)
- abs_path = urljoin(base, path_part)
+ path, query = path_and_query.split('?', 1)
+ abs_path = urljoin(base, path)
+ return abs_path + '?' + query + fragment
+ else:
+ return urljoin(base, path_and_query) + fragment
+
- # ensure we don't accidentally lose existing query separators
- final = abs_path + '?' + query_part + frag
+class NamespaceManager:
+ """Manages XML namespaces for DASH manifests"""
+
+ def __init__(self, root: etree._Element):
+ self.nsmap = self._extract_namespaces(root)
+
+ @staticmethod
+ def _extract_namespaces(root: etree._Element) -> Dict[str, str]:
+ """Extract namespaces from root element"""
+ nsmap = {}
+ if root.nsmap:
+ # Use 'mpd' as default prefix for the main namespace
+ nsmap['mpd'] = root.nsmap.get(None) or 'urn:mpeg:dash:schema:mpd:2011'
+ nsmap['cenc'] = 'urn:mpeg:cenc:2013'
+
+ # Add other namespaces if present
+ for prefix, uri in root.nsmap.items():
+ if prefix is not None:
+ nsmap[prefix] = uri
else:
- abs_path = urljoin(base, path_and_query)
- final = abs_path + frag
+ # Fallback to default DASH namespace
+ nsmap['mpd'] = 'urn:mpeg:dash:schema:mpd:2011'
+ nsmap['cenc'] = 'urn:mpeg:cenc:2013'
+ return nsmap
+
+ def find(self, element: etree._Element, path: str) -> Optional[etree._Element]:
+ """Find element using namespace-aware XPath"""
+ return element.find(path, namespaces=self.nsmap)
+
+ def findall(self, element: etree._Element, path: str) -> List[etree._Element]:
+ """Find all elements using namespace-aware XPath"""
+ return element.findall(path, namespaces=self.nsmap)
+
+
+class BaseURLResolver:
+ """Resolves base URLs at different MPD hierarchy levels"""
+
+ def __init__(self, mpd_url: str, ns_manager: NamespaceManager):
+ self.mpd_url = mpd_url
+ self.ns = ns_manager
+
+ def get_initial_base_url(self, root: etree._Element) -> str:
+ """Get base URL from MPD root"""
+ base_url = self.mpd_url.rsplit('/', 1)[0] + '/'
+
+ base_elem = self.ns.find(root, 'mpd:BaseURL')
+ if base_elem is not None and base_elem.text:
+ base_text = base_elem.text.strip()
+ base_url = base_text if base_text.startswith('http') else urljoin(base_url, base_text)
+
+ return base_url
+
+ def resolve_base_url(self, element: etree._Element, current_base: str) -> str:
+ """Resolve base URL for any element"""
+ base_elem = self.ns.find(element, 'mpd:BaseURL')
+ if base_elem is not None and base_elem.text:
+ base_text = base_elem.text.strip()
+ return base_text if base_text.startswith('http') else urljoin(current_base, base_text)
+ return current_base
+
+
+class ContentProtectionHandler:
+ """Handles DRM and content protection"""
+ def __init__(self, ns_manager: NamespaceManager):
+ self.ns = ns_manager
+
+ def is_protected(self, element: etree._Element) -> bool:
+ """Check if element has DRM protection"""
+ for cp in self.ns.findall(element, 'mpd:ContentProtection'):
+ scheme_id = (cp.get('schemeIdUri') or '').lower()
+ value = (cp.get('value') or '').lower()
+
+ # Check for CENC or Widevine
+            if 'urn:mpeg:dash:mp4protection:2011' in scheme_id and value:
+ return True
+ if 'edef8ba9-79d6-4ace-a3c8-27dcd51d21ed' in scheme_id: # Widevine UUID
+ return True
+
+ return False
+
+ def extract_default_kid(self, element: etree._Element) -> Optional[str]:
+ """Extract default_KID from ContentProtection elements (Widevine/PlayReady/CENC).
+ """
+ def _extract_kid_from_cp(cp: etree._Element) -> Optional[str]:
+ kid = (cp.get('{urn:mpeg:cenc:2013}default_KID') or cp.get('default_KID') or cp.get('cenc:default_KID'))
+
+ # Fallback: any attribute key that ends with 'default_KID' (case-insensitive)
+ if not kid:
+ for k, v in (cp.attrib or {}).items():
+ if isinstance(k, str) and k.lower().endswith('default_kid') and v:
+ kid = v
+ break
- return final
+ if not kid:
+ return None
+ # Normalize UUID -> hex (no dashes), lowercase
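+            # e.g. '01234567-89AB-CDEF-0123-456789ABCDEF' -> '0123456789abcdef0123456789abcdef'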
+ return kid.strip().replace('-', '').lower()
-class SegmentTimelineParser:
- """Parser for SegmentTimeline elements"""
+ cps = self.ns.findall(element, 'mpd:ContentProtection')
+ if not cps:
+ return None
+
+        # Prefer Widevine and mp4protection CPs (in document order), then any other CP that carries a KID.
+ preferred = []
+ fallback = []
+
+ for cp in cps:
+ scheme_id = (cp.get('schemeIdUri') or '').lower()
+ if 'edef8ba9-79d6-4ace-a3c8-27dcd51d21ed' in scheme_id: # Widevine
+ preferred.append(cp)
+ elif 'urn:mpeg:dash:mp4protection:2011' in scheme_id:
+ preferred.append(cp)
+ else:
+ fallback.append(cp)
+
+ for cp in preferred + fallback:
+ kid = _extract_kid_from_cp(cp)
+ if kid:
+ return kid
+
+ return None
- def __init__(self, namespace: Dict[str, str]):
- self.ns = namespace
+ def extract_pssh(self, root: etree._Element) -> Optional[str]:
+ """Extract PSSH (Protection System Specific Header)"""
+ # Try Widevine first
+ for cp in self.ns.findall(root, './/mpd:ContentProtection'):
+ scheme_id = cp.get('schemeIdUri', '')
+ if 'edef8ba9-79d6-4ace-a3c8-27dcd51d21ed' in scheme_id:
+ pssh = self.ns.find(cp, 'cenc:pssh')
+ if pssh is not None and pssh.text:
+ return pssh.text.strip()
+
+ # Fallback to any PSSH
+ for cp in self.ns.findall(root, './/mpd:ContentProtection'):
+ pssh = self.ns.find(cp, 'cenc:pssh')
+ if pssh is not None and pssh.text:
+ console.print("Found PSSH (fallback)")
+ return pssh.text.strip()
+
+ return None
- def parse(self, seg_timeline_element, start_number: int = 1) -> Tuple[List[int], List[int]]:
- """
- Parse SegmentTimeline and return (number_list, time_list)
- """
+
+class SegmentTimelineParser:
+ """Parses SegmentTimeline elements"""
+
+ def __init__(self, ns_manager: NamespaceManager):
+ self.ns = ns_manager
+
+ def parse(self, seg_template: etree._Element, start_number: int = 1) -> Tuple[List[int], List[int]]:
+ """Parse SegmentTimeline and return (number_list, time_list)"""
+ seg_timeline = self.ns.find(seg_template, 'mpd:SegmentTimeline')
+ if seg_timeline is None:
+ return [], []
+
number_list = []
time_list = []
-
- if seg_timeline_element is None:
- return number_list, time_list
-
current_time = 0
current_number = start_number
- for s_element in seg_timeline_element.findall('mpd:S', self.ns):
- d = s_element.get('d')
+ for s_elem in self.ns.findall(seg_timeline, 'mpd:S'):
+ d = s_elem.get('d')
if d is None:
continue
-
- d = int(d)
- # Handle 't' attribute (explicit time)
- if s_element.get('t') is not None:
- current_time = int(s_element.get('t'))
+ d = int(d)
- # Get repeat count (default 0 means 1 segment)
- r = int(s_element.get('r', 0))
+ # Explicit time
+ if s_elem.get('t') is not None:
+ current_time = int(s_elem.get('t'))
- # Special case: r=-1 means repeat until end of Period
+ # Repeat count
+ r = int(s_elem.get('r', 0))
if r == -1:
- r = 0
+                r = 0  # r=-1 means repeat until Period end; fall back to a single segment
- # Add (r+1) segments
- for i in range(r + 1):
+ # Add segments
+ for _ in range(r + 1):
number_list.append(current_number)
time_list.append(current_time)
current_number += 1
current_time += d
-
+
return number_list, time_list
-class RepresentationParser:
- """Parser for individual representations"""
+class SegmentURLBuilder:
+ """Builds segment URLs from SegmentTemplate"""
- def __init__(self, mpd_url: str, namespace: Dict[str, str]):
- self.mpd_url = mpd_url
- self.ns = namespace
- self.timeline_parser = SegmentTimelineParser(namespace)
-
- def _resolve_adaptation_base_url(self, adapt_set, initial_base: str) -> str:
- """Resolve base URL at AdaptationSet level"""
- base = initial_base
-
- # Check for BaseURL at AdaptationSet level
- adapt_base = adapt_set.find('mpd:BaseURL', self.ns)
- if adapt_base is not None and adapt_base.text:
- base_text = adapt_base.text.strip()
- if base_text.startswith('http'):
- base = base_text
+ def __init__(self, ns_manager: NamespaceManager):
+ self.ns = ns_manager
+ self.timeline_parser = SegmentTimelineParser(ns_manager)
+
+ def build_urls(self, seg_template: etree._Element, rep_id: str, bandwidth: int, base_url: str, period_duration: int = 0) -> Tuple[Optional[str], List[str], int, float]:
+ """Build initialization and segment URLs"""
+ init_template = seg_template.get('initialization')
+ media_template = seg_template.get('media')
+ start_number = int(seg_template.get('startNumber', 1))
+ timescale = int(seg_template.get('timescale', 1) or 1)
+ duration_attr = seg_template.get('duration')
+
+ # Build init URL
+ init_url = None
+ if init_template:
+ init_url = URLBuilder.build_url(base_url, init_template, rep_id=rep_id, bandwidth=bandwidth)
+
+ # Parse timeline
+ number_list, time_list = self.timeline_parser.parse(seg_template, start_number)
+
+ segment_count = 0
+ segment_duration = 0.0
+
+ # Determine segment count
+ if time_list:
+ segment_count = len(time_list)
+ elif number_list:
+ segment_count = len(number_list)
+        elif duration_attr:
+            # Estimate from @duration and @timescale
+ d = int(duration_attr)
+ segment_duration = d / float(timescale)
+
+ if period_duration > 0 and segment_duration > 0:
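+                # e.g. duration=192000, timescale=48000 -> 4s segments; a 3600s period gives ~900 segments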
+ segment_count = int((period_duration / segment_duration) + 0.5)
else:
- base = urljoin(base, base_text)
+ segment_count = 100
+
+ max_segments = min(segment_count, 20000)
+ number_list = list(range(start_number, start_number + max_segments))
+ else:
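+            # No SegmentTimeline and no @duration: fall back to a guess of 100 segments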
+ segment_count = 100
+ number_list = list(range(start_number, start_number + 100))
- return base
-
- def parse_adaptation_set(self, adapt_set, base_url: str) -> List[Dict[str, Any]]:
- """
- Parse all representations in an adaptation set
- """
- representations = []
- mime_type = adapt_set.get('mimeType', '')
- lang = adapt_set.get('lang', '')
+ # Build segment URLs
+ segment_urls = self._build_segment_urls(
+ media_template, base_url, rep_id, bandwidth, number_list, time_list
+ )
- # Find SegmentTemplate at AdaptationSet level
- adapt_seg_template = adapt_set.find('mpd:SegmentTemplate', self.ns)
+ if not segment_count:
+ segment_count = len(segment_urls)
- # Risolvi il BaseURL a livello di AdaptationSet
- adapt_base_url = self._resolve_adaptation_base_url(adapt_set, base_url)
-
- for rep_element in adapt_set.findall('mpd:Representation', self.ns):
- representation = self._parse_representation(
- rep_element, adapt_set, adapt_seg_template,
- adapt_base_url,
- mime_type, lang
- )
- if representation:
- representations.append(representation)
-
- return representations
-
- def _parse_representation(self, rep_element, adapt_set, adapt_seg_template, base_url: str, mime_type: str, lang: str) -> Optional[Dict[str, Any]]:
- """Parse a single representation"""
- rep_id = rep_element.get('id')
- bandwidth = rep_element.get('bandwidth')
- codecs = rep_element.get('codecs')
- width = rep_element.get('width')
- height = rep_element.get('height')
- audio_sampling_rate = rep_element.get('audioSamplingRate')
-
- # Try to find SegmentTemplate at Representation level
- rep_seg_template = rep_element.find('mpd:SegmentTemplate', self.ns)
- seg_tmpl = rep_seg_template if rep_seg_template is not None else adapt_seg_template
+ return init_url, segment_urls, segment_count, segment_duration
+
+    def _build_segment_urls(self, template: Optional[str], base_url: str, rep_id: str, bandwidth: int, number_list: List[int], time_list: List[int]) -> List[str]:
+ """Build list of segment URLs"""
+ if not template:
+ return []
- if seg_tmpl is None:
- return None
+ urls = []
+
+ if '$Time$' in template and time_list:
+ for t in time_list:
+ urls.append(URLBuilder.build_url(base_url, template, rep_id=rep_id, time=t, bandwidth=bandwidth))
+ elif '$Number' in template and number_list:
+ for n in number_list:
+ urls.append(URLBuilder.build_url(base_url, template, rep_id=rep_id, number=n, bandwidth=bandwidth))
+ else:
+ urls.append(URLBuilder.build_url(base_url, template, rep_id=rep_id, bandwidth=bandwidth))
+
+ return urls
- # Build URLs
- rep_base_url = self._resolve_base_url(rep_element, adapt_set, base_url)
- init_url, media_urls = self._build_segment_urls(seg_tmpl, rep_id, bandwidth, rep_base_url)
- # Determine content type first
- content_type = 'unknown'
+class MetadataExtractor:
+ """Extracts metadata from representations"""
+
+ def __init__(self, ns_manager: NamespaceManager):
+ self.ns = ns_manager
+
+ def get_audio_channels(self, rep_elem: etree._Element, adapt_elem: etree._Element) -> int:
+ """Extract audio channel count"""
+ for parent in (rep_elem, adapt_elem):
+ if parent is None:
+ continue
+
+ for acc in self.ns.findall(parent, 'mpd:AudioChannelConfiguration'):
+ val = acc.get('value')
+ if val:
+ try:
+ return int(val)
+ except ValueError:
+ pass
+ return 0
+
+ @staticmethod
+ def parse_frame_rate(frame_rate: Optional[str]) -> float:
+ """Parse frame rate (e.g., '25' or '30000/1001')"""
+ if not frame_rate:
+ return 0.0
+
+ fr = frame_rate.strip()
+ if '/' in fr:
+ try:
+ num, den = fr.split('/', 1)
+ return float(num) / float(den)
+ except Exception:
+ return 0.0
+
+ try:
+ return float(fr)
+ except Exception:
+ return 0.0
+
+ @staticmethod
+ def determine_content_type(mime_type: str, width: int, height: int, audio_sampling_rate: int, codecs: str) -> str:
+ """Determine if content is video, audio, or other"""
if mime_type:
- content_type = mime_type.split('/')[0]
+ return mime_type.split('/')[0]
elif width or height:
- content_type = 'video'
+ return 'video'
elif audio_sampling_rate or (codecs and 'mp4a' in codecs.lower()):
- content_type = 'audio'
-
- # Clean language: convert None, empty string, or "undefined" to None
- # For audio tracks without language, generate a generic name
- clean_lang = None
+ return 'audio'
+ return 'unknown'
+
+ @staticmethod
+ def clean_language(lang: str, content_type: str, rep_id: str, bandwidth: int) -> Optional[str]:
+ """Clean and normalize language tag"""
if lang and lang.lower() not in ['undefined', 'none', '']:
- clean_lang = lang
+ return lang
elif content_type == 'audio':
+ return f"aud_{rep_id}" if rep_id else f"aud_{bandwidth or 0}"
+ return None
- # Generate generic audio track name based on rep_id or bandwidth
- if rep_id:
- clean_lang = f"aud_{rep_id}"
- else:
- clean_lang = f"aud_{bandwidth or '0'}"
- return {
+class RepresentationParser:
+ """Parses DASH representations"""
+
+ def __init__(self, ns_manager: NamespaceManager, url_resolver: BaseURLResolver):
+ self.ns = ns_manager
+ self.url_resolver = url_resolver
+ self.segment_builder = SegmentURLBuilder(ns_manager)
+ self.protection_handler = ContentProtectionHandler(ns_manager)
+ self.metadata_extractor = MetadataExtractor(ns_manager)
+
+ def parse_adaptation_set(
+ self,
+ adapt_set: etree._Element,
+ base_url: str,
+ period_duration: int = 0
+ ) -> List[Dict[str, Any]]:
+ """Parse all representations in adaptation set"""
+ representations = []
+
+ # Adaptation set attributes
+ mime_type = adapt_set.get('mimeType', '')
+ lang = adapt_set.get('lang', '')
+ adapt_frame_rate = adapt_set.get('frameRate')
+ content_type = adapt_set.get('contentType', '')
+
+ # Resolve base URL
+ adapt_base = self.url_resolver.resolve_base_url(adapt_set, base_url)
+
+ # Check protection and extract default_KID
+ adapt_protected = self.protection_handler.is_protected(adapt_set)
+ adapt_default_kid = self.protection_handler.extract_default_kid(adapt_set)
+
+ # Get segment template
+ adapt_seg_template = self.ns.find(adapt_set, 'mpd:SegmentTemplate')
+
+ # Parse each representation
+ for rep_elem in self.ns.findall(adapt_set, 'mpd:Representation'):
+ rep = self._parse_representation(
+ rep_elem, adapt_set, adapt_seg_template,
+ adapt_base, mime_type, lang, period_duration
+ )
+
+ if rep:
+ rep_frame_rate = rep_elem.get('frameRate') or adapt_frame_rate
+ rep['frame_rate'] = self.metadata_extractor.parse_frame_rate(rep_frame_rate)
+ rep['channels'] = self.metadata_extractor.get_audio_channels(rep_elem, adapt_set)
+ rep_protected = adapt_protected or self.protection_handler.is_protected(rep_elem)
+ rep['protected'] = bool(rep_protected)
+ rep_default_kid = self.protection_handler.extract_default_kid(rep_elem) or adapt_default_kid
+ rep['default_kid'] = rep_default_kid
+ if content_type:
+ rep['type'] = content_type
+
+ representations.append(rep)
+
+ return representations
+
+ def _parse_representation(self, rep_elem: etree._Element, adapt_set: etree._Element, adapt_seg_template: Optional[etree._Element], base_url: str, mime_type: str, lang: str, period_duration: int) -> Optional[Dict[str, Any]]:
+ """Parse single representation"""
+ rep_id = rep_elem.get('id')
+ bandwidth = int(rep_elem.get('bandwidth', 0))
+ codecs = rep_elem.get('codecs')
+ width = int(rep_elem.get('width', 0))
+ height = int(rep_elem.get('height', 0))
+ audio_sampling_rate = int(rep_elem.get('audioSamplingRate', 0))
+
+ # Find segment template
+ rep_seg_template = self.ns.find(rep_elem, 'mpd:SegmentTemplate')
+ seg_template = rep_seg_template if rep_seg_template is not None else adapt_seg_template
+
+ # Handle SegmentBase (single file)
+ if seg_template is None:
+ return self._parse_segment_base(rep_elem, base_url, rep_id, bandwidth, codecs, width, height, audio_sampling_rate, mime_type, lang)
+
+ # Build segment URLs
+ rep_base = self.url_resolver.resolve_base_url(rep_elem, base_url)
+ init_url, segment_urls, seg_count, seg_duration = self.segment_builder.build_urls(
+ seg_template, rep_id, bandwidth, rep_base, period_duration
+ )
+
+ # Determine content type and language
+ content_type = self.metadata_extractor.determine_content_type(mime_type, width, height, audio_sampling_rate, codecs)
+ clean_lang = self.metadata_extractor.clean_language(lang, content_type, rep_id, bandwidth)
+
+ rep_data = {
'id': rep_id,
'type': content_type,
'codec': codecs,
- 'bandwidth': int(bandwidth) if bandwidth else 0,
- 'width': int(width) if width else 0,
- 'height': int(height) if height else 0,
- 'audio_sampling_rate': int(audio_sampling_rate) if audio_sampling_rate else 0,
+ 'bandwidth': bandwidth,
+ 'width': width,
+ 'height': height,
+ 'audio_sampling_rate': audio_sampling_rate,
'language': clean_lang,
'init_url': init_url,
- 'segment_urls': media_urls
+ 'segment_urls': segment_urls,
+ 'segment_count': seg_count,
}
-
- def _resolve_base_url(self, rep_element, adapt_set, initial_base: str) -> str:
- """Resolve base URL at Representation level (AdaptationSet already resolved)"""
- base = initial_base
- # Representation-level BaseURL only
- if rep_element is not None:
- rep_base = rep_element.find('mpd:BaseURL', self.ns)
- if rep_base is not None and rep_base.text:
- base_text = rep_base.text.strip()
- if base_text.startswith('http'):
- base = base_text
- else:
- base = urljoin(base, base_text)
-
- return base
-
- def _build_segment_urls(self, seg_tmpl, rep_id: str, bandwidth: str, base_url: str) -> Tuple[str, List[str]]:
- """Build initialization and media segment URLs"""
- init = seg_tmpl.get('initialization')
- media = seg_tmpl.get('media')
- start_number = int(seg_tmpl.get('startNumber', 1))
-
- # Build init URL
- init_url = URLBuilder.build_url(
- base_url, init,
- rep_id=rep_id,
- bandwidth=int(bandwidth) if bandwidth else None
- ) if init else None
-
- # Parse segment timeline
- seg_timeline = seg_tmpl.find('mpd:SegmentTimeline', self.ns)
- number_list, time_list = self.timeline_parser.parse(seg_timeline, start_number)
-
- # Fallback solo se non c'è SegmentTimeline
- if not number_list and not time_list:
- number_list = list(range(start_number, start_number + 100))
- time_list = []
-
- # Build media URLs
- media_urls = self._build_media_urls(media, base_url, rep_id, bandwidth, number_list, time_list)
-
- return init_url, media_urls
-
- def _build_media_urls(self, media_template: str, base_url: str, rep_id: str, bandwidth: str, number_list: List[int], time_list: List[int]) -> List[str]:
- """Build list of media segment URLs"""
- if not media_template:
- return []
-
- media_urls = []
- bandwidth_int = int(bandwidth) if bandwidth else None
-
- if '$Time$' in media_template and time_list:
- for t in time_list:
- media_urls.append(URLBuilder.build_url(
- base_url, media_template,
- rep_id=rep_id, time=t, bandwidth=bandwidth_int
- ))
- elif '$Number' in media_template and number_list:
- for n in number_list:
- media_urls.append(URLBuilder.build_url(
- base_url, media_template,
- rep_id=rep_id, number=n, bandwidth=bandwidth_int
- ))
- else:
- media_urls.append(URLBuilder.build_url(
- base_url, media_template,
- rep_id=rep_id, bandwidth=bandwidth_int
- ))
-
- return media_urls
-
-
-class MPD_Parser:
- @staticmethod
- def _is_ad_period(period_id: str, base_url: str) -> bool:
- """
- Detect if a Period is an advertisement or bumper.
- Returns True if it's an ad, False if it's main content.
- """
- ad_indicators = [
- '_ad/', # Generic ad marker in URL
- 'ad_bumper', # Ad bumper
- '/creative/', # Ad creative folder
- '_OandO/', # Pluto TV bumpers
- ]
-
- # Check BaseURL for ad indicators
- for indicator in ad_indicators:
- if indicator in base_url:
- return True
+ if seg_duration:
+ rep_data['segment_duration_seconds'] = seg_duration
- # Check Period ID for patterns
- if period_id:
- if '_subclip_' in period_id:
- return False
- # Short periods (< 60s) are usually ads/bumpers
+ return rep_data
+
+ def _parse_segment_base(self, rep_elem: etree._Element, base_url: str, rep_id: str, bandwidth: int, codecs: str, width: int, height: int, audio_sampling_rate: int, mime_type: str, lang: str) -> Optional[Dict[str, Any]]:
+ """Parse representation with SegmentBase (single file)"""
+ seg_base = self.ns.find(rep_elem, 'mpd:SegmentBase')
+ rep_base = self.ns.find(rep_elem, 'mpd:BaseURL')
- return False
+ if seg_base is None or rep_base is None or not (rep_base.text or "").strip():
+ return None
+
+ media_url = urljoin(base_url, rep_base.text.strip())
+ content_type = self.metadata_extractor.determine_content_type(mime_type, width, height, audio_sampling_rate, codecs)
+ clean_lang = self.metadata_extractor.clean_language(lang, content_type, rep_id, bandwidth)
+
+ return {
+ 'id': rep_id,
+ 'type': content_type,
+ 'codec': codecs,
+ 'bandwidth': bandwidth,
+ 'width': width,
+ 'height': height,
+ 'audio_sampling_rate': audio_sampling_rate,
+ 'language': clean_lang,
+ 'init_url': media_url,
+ 'segment_urls': [media_url],
+ 'segment_count': 1,
+ }
+
+
+class RepresentationFilter:
+ """Filters and deduplicates representations"""
@staticmethod
- def _deduplicate_videos(representations: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
- """
- Remove duplicate video representations with same resolution.
- Keep the one with best codec, then highest bandwidth.
- """
+ def deduplicate_videos(reps: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+ """Keep best video per resolution"""
resolution_map = {}
- for rep in representations:
+ for rep in reps:
key = (rep['width'], rep['height'])
if key not in resolution_map:
resolution_map[key] = rep
else:
existing = resolution_map[key]
+ existing_rank = CodecQuality.get_video_codec_rank(existing['codec'])
+ new_rank = CodecQuality.get_video_codec_rank(rep['codec'])
- # Compare codec quality first
- existing_codec_rank = CodecQuality.get_video_codec_rank(existing['codec'])
- new_codec_rank = CodecQuality.get_video_codec_rank(rep['codec'])
-
- if new_codec_rank > existing_codec_rank:
- resolution_map[key] = rep
- elif new_codec_rank == existing_codec_rank and rep['bandwidth'] > existing['bandwidth']:
+ if new_rank > existing_rank or (new_rank == existing_rank and rep['bandwidth'] > existing['bandwidth']):
resolution_map[key] = rep
return list(resolution_map.values())
@staticmethod
- def _deduplicate_audios(representations: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
- """
- Remove duplicate audio representations.
- Group by (language, sampling_rate) and keep the one with best codec, then highest bandwidth.
- """
+ def deduplicate_audios(reps: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+ """Keep best audio per language"""
audio_map = {}
- for rep in representations:
-
- # Use both language and sampling rate as key to differentiate audio tracks
+ for rep in reps:
key = (rep['language'], rep['audio_sampling_rate'])
if key not in audio_map:
audio_map[key] = rep
else:
existing = audio_map[key]
+ existing_rank = CodecQuality.get_audio_codec_rank(existing['codec'])
+ new_rank = CodecQuality.get_audio_codec_rank(rep['codec'])
- # Compare codec quality first
- existing_codec_rank = CodecQuality.get_audio_codec_rank(existing['codec'])
- new_codec_rank = CodecQuality.get_audio_codec_rank(rep['codec'])
-
- if new_codec_rank > existing_codec_rank:
- audio_map[key] = rep
- elif new_codec_rank == existing_codec_rank and rep['bandwidth'] > existing['bandwidth']:
+ if new_rank > existing_rank or (new_rank == existing_rank and rep['bandwidth'] > existing['bandwidth']):
audio_map[key] = rep
return list(audio_map.values())
+
+class AdPeriodDetector:
+ """Detects advertisement periods"""
+
+ AD_INDICATORS = ['_ad/', 'ad_bumper', '/creative/', '_OandO/']
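+    # e.g. a period BaseURL like 'https://cdn.example/creative/bumper_01/' matches '/creative/' (illustrative URL)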
+
@staticmethod
- def get_worst(representations):
- """
- Returns the video representation with the lowest resolution/bandwidth, or audio with lowest bandwidth.
- """
- videos = [r for r in representations if r['type'] == 'video']
- audios = [r for r in representations if r['type'] == 'audio']
- if videos:
- return min(videos, key=lambda r: (r['height'], r['width'], r['bandwidth']))
- elif audios:
- return min(audios, key=lambda r: r['bandwidth'])
- return None
+ def is_ad_period(period_id: str, base_url: str) -> bool:
+ """Check if period is an advertisement"""
+ for indicator in AdPeriodDetector.AD_INDICATORS:
+ if indicator in base_url:
+ return True
+
+        # '_subclip_' period IDs mark main content split across periods; anything
+        # not matching an ad indicator above is treated as main content
+        return False
+
+class FileTypeDetector:
+ """Detects file types from URLs"""
+
@staticmethod
- def get_list(representations, type_filter=None):
- """
- Returns the list of representations filtered by type ('video', 'audio', etc.).
- """
- if type_filter:
- return [r for r in representations if r['type'] == type_filter]
- return representations
+ def infer_url_type(url: Optional[str]) -> Optional[str]:
+ if not url:
+ return None
+ try:
+ path = urlparse(url).path
+ ext = Path(path).suffix
+ return ext.lstrip(".").lower() if ext else None
+ except Exception:
+ return None
+
+ @staticmethod
+ def infer_segment_urls_type(urls: Optional[List[str]]) -> Optional[str]:
+ if not urls:
+ return None
+
+ types = {FileTypeDetector.infer_url_type(u) for u in urls if u}
+ types.discard(None)
+
+ if not types:
+ return None
+ return next(iter(types)) if len(types) == 1 else "mixed"
+
+
+class TablePrinter:
+ """Prints representation tables"""
+
+    def __init__(self, mpd_duration: int, mpd_sub_list: Optional[list] = None):
+        self.mpd_duration = mpd_duration
+        self.mpd_sub_list = mpd_sub_list or []
+
+    def print_table(self, representations: List[Dict[str, Any]], selected_video: Optional[Dict[str, Any]] = None, selected_audio: Optional[Dict[str, Any]] = None, selected_subs: Optional[list] = None):
+ """Print tracks table using Rich tables"""
+ approx = DurationUtils.format_duration(self.mpd_duration)
+
+ videos = sorted([r for r in representations if r['type'] == 'video'],
+ key=lambda r: (r['height'], r['width'], r['bandwidth']), reverse=True)
+ audios = sorted([r for r in representations if r['type'] == 'audio'],
+ key=lambda r: r['bandwidth'], reverse=True)
+
+ # Create single table
+ table = Table(show_header=True, header_style="bold")
+ table.add_column("Type", style="cyan")
+ table.add_column("Sel", width=3, style="green bold")
+ table.add_column("Info", style="white")
+ table.add_column("Resolution/ID", style="yellow")
+ table.add_column("Bitrate", style="green")
+ table.add_column("Codec", style="white")
+ table.add_column("Lang/FPS", style="blue")
+ table.add_column("Channels", style="magenta")
+ table.add_column("Segments", style="white")
+ table.add_column("Duration", style="white")
+
+ # Add video tracks
+ for vid in videos:
+ checked = 'X' if selected_video and vid['id'] == selected_video['id'] else ' '
+ cenc = "*CENC" if vid.get('protected') else ""
+ fps = f"{vid['frame_rate']:.0f}" if vid.get('frame_rate') else ""
+
+ table.add_row("Video", checked, f"Vid {cenc}", f"{vid['width']}x{vid['height']}", f"{vid['bandwidth'] // 1000} Kbps", vid.get('codec', ''), fps, vid['id'], str(vid['segment_count']), approx or "")
+
+ # Add audio tracks
+ for aud in audios:
+ checked = 'X' if selected_audio and aud['id'] == selected_audio['id'] else ' '
+ cenc = "*CENC" if aud.get('protected') else ""
+ ch = f"{aud['channels']}CH" if aud.get('channels') else ""
+
+ table.add_row("Audio", checked, f"Aud {cenc}", aud['id'], f"{aud['bandwidth'] // 1000} Kbps", aud.get('codec', ''), aud.get('language', ''), ch, str(aud['segment_count']), approx or "")
+
+ # Add subtitle tracks from mpd_sub_list
+ if self.mpd_sub_list:
+ for sub in self.mpd_sub_list:
+ checked = 'X' if selected_subs and sub in selected_subs else ' '
+ language = sub.get('language')
+ sub_type = sub.get('format')
+
+ table.add_row("Subtitle", checked, f"Sub {sub_type}", language, "", "", language, "", "", approx or "")
+
+ console.print(table)
- def __init__(self, mpd_url: str):
+
+class MPD_Parser:
+ """Main MPD parser class"""
+
+    def __init__(self, mpd_url: str, auto_save: bool = True, save_dir: Optional[str] = None, mpd_sub_list: Optional[list] = None):
self.mpd_url = mpd_url
+ self.auto_save = auto_save
+ self.save_dir = Path(save_dir) if save_dir else None
+ self.mpd_sub_list = mpd_sub_list or []
+
+ self.root = None
+ self.mpd_content = None
self.pssh = None
self.representations = []
- self.ns = {}
- self.root = None
-
- def parse(self, custom_headers: Dict[str, str]) -> None:
+ self.mpd_duration = 0
+
+ # Initialize utility classes (will be set after parsing)
+ self.ns_manager = None
+ self.url_resolver = None
+ self.protection_handler = None
+ self.rep_parser = None
+ self.table_printer = None
+
+ def parse(self, custom_headers: Optional[Dict[str, str]] = None) -> None:
"""Parse the MPD file and extract all representations"""
- self._fetch_and_parse_mpd(custom_headers)
- self._extract_namespace()
- self._extract_pssh()
+ self._fetch_and_parse_mpd(custom_headers or {})
+
+ # Initialize utility classes
+ self.ns_manager = NamespaceManager(self.root)
+ self.url_resolver = BaseURLResolver(self.mpd_url, self.ns_manager)
+ self.protection_handler = ContentProtectionHandler(self.ns_manager)
+ self.rep_parser = RepresentationParser(self.ns_manager, self.url_resolver)
+
+ # Extract MPD duration
+ duration_str = self.root.get('mediaPresentationDuration')
+ self.mpd_duration = DurationUtils.parse_duration(duration_str)
+ self.table_printer = TablePrinter(self.mpd_duration, self.mpd_sub_list)
+
+ # Extract PSSH and representations
+ self.pssh = self.protection_handler.extract_pssh(self.root)
self._parse_representations()
self._deduplicate_representations()
-
+
+ # Auto-save if enabled
+ if self.auto_save:
+ self._auto_save_files()
+
def _fetch_and_parse_mpd(self, custom_headers: Dict[str, str]) -> None:
"""Fetch MPD content and parse XML"""
response = requests.get(self.mpd_url, headers=custom_headers, timeout=max_timeout, impersonate="chrome124")
response.raise_for_status()
- logging.info(f"Successfully fetched MPD: {response.content}")
- self.root = ET.fromstring(response.content)
-
- def _extract_namespace(self) -> None:
- """Extract and register namespaces from the root element"""
- if self.root.tag.startswith('{'):
- uri = self.root.tag[1:].split('}')[0]
- self.ns['mpd'] = uri
- self.ns['cenc'] = 'urn:mpeg:cenc:2013'
-
- def _extract_pssh(self) -> None:
- """Extract Widevine PSSH from ContentProtection elements"""
- # Try to find Widevine PSSH first (preferred)
- for protection in self.root.findall('.//mpd:ContentProtection', self.ns):
- scheme_id = protection.get('schemeIdUri', '')
-
- # Check if this is Widevine ContentProtection
- if 'edef8ba9-79d6-4ace-a3c8-27dcd51d21ed' in scheme_id:
- pssh_element = protection.find('cenc:pssh', self.ns)
- if pssh_element is not None and pssh_element.text:
- self.pssh = pssh_element.text.strip()
- return
-
- # Fallback: try any PSSH (for compatibility with other services)
- for protection in self.root.findall('.//mpd:ContentProtection', self.ns):
- pssh_element = protection.find('cenc:pssh', self.ns)
- if pssh_element is not None and pssh_element.text:
- self.pssh = pssh_element.text.strip()
- print(f"Found PSSH (fallback): {self.pssh}")
- return
-
- def _get_period_base_url(self, period, initial_base: str) -> str:
- """Get base URL at Period level"""
- base = initial_base
-
- period_base = period.find('mpd:BaseURL', self.ns)
- if period_base is not None and period_base.text:
- base_text = period_base.text.strip()
- if base_text.startswith('http'):
- base = base_text
- else:
- base = urljoin(base, base_text)
-
- return base
-
+ logging.info(f"Successfully fetched MPD: {len(response.content)} bytes")
+ self.mpd_content = response.content
+ self.root = etree.fromstring(response.content)
+
def _parse_representations(self) -> None:
- """Parse all representations from the MPD, filtering out ads and aggregating main content"""
- base_url = self._get_initial_base_url()
- representation_parser = RepresentationParser(self.mpd_url, self.ns)
-
- # Dictionary to aggregate representations by ID
+ """Parse all representations from the MPD"""
+ base_url = self.url_resolver.get_initial_base_url(self.root)
rep_aggregator = {}
- periods = self.root.findall('.//mpd:Period', self.ns)
-
+
+ periods = self.ns_manager.findall(self.root, './/mpd:Period')
+
for period_idx, period in enumerate(periods):
period_id = period.get('id', f'period_{period_idx}')
- period_base_url = self._get_period_base_url(period, base_url)
-
- # CHECK IF THIS IS AN AD PERIOD
- is_ad = self._is_ad_period(period_id, period_base_url)
+ period_base_url = self.url_resolver.resolve_base_url(period, base_url)
# Skip ad periods
- if is_ad:
+ if AdPeriodDetector.is_ad_period(period_id, period_base_url):
continue
- for adapt_set in period.findall('mpd:AdaptationSet', self.ns):
- representations = representation_parser.parse_adaptation_set(adapt_set, period_base_url)
+ # Get period duration
+ period_duration_str = period.get('duration')
+ period_duration = DurationUtils.parse_duration(period_duration_str) or self.mpd_duration
+
+ # Parse adaptation sets
+ for adapt_set in self.ns_manager.findall(period, 'mpd:AdaptationSet'):
+ representations = self.rep_parser.parse_adaptation_set(
+ adapt_set, period_base_url, period_duration
+ )
+ # Aggregate representations by ID
for rep in representations:
rep_id = rep['id']
-
if rep_id not in rep_aggregator:
rep_aggregator[rep_id] = rep
else:
+ # Concatenate segment URLs for multi-period content
existing = rep_aggregator[rep_id]
-
- # Concatenate segment URLs
if rep['segment_urls']:
existing['segment_urls'].extend(rep['segment_urls'])
if not existing['init_url'] and rep['init_url']:
existing['init_url'] = rep['init_url']
- # Convert aggregated dict back to list
self.representations = list(rep_aggregator.values())
-
+
def _deduplicate_representations(self) -> None:
- """Remove duplicate video and audio representations"""
+ """Remove duplicate representations"""
videos = [r for r in self.representations if r['type'] == 'video']
audios = [r for r in self.representations if r['type'] == 'audio']
others = [r for r in self.representations if r['type'] not in ['video', 'audio']]
- deduplicated_videos = self._deduplicate_videos(videos)
- deduplicated_audios = self._deduplicate_audios(audios)
- self.representations = deduplicated_videos + deduplicated_audios + others
-
- def _get_initial_base_url(self) -> str:
- """Get the initial base URL from MPD-level BaseURL"""
- base_url = self.mpd_url.rsplit('/', 1)[0] + '/'
+ deduplicated_videos = RepresentationFilter.deduplicate_videos(videos)
+ deduplicated_audios = RepresentationFilter.deduplicate_audios(audios)
- # MPD-level BaseURL
- mpd_base = self.root.find('mpd:BaseURL', self.ns)
- if mpd_base is not None and mpd_base.text:
- base_text = mpd_base.text.strip()
-
- # Handle BaseURL that might already be absolute
- if base_text.startswith('http'):
- base_url = base_text
- else:
- base_url = urljoin(base_url, base_text)
-
- return base_url
+ self.representations = deduplicated_videos + deduplicated_audios + others
+
+ def get_resolutions(self) -> List[Dict[str, Any]]:
+ """Return list of video representations"""
+ return [r for r in self.representations if r['type'] == 'video']
- def get_resolutions(self):
- """Return list of video representations with their resolutions."""
- return [
- rep for rep in self.representations
- if rep['type'] == 'video'
- ]
-
- def get_audios(self):
- """Return list of audio representations."""
- return [
- rep for rep in self.representations
- if rep['type'] == 'audio'
- ]
-
- def get_best_video(self):
- """Return the best video representation (highest resolution, then bandwidth)."""
+ def get_audios(self) -> List[Dict[str, Any]]:
+ """Return list of audio representations"""
+ return [r for r in self.representations if r['type'] == 'audio']
+
+ def get_best_video(self) -> Optional[Dict[str, Any]]:
+ """Return the best video representation"""
videos = self.get_resolutions()
if not videos:
return None
-
- # Sort by (height, width, bandwidth)
return max(videos, key=lambda r: (r['height'], r['width'], r['bandwidth']))
-
- def get_best_audio(self):
- """Return the best audio representation (highest bandwidth)."""
+
+ def get_best_audio(self) -> Optional[Dict[str, Any]]:
+ """Return the best audio representation"""
audios = self.get_audios()
if not audios:
return None
return max(audios, key=lambda r: r['bandwidth'])
-
- def select_video(self, force_resolution="Best"):
- """
- Select a video representation based on the requested resolution.
- Returns: (selected_video, list_available_resolution, filter_custom_resolution, downloadable_video)
- """
+
+ @staticmethod
+ def get_worst(representations: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
+ """Return the worst representation"""
+ videos = [r for r in representations if r['type'] == 'video']
+ audios = [r for r in representations if r['type'] == 'audio']
+
+ if videos:
+ return min(videos, key=lambda r: (r['height'], r['width'], r['bandwidth']))
+ elif audios:
+ return min(audios, key=lambda r: r['bandwidth'])
+ return None
+
+ @staticmethod
+ def get_list(representations: List[Dict[str, Any]], type_filter: Optional[str] = None) -> List[Dict[str, Any]]:
+ """Return filtered list of representations"""
+ if type_filter:
+ return [r for r in representations if r['type'] == type_filter]
+ return representations
+
+    def select_video(self, force_resolution: Optional[str] = None) -> Tuple[Optional[Dict[str, Any]], List[str], str, str]:
+ """Select video representation based on resolution preference"""
video_reps = self.get_resolutions()
- list_available_resolution = [
- f"{rep['width']}x{rep['height']}" for rep in video_reps
- ]
- force_resolution_l = (force_resolution or "Best").lower()
-
- if force_resolution_l == "best":
+ available_resolutions = [f"{rep['width']}x{rep['height']}" for rep in video_reps]
+
+ # Use parameter or global config
+ resolution = (force_resolution or FILTER_CUSTOM_RESOLUTION or "best").lower()
+
+ if resolution == "best":
selected_video = self.get_best_video()
filter_custom_resolution = "Best"
-
- elif force_resolution_l == "worst":
- selected_video = MPD_Parser.get_worst(video_reps)
+ elif resolution == "worst":
+ selected_video = self.get_worst(video_reps)
filter_custom_resolution = "Worst"
-
else:
- selected_video = self.get_best_video()
- filter_custom_resolution = "Best"
-
+ # Try to find specific resolution (e.g., "1080p" -> "1920x1080")
+ selected_video = None
+ for rep in video_reps:
+ rep_res = f"{rep['width']}x{rep['height']}"
+ if (resolution in rep_res.lower() or
+ resolution.replace('p', '') in str(rep['height']) or
+ rep_res.lower() == resolution):
+ selected_video = rep
+ break
+
+ if not selected_video:
+ # Fallback to best if specific resolution not found
+ selected_video = self.get_best_video()
+ filter_custom_resolution = f"{resolution} (fallback to Best)"
+ else:
+ filter_custom_resolution = resolution
+
downloadable_video = f"{selected_video['width']}x{selected_video['height']}" if selected_video else "N/A"
- return selected_video, list_available_resolution, filter_custom_resolution, downloadable_video
-
- def select_audio(self, preferred_audio_langs=None):
- """
- Select an audio representation based on preferred languages.
- Returns: (selected_audio, list_available_audio_langs, filter_custom_audio, downloadable_audio)
- """
+ return selected_video, available_resolutions, filter_custom_resolution, downloadable_video
+
+ def select_audio(self, preferred_audio_langs: Optional[List[str]] = None) -> Tuple[Optional[Dict[str, Any]], List[str], str, str]:
+ """Select audio representation based on language preference"""
audio_reps = self.get_audios()
+ available_langs = [rep['language'] for rep in audio_reps if rep['language']]
+
+ # Use parameter or global config
+ preferred_langs = preferred_audio_langs or DOWNLOAD_SPECIFIC_AUDIO
- # Include all languages (including generated ones like aud_XXX)
- list_available_audio_langs = [rep['language'] for rep in audio_reps]
-
selected_audio = None
filter_custom_audio = "First"
-
- if preferred_audio_langs:
- # Search for the first available language in order of preference
- for lang in preferred_audio_langs:
+
+ if preferred_langs:
+ for lang in preferred_langs:
for rep in audio_reps:
if rep['language'] and rep['language'].lower() == lang.lower():
selected_audio = rep
@@ -670,10 +943,90 @@ def select_audio(self, preferred_audio_langs=None):
break
if selected_audio:
break
- if not selected_audio:
- selected_audio = self.get_best_audio()
- else:
+
+ if not selected_audio:
selected_audio = self.get_best_audio()
-
+
downloadable_audio = selected_audio['language'] if selected_audio else "N/A"
- return selected_audio, list_available_audio_langs, filter_custom_audio, downloadable_audio
\ No newline at end of file
+ return selected_audio, available_langs, filter_custom_audio, downloadable_audio
+
+    def print_tracks_table(self, selected_video: Optional[Dict[str, Any]] = None, selected_audio: Optional[Dict[str, Any]] = None, selected_subs: Optional[list] = None) -> None:
+ """Print tracks table"""
+ if self.table_printer:
+ self.table_printer.print_table(self.representations, selected_video, selected_audio, selected_subs)
+
+ def save_mpd(self, output_path: str) -> None:
+ """Save raw MPD manifest"""
+ if self.mpd_content is None:
+ raise ValueError("MPD content not available. Call parse() first.")
+
+ output_file = Path(output_path)
+ output_file.parent.mkdir(parents=True, exist_ok=True)
+
+ with open(output_file, 'wb') as f:
+ f.write(self.mpd_content)
+
+ logging.info(f"MPD manifest saved to: {output_file}")
+
+ def save_best_video_json(self, output_path: str) -> None:
+ """Save best video representation as JSON"""
+ best_video = self.get_best_video()
+ if best_video is None:
+ raise ValueError("No video representation available.")
+
+ video_json = dict(best_video)
+ video_json["stream_type"] = "dash"
+ video_json["init_url_type"] = FileTypeDetector.infer_url_type(video_json.get("init_url"))
+ video_json["segment_url_type"] = FileTypeDetector.infer_segment_urls_type(video_json.get("segment_urls"))
+
+ output_file = Path(output_path)
+ output_file.parent.mkdir(parents=True, exist_ok=True)
+
+ with open(output_file, 'w', encoding='utf-8') as f:
+ json.dump(video_json, f, indent=2, ensure_ascii=False)
+
+ logging.info(f"Best video JSON saved to: {output_file}")
+
+ def save_best_audio_json(self, output_path: str) -> None:
+ """Save best audio representation as JSON"""
+ best_audio = self.get_best_audio()
+ if best_audio is None:
+ raise ValueError("No audio representation available.")
+
+ audio_json = dict(best_audio)
+ audio_json["stream_type"] = "dash"
+ audio_json["init_url_type"] = FileTypeDetector.infer_url_type(audio_json.get("init_url"))
+ audio_json["segment_url_type"] = FileTypeDetector.infer_segment_urls_type(audio_json.get("segment_urls"))
+
+ output_file = Path(output_path)
+ output_file.parent.mkdir(parents=True, exist_ok=True)
+
+ with open(output_file, 'w', encoding='utf-8') as f:
+ json.dump(audio_json, f, indent=2, ensure_ascii=False)
+
+ logging.info(f"Best audio JSON saved to: {output_file}")
+
+ def _auto_save_files(self) -> None:
+ """Auto-save MPD files to tmp directory"""
+ if not self.save_dir:
+ return
+
+ try:
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ self.save_dir.mkdir(parents=True, exist_ok=True)
+
+ # Save MPD manifest
+ mpd_path = self.save_dir / f"manifest_{timestamp}.mpd"
+ self.save_mpd(str(mpd_path))
+
+ # Save JSON files
+ if self.get_best_video():
+ video_path = self.save_dir / f"best_video_{timestamp}.json"
+ self.save_best_video_json(str(video_path))
+
+ if self.get_best_audio():
+ audio_path = self.save_dir / f"best_audio_{timestamp}.json"
+ self.save_best_audio_json(str(audio_path))
+
+ except Exception as e:
+ console.print(f"[red]Error during auto-save: {e}")
\ No newline at end of file
diff --git a/StreamingCommunity/Lib/DASH/segments.py b/StreamingCommunity/Lib/DASH/segments.py
index d3c908ad9..48f5e0d40 100644
--- a/StreamingCommunity/Lib/DASH/segments.py
+++ b/StreamingCommunity/Lib/DASH/segments.py
@@ -4,6 +4,8 @@
import asyncio
import time
from typing import Dict, Optional
+from urllib.parse import urlparse
+from pathlib import Path
# External libraries
@@ -18,6 +20,10 @@
from StreamingCommunity.Util import config_manager, Colors
+# DASH single-file MP4 support
+from ..MP4 import MP4_Downloader
+
+
# Config
REQUEST_MAX_RETRY = config_manager.get_int('REQUESTS', 'max_retry')
DEFAULT_VIDEO_WORKERS = config_manager.get_int('M3U8_DOWNLOAD', 'default_video_workers')
@@ -33,7 +39,7 @@
class MPD_Segments:
- def __init__(self, tmp_folder: str, representation: dict, pssh: str = None, limit_segments: int = None):
+    def __init__(self, tmp_folder: str, representation: dict, pssh: Optional[str] = None, limit_segments: Optional[int] = None, custom_headers: Optional[Dict[str, str]] = None):
"""
-        Initialize MPD_Segments with temp folder, representation, optional pssh, and segment limit.
+        Initialize MPD_Segments with temp folder, representation, optional pssh, segment limit, and custom headers.
@@ -46,7 +52,8 @@ def __init__(self, tmp_folder: str, representation: dict, pssh: str = None, limi
self.tmp_folder = tmp_folder
self.selected_representation = representation
self.pssh = pssh
-
+ self.custom_headers = custom_headers or {}
+
# Use LIMIT_SEGMENT from config if limit_segments is not specified or is 0
if limit_segments is None or limit_segments == 0:
self.limit_segments = LIMIT_SEGMENT if LIMIT_SEGMENT > 0 else None
@@ -73,17 +80,42 @@ def __init__(self, tmp_folder: str, representation: dict, pssh: str = None, limi
# Estimator for progress tracking
self.estimator: Optional[M3U8_Ts_Estimator] = None
+ @staticmethod
+ def _infer_url_ext(url: Optional[str]) -> Optional[str]:
+ """Return lowercased extension without dot from URL path (ignores query/fragment)."""
+ path = urlparse(url).path or ""
+ ext = Path(path).suffix
+ return ext.lstrip(".").lower() if ext else None
+
+ def _get_segment_url_type(self) -> Optional[str]:
+ """Prefer representation field, otherwise infer from first segment URL."""
+ rep = self.selected_representation or {}
+ t = (rep.get("segment_url_type") or "").strip().lower()
+ if t:
+ return t
+ urls = rep.get("segment_urls") or []
+ return self._infer_url_ext(urls[0]) if urls else None
+
+ def _merged_headers(self) -> Dict[str, str]:
+ """Ensure UA exists while keeping caller-provided headers."""
+ h = dict(self.custom_headers or {})
+ h.setdefault("User-Agent", get_userAgent())
+ return h
+
def get_concat_path(self, output_dir: str = None):
"""
Get the path for the concatenated output file.
"""
rep_id = self.selected_representation['id']
- return os.path.join(output_dir or self.tmp_folder, f"{rep_id}_encrypted.m4s")
+ ext = "mp4" if (self._get_segment_url_type() == "mp4") else "m4s"
+ return os.path.join(output_dir or self.tmp_folder, f"{rep_id}_encrypted.{ext}")
def get_segments_count(self) -> int:
"""
Returns the total number of segments available in the representation.
"""
+ if self._get_segment_url_type() == "mp4":
+ return 1
return len(self.selected_representation.get('segment_urls', []))
def download_streams(self, output_dir: str = None, description: str = "DASH"):
@@ -95,27 +127,72 @@ def download_streams(self, output_dir: str = None, description: str = "DASH"):
- description (str): Description for progress bar (e.g., "Video", "Audio Italian")
"""
concat_path = self.get_concat_path(output_dir)
+ seg_type = (self._get_segment_url_type() or "").lower()
+
+ # Single-file MP4: download directly (no init/segment concat)
+ if seg_type == "mp4":
+ rep = self.selected_representation
+ url = (rep.get("segment_urls") or [None])[0] or rep.get("init_url")
+ if not url:
+ return {
+ "type": description,
+ "nFailed": 1,
+ "stopped": False,
+ "concat_path": concat_path,
+ "representation_id": rep.get("id"),
+ "pssh": self.pssh,
+ }
+
+ os.makedirs(output_dir or self.tmp_folder, exist_ok=True)
+ try:
+ downloaded_file, kill = MP4_Downloader(
+ url=url,
+ path=concat_path,
+ headers_=self._merged_headers(),
+ show_final_info=False
+ )
+ self.download_interrupted = bool(kill)
+ return {
+ "type": description,
+ "nFailed": 0,
+ "stopped": bool(kill),
+ "concat_path": downloaded_file or concat_path,
+ "representation_id": rep.get("id"),
+ "pssh": self.pssh,
+ }
+
+ except KeyboardInterrupt:
+ self.download_interrupted = True
+ console.print("\n[red]Download interrupted by user (Ctrl+C).")
+ return {
+ "type": description,
+ "nFailed": 1,
+ "stopped": True,
+ "concat_path": concat_path,
+ "representation_id": rep.get("id"),
+ "pssh": self.pssh,
+ }
# Apply segment limit if specified
if self.limit_segments is not None:
orig_count = len(self.selected_representation.get('segment_urls', []))
if orig_count > self.limit_segments:
-
- # Limit segment URLs
self.selected_representation['segment_urls'] = self.selected_representation['segment_urls'][:self.limit_segments]
# Run async download in sync mode
try:
- asyncio.run(self.download_segments(output_dir=output_dir, description=description))
+ res = asyncio.run(self.download_segments(output_dir=output_dir, description=description))
except KeyboardInterrupt:
self.download_interrupted = True
console.print("\n[red]Download interrupted by user (Ctrl+C).")
+ res = {"type": description, "nFailed": 0, "stopped": True}
return {
+ **(res or {}),
"concat_path": concat_path,
- "representation_id": self.selected_representation['id'],
- "pssh": self.pssh
+ "representation_id": self.selected_representation.get("id"),
+ "pssh": self.pssh,
}
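How the merged return value behaves, modeled in isolation (a sketch; field names follow the dicts above): the async result's fields survive, and the fixed keys are always appended or overwritten.

```python
def build_result(res, concat_path, rep_id, pssh):
    return {
        **(res or {}),  # None-safe: falls back to an empty dict
        "concat_path": concat_path,
        "representation_id": rep_id,
        "pssh": pssh,
    }

out = build_result({"type": "Video", "nFailed": 0, "stopped": False},
                   "/tmp/480p_encrypted.m4s", "480p", None)
assert out["type"] == "Video" and out["concat_path"] == "/tmp/480p_encrypted.m4s"
```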
async def download_segments(self, output_dir: str = None, concurrent_downloads: int = None, description: str = "DASH"):
@@ -133,8 +210,9 @@ async def download_segments(self, output_dir: str = None, concurrent_downloads:
init_url = rep.get('init_url')
os.makedirs(output_dir or self.tmp_folder, exist_ok=True)
- concat_path = os.path.join(output_dir or self.tmp_folder, f"{rep_id}_encrypted.m4s")
-
+ concat_path = self.get_concat_path(output_dir)
+
+ # Temp directory for individual .m4s segment files (needed for concat flow)
temp_dir = os.path.join(output_dir or self.tmp_folder, f"{rep_id}_segments")
os.makedirs(temp_dir, exist_ok=True)
@@ -204,9 +282,9 @@ async def _download_init_segment(self, client, init_url, concat_path, progress_b
with open(concat_path, 'wb') as outfile:
pass
return
-
+
try:
- headers = {'User-Agent': get_userAgent()}
+ headers = self._merged_headers()
response = await client.get(init_url, headers=headers, follow_redirects=True)
with open(concat_path, 'wb') as outfile:
@@ -239,7 +317,7 @@ async def _download_segments_batch(self, client, segment_urls, temp_dir, semapho
"""
async def download_single(url, idx):
async with semaphore:
- headers = {'User-Agent': get_userAgent()}
+ headers = self._merged_headers()
temp_file = os.path.join(temp_dir, f"seg_{idx:06d}.tmp")
for attempt in range(max_retry):
@@ -317,7 +395,7 @@ async def _retry_failed_segments(self, client, segment_urls, temp_dir, semaphore
async def download_single(url, idx):
async with semaphore:
- headers = {'User-Agent': get_userAgent()}
+ headers = self._merged_headers()
temp_file = os.path.join(temp_dir, f"seg_{idx:06d}.tmp")
for attempt in range(max_retry):
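The header merge now used throughout the segment downloads, shown standalone (a sketch; the constant stands in for get_userAgent()): caller-provided headers always win, and a User-Agent is only injected when absent.

```python
from typing import Dict, Optional

FALLBACK_UA = "Mozilla/5.0"  # stand-in for get_userAgent()

def merged_headers(custom: Optional[Dict[str, str]]) -> Dict[str, str]:
    h = dict(custom or {})
    h.setdefault("User-Agent", FALLBACK_UA)
    return h

assert merged_headers(None) == {"User-Agent": FALLBACK_UA}
assert merged_headers({"User-Agent": "custom/1.0"})["User-Agent"] == "custom/1.0"
```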
diff --git a/StreamingCommunity/Lib/FFmpeg/util.py b/StreamingCommunity/Lib/FFmpeg/util.py
index d782abf54..5700be361 100644
--- a/StreamingCommunity/Lib/FFmpeg/util.py
+++ b/StreamingCommunity/Lib/FFmpeg/util.py
@@ -166,6 +166,7 @@ def is_png_format_or_codec(file_info):
format_name = file_info.get('format_name')
codec_names = file_info.get('codec_names', [])
+ console.log(f"[cyan]FFMPEG detect format: [green]{format_name}[cyan], codec: [green]{codec_names}")
return format_name == 'png_pipe' or 'png' in codec_names
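The predicate with the new log, exercised against ffprobe-shaped dicts (the shapes are assumed from the keys the function reads):

```python
def is_png_format_or_codec(file_info: dict) -> bool:
    return (file_info.get('format_name') == 'png_pipe'
            or 'png' in file_info.get('codec_names', []))

assert is_png_format_or_codec({'format_name': 'png_pipe'})
assert is_png_format_or_codec({'format_name': 'mov,mp4', 'codec_names': ['png']})
assert not is_png_format_or_codec({'format_name': 'mpegts', 'codec_names': ['h264']})
```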
diff --git a/StreamingCommunity/Lib/HLS/downloader.py b/StreamingCommunity/Lib/HLS/downloader.py
index 4a81baeb7..bdabdfd5a 100644
--- a/StreamingCommunity/Lib/HLS/downloader.py
+++ b/StreamingCommunity/Lib/HLS/downloader.py
@@ -164,10 +164,9 @@ def select_streams(self):
self.sub_streams = []
else:
- # Video selection logic
- if str(FILTER_CUSTOM_RESOLUTION) == "best":
+ if str(FILTER_CUSTOM_RESOLUTION).strip().lower() == "best":
self.video_url, self.video_res = self.parser._video.get_best_uri()
- elif str(FILTER_CUSTOM_RESOLUTION) == "worst":
+ elif str(FILTER_CUSTOM_RESOLUTION).strip().lower() == "worst":
self.video_url, self.video_res = self.parser._video.get_worst_uri()
elif str(FILTER_CUSTOM_RESOLUTION).replace("p", "").replace("px", "").isdigit():
resolution_value = int(str(FILTER_CUSTOM_RESOLUTION).replace("p", "").replace("px", ""))
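The normalization this hunk introduces, condensed into one sketch. Note that the sketch strips "px" before "p": replacing "p" first turns "720px" into the non-numeric "720x", so the order matters for the suffix handling.

```python
from typing import Optional, Union

def normalize_resolution_filter(value: Union[str, int]) -> Optional[Union[str, int]]:
    s = str(value).strip().lower()
    if s in ("best", "worst"):
        return s
    digits = s.replace("px", "").replace("p", "")   # strip "px" before "p"
    return int(digits) if digits.isdigit() else None

assert normalize_resolution_filter(" Best ") == "best"
assert normalize_resolution_filter("1080p") == 1080
assert normalize_resolution_filter("720px") == 720
assert normalize_resolution_filter("auto") is None
```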
@@ -399,6 +398,8 @@ def download_subtitle(self, sub: Dict) -> bool:
try:
raw_content = self.client.request(sub['uri'])
if raw_content:
+
+ console.log(f"[cyan]Downloading subtitle[white]: [red]{sub['language']} (vtt)")
sub_path = os.path.join(self.temp_dir, 'subs', f"{sub['language']}.vtt")
subtitle_parser = M3U8_Parser()
diff --git a/StreamingCommunity/Lib/HLS/segments.py b/StreamingCommunity/Lib/HLS/segments.py
index bfa3331a0..964135166 100644
--- a/StreamingCommunity/Lib/HLS/segments.py
+++ b/StreamingCommunity/Lib/HLS/segments.py
@@ -165,7 +165,8 @@ def get_type_stream(self, segments) -> str:
elif self.is_stream_aac:
return "aac"
else:
- return None
+ console.log("[yellow]Warning: Unable to determine stream type.")
+ return "ts" # Default to ts
def get_info(self) -> None:
"""
@@ -275,7 +276,7 @@ async def _download_single_segment(self, client: httpx.AsyncClient, ts_url: str,
except Exception:
if attempt + 1 == max_retry:
- console.print(f" -- [red]Final retry failed for segment: {index}")
+ console.print(f" -- [red]Failed request for segment: {index}")
return index, False, max_retry, 0
sleep_time = 0.5 + attempt * 0.5 if attempt < 2 else min(3.0, 1.02 ** attempt)
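Evaluating that back-off expression for the first attempts shows a quirk worth knowing: the wait actually dips at attempt 2, because 1.02**attempt grows far more slowly than the linear ramp it takes over from.

```python
def sleep_time(attempt: int) -> float:
    return 0.5 + attempt * 0.5 if attempt < 2 else min(3.0, 1.02 ** attempt)

print([round(sleep_time(a), 3) for a in range(5)])
# [0.5, 1.0, 1.04, 1.061, 1.082]
```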
@@ -437,9 +438,6 @@ async def download_segments_async(self, description: str, type: str):
finally:
self._cleanup_resources(temp_dir, progress_bar)
- if not self.download_interrupted:
- self._verify_download_completion()
-
return self._generate_results(type)
else:
@@ -524,13 +522,6 @@ def _generate_results(self, stream_type: str, output_path: str = None) -> Dict:
'output_path': output_path if output_path else self.final_output_path
}
- def _verify_download_completion(self) -> None:
- """Validate final download integrity."""
- total = len(self.segments)
- if len(self.downloaded_segments) / total < 0.999:
- missing = sorted(set(range(total)) - self.downloaded_segments)
- raise RuntimeError(f"Download incomplete ({len(self.downloaded_segments)/total:.1%}). Missing segments: {missing}")
-
def _cleanup_resources(self, temp_dir: str, progress_bar: tqdm) -> None:
"""Ensure resource cleanup and final reporting."""
progress_bar.close()
@@ -550,7 +541,7 @@ def _cleanup_resources(self, temp_dir: str, progress_bar: tqdm) -> None:
def _display_error_summary(self) -> None:
"""Generate final error report."""
- console.print(f" [cyan]Max retries: [red]{self.info_maxRetry} [white] | "
+ console.log(f"[cyan]Max retries: [red]{self.info_maxRetry} [white] | "
f"[cyan]Total retries: [red]{self.info_nRetry} [white] | "
f"[cyan]Failed segments: [red]{self.info_nFailed}")
diff --git a/StreamingCommunity/Lib/MEGA/downloader.py b/StreamingCommunity/Lib/MEGA/downloader.py
index b5bc30f42..0a1783ad4 100644
--- a/StreamingCommunity/Lib/MEGA/downloader.py
+++ b/StreamingCommunity/Lib/MEGA/downloader.py
@@ -118,7 +118,7 @@ def _download_folder_megatools(self, url, dest_path=None):
if ep_title:
ep_display += f" - {ep_title}"
- console.print(f"\n[cyan]Download: [yellow]{show} [magenta]{ep_display}[/magenta]\n")
+ console.print(f"\n[cyan]Download: [yellow]{show} [magenta]{ep_display}\n")
process.wait()
if process.returncode != 0:
diff --git a/StreamingCommunity/Lib/__init__.py b/StreamingCommunity/Lib/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/StreamingCommunity/Upload/__init__.py b/StreamingCommunity/Upload/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/StreamingCommunity/Util/config_json.py b/StreamingCommunity/Util/config_json.py
index 83d1e5a20..527281c5f 100644
--- a/StreamingCommunity/Util/config_json.py
+++ b/StreamingCommunity/Util/config_json.py
@@ -5,7 +5,7 @@
import json
import logging
import requests
-from typing import Any, List
+from typing import Any, List, Dict
# External library
@@ -19,23 +19,20 @@
class ConfigManager:
def __init__(self, file_name: str = 'config.json') -> None:
"""
- Initialize the ConfigManager.
+ Initialize the ConfigManager with caching.
Args:
file_name (str, optional): Configuration file name. Default: 'config.json'.
"""
- # Determine the base path - use the current working directory
+ self.base_path = None
if getattr(sys, 'frozen', False):
- # If the application is frozen (e.g., PyInstaller)
- base_path = os.path.dirname(sys.executable)
-
+ self.base_path = os.path.dirname(sys.executable) # PyInstaller
else:
- # Use the current working directory where the script is executed
- base_path = os.getcwd()
+ self.base_path = os.getcwd()
# Initialize file paths
- self.file_path = os.path.join(base_path, file_name)
- self.domains_path = os.path.join(base_path, 'domains.json')
+ self.file_path = os.path.join(self.base_path, file_name)
+ self.domains_path = os.path.join(self.base_path, 'domains.json')
# Display the actual file path for debugging
console.print(f"[cyan]Config path: [green]{self.file_path}")
@@ -46,8 +43,11 @@ def __init__(self, file_name: str = 'config.json') -> None:
# Initialize data structures
self.config = {}
self.configSite = {}
- self.cache = {}
-
+
+ # Enhanced caching system
+ self.cache: Dict[str, Any] = {}
+ self._cache_enabled = True
+
# Load the configuration
self.fetch_domain_online = True
self.load_config()
@@ -64,6 +64,9 @@ def load_config(self) -> None:
with open(self.file_path, 'r') as f:
self.config = json.load(f)
+ # Pre-cache commonly used configuration values
+ self._precache_common_configs()
+
# Update settings from the configuration
self._update_settings_from_config()
@@ -78,6 +81,38 @@ def load_config(self) -> None:
console.print(f"[red]Error loading configuration: {str(e)}")
self._handle_config_error()
+ def _precache_common_configs(self) -> None:
+ """Pre-convert and cache frequently read configuration values."""
+ common_keys = [
+ ('DEFAULT', 'debug', bool),
+ ('M3U8_CONVERSION', 'use_gpu', bool),
+ ('M3U8_CONVERSION', 'param_video', str),
+ ('M3U8_CONVERSION', 'param_audio', str),
+ ('M3U8_CONVERSION', 'param_final', str),
+ ('M3U8_DOWNLOAD', 'cleanup_tmp_folder', bool),
+ ('M3U8_DOWNLOAD', 'default_video_workers', int),
+ ('M3U8_DOWNLOAD', 'default_audio_workers', int),
+ ('M3U8_DOWNLOAD', 'segment_timeout', int),
+ ('M3U8_DOWNLOAD', 'enable_retry', bool),
+ ('M3U8_DOWNLOAD', 'merge_subs', bool),
+ ('REQUESTS', 'verify', bool),
+ ('REQUESTS', 'timeout', int),
+ ('REQUESTS', 'max_retry', int)
+ ]
+
+ cached_count = 0
+ for section, key, data_type in common_keys:
+ try:
+ cache_key = f"config.{section}.{key}"
+
+ if section in self.config and key in self.config[section]:
+ value = self.config[section][key]
+ converted_value = self._convert_to_data_type(value, data_type)
+ self.cache[cache_key] = converted_value
+ cached_count += 1
+
+ except Exception as e:
+ logging.warning(f"Failed to precache {section}.{key}: {e}")
+
def _handle_config_error(self) -> None:
"""Handle configuration errors by downloading the reference version."""
console.print("[yellow]Attempting to retrieve reference configuration...")
@@ -87,8 +122,12 @@ def _handle_config_error(self) -> None:
try:
with open(self.file_path, 'r') as f:
self.config = json.load(f)
+
+ # Pre-cache after reload
+ self._precache_common_configs()
self._update_settings_from_config()
console.print("[green]Reference configuration loaded successfully")
+
except Exception as e:
console.print(f"[red]Critical configuration error: {str(e)}")
console.print("[red]Unable to proceed. The application will terminate.")
@@ -131,7 +170,7 @@ def _load_site_data(self) -> None:
def _load_site_data_online(self) -> None:
"""Load site data from GitHub and update local domains.json file."""
- domains_github_url = "https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json"
+ domains_github_url = "https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/workflows/script/domains.json"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
}
@@ -159,32 +198,20 @@ def _load_site_data_online(self) -> None:
def _save_domains_to_appropriate_location(self) -> None:
"""Save domains to the appropriate location based on existing files."""
- if getattr(sys, 'frozen', False):
- # If the application is frozen (e.g., PyInstaller)
- base_path = os.path.dirname(sys.executable)
- else:
- # Use the current working directory where the script is executed
- base_path = os.getcwd()
-
- # Check for GitHub structure first
- github_domains_path = os.path.join(base_path, '.github', '.domain', 'domains.json')
+ github_domains_path = os.path.join(self.base_path, '.github', 'workflows', 'script', 'domains.json')
console.print(f"[cyan]Domain path: [green]{github_domains_path}")
try:
if os.path.exists(github_domains_path):
-
- # Update existing GitHub structure file
with open(github_domains_path, 'w', encoding='utf-8') as f:
json.dump(self.configSite, f, indent=4, ensure_ascii=False)
elif not os.path.exists(self.domains_path):
- # Save to root only if it doesn't exist and GitHub structure doesn't exist
with open(self.domains_path, 'w', encoding='utf-8') as f:
json.dump(self.configSite, f, indent=4, ensure_ascii=False)
console.print(f"[green]Domains saved to: {self.domains_path}")
else:
- # Root file exists, don't overwrite it
console.print(f"[yellow]Local domains.json already exists, not overwriting: {self.domains_path}")
console.print("[yellow]Tip: Delete the file if you want to recreate it from GitHub")
@@ -203,18 +230,7 @@ def _save_domains_to_appropriate_location(self) -> None:
def _load_site_data_from_file(self) -> None:
"""Load site data from local domains.json file."""
try:
- # Determine the base path
- if getattr(sys, 'frozen', False):
-
- # If the application is frozen (e.g., PyInstaller)
- base_path = os.path.dirname(sys.executable)
- else:
-
- # Use the current working directory where the script is executed
- base_path = os.getcwd()
-
- # Check for GitHub structure first
- github_domains_path = os.path.join(base_path, '.github', '.domain', 'domains.json')
+ github_domains_path = os.path.join(self.base_path, '.github', 'workflows', 'script', 'domains.json')
if os.path.exists(github_domains_path):
console.print(f"[cyan]Domain path: [green]{github_domains_path}")
@@ -243,17 +259,7 @@ def _load_site_data_from_file(self) -> None:
def _handle_site_data_fallback(self) -> None:
"""Handle site data fallback in case of error."""
- # Determine the base path
- if getattr(sys, 'frozen', False):
-
- # If the application is frozen (e.g., PyInstaller)
- base_path = os.path.dirname(sys.executable)
- else:
- # Use the current working directory where the script is executed
- base_path = os.getcwd()
-
- # Check for GitHub structure first
- github_domains_path = os.path.join(base_path, '.github', '.domain', 'domains.json')
+ github_domains_path = os.path.join(self.base_path, '.github', 'workflows', 'script', 'domains.json')
if os.path.exists(github_domains_path):
console.print("[yellow]Attempting fallback to GitHub structure domains.json file...")
@@ -280,7 +286,7 @@ def _handle_site_data_fallback(self) -> None:
def get(self, section: str, key: str, data_type: type = str, from_site: bool = False, default: Any = None) -> Any:
"""
- Read a value from the configuration.
+ Read a value from the configuration with caching.
Args:
section (str): Section in the configuration
@@ -293,12 +299,14 @@ def get(self, section: str, key: str, data_type: type = str, from_site: bool = F
Any: The key value converted to the specified data type, or default if not found
"""
cache_key = f"{'site' if from_site else 'config'}.{section}.{key}"
- logging.info(f"Reading key: {cache_key}")
# Check if the value is in the cache
- if cache_key in self.cache:
+ if self._cache_enabled and cache_key in self.cache:
return self.cache[cache_key]
+ # Log only if not in cache
+ logging.info(f"Reading key: {cache_key}")
+
# Choose the appropriate source
config_source = self.configSite if from_site else self.config
@@ -320,7 +328,8 @@ def get(self, section: str, key: str, data_type: type = str, from_site: bool = F
converted_value = self._convert_to_data_type(value, data_type)
# Save in cache
- self.cache[cache_key] = converted_value
+ if self._cache_enabled:
+ self.cache[cache_key] = converted_value
return converted_value
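A minimal model of the guarded read-through cache (not the full implementation; _convert_to_data_type is simplified to a constructor call): the first read converts and stores, later reads return the cached value with no conversion or logging.

```python
from typing import Any, Dict

class CachedConfig:
    def __init__(self, config: Dict[str, Dict[str, Any]]) -> None:
        self.config = config
        self.cache: Dict[str, Any] = {}
        self._cache_enabled = True

    def get(self, section: str, key: str, data_type: type = str, default: Any = None) -> Any:
        cache_key = f"config.{section}.{key}"
        if self._cache_enabled and cache_key in self.cache:
            return self.cache[cache_key]        # hot path: nothing recomputed
        value = self.config.get(section, {}).get(key, default)
        converted = data_type(value) if value is not None else default
        if self._cache_enabled:
            self.cache[cache_key] = converted
        return converted

cfg = CachedConfig({"REQUESTS": {"max_retry": "8"}})
assert cfg.get("REQUESTS", "max_retry", int) == 8
assert "config.REQUESTS.max_retry" in cfg.cache
```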
@@ -361,6 +370,7 @@ def _convert_to_data_type(self, value: Any, data_type: type) -> Any:
raise ValueError(f"Cannot convert {type(value).__name__} to dict")
else:
return value
+
except Exception as e:
logging.error(f"Error converting: {data_type.__name__} to value '{value}' with error: {e}")
raise ValueError(f"Error converting: {data_type.__name__} to value '{value}' with error: {e}")
@@ -393,7 +403,7 @@ def get_site(self, section: str, key: str) -> Any:
def set_key(self, section: str, key: str, value: Any, to_site: bool = False) -> None:
"""
- Set a key in the configuration.
+ Set a key in the configuration and update cache.
Args:
section (str): Section in the configuration
diff --git a/StreamingCommunity/Util/os.py b/StreamingCommunity/Util/os.py
index 193663ad8..4fa583bd5 100644
--- a/StreamingCommunity/Util/os.py
+++ b/StreamingCommunity/Util/os.py
@@ -4,7 +4,6 @@
import shutil
import logging
import platform
-import inspect
# External library
@@ -252,29 +251,6 @@ def _display_binary_paths(self):
os_summary = OsSummary()
-def get_call_stack():
- """Retrieves the current call stack with details about each call."""
- stack = inspect.stack()
- call_stack = []
-
- for frame_info in stack:
- function_name = frame_info.function
- filename = frame_info.filename
- lineno = frame_info.lineno
- folder_name = os.path.dirname(filename)
- folder_base = os.path.basename(folder_name)
- script_name = os.path.basename(filename)
-
- call_stack.append({
- "function": function_name,
- "folder": folder_name,
- "folder_base": folder_base,
- "script": script_name,
- "line": lineno
- })
-
- return call_stack
-
def get_ffmpeg_path():
"""Returns the path of FFmpeg."""
return os_summary.ffmpeg_path
diff --git a/StreamingCommunity/Util/table.py b/StreamingCommunity/Util/table.py
index c9b05e69b..2bab54f98 100644
--- a/StreamingCommunity/Util/table.py
+++ b/StreamingCommunity/Util/table.py
@@ -1,10 +1,7 @@
# 03.03.24
-import os
import sys
import logging
-import importlib
-from pathlib import Path
from typing import Dict, List, Any
@@ -16,9 +13,7 @@
# Internal utilities
-from .os import get_call_stack
from .message import start_message
-from StreamingCommunity.Api.Template.loader import folder_name as lazy_loader_folder
@@ -105,43 +100,6 @@ def display_data(self, data_slice: List[Dict[str, Any]]) -> None:
table.add_row(*row_data, style=style)
self.console.print(table)
-
- @staticmethod
- def run_back_command(research_func: dict) -> None:
- """
- Executes a back-end search command by dynamically importing a module and invoking its search function.
-
- Args:
- research_func (dict): A dictionary containing:
- - 'folder' (str): The absolute path to the directory containing the module to be executed.
- """
- try:
- # Get site name from folder
- site_name = Path(research_func['folder']).name
-
- # Find the project root directory
- current_path = research_func['folder']
- while not os.path.exists(os.path.join(current_path, 'StreamingCommunity')):
- current_path = os.path.dirname(current_path)
-
- project_root = current_path
- if project_root not in sys.path:
- sys.path.insert(0, project_root)
-
- # Import using full absolute import
- module_path = f'StreamingCommunity.Api.{lazy_loader_folder}.{site_name}'
- module = importlib.import_module(module_path)
-
- # Get and call the search function
- search_func = getattr(module, 'search')
- search_func(None)
-
- except Exception:
- logging.error("Error during search execution")
-
- finally:
- if project_root in sys.path:
- sys.path.remove(project_root)
def run(self, force_int_input: bool = False, max_int_input: int = 0) -> str:
"""
@@ -179,15 +137,9 @@ def run(self, force_int_input: bool = False, max_int_input: int = 0) -> str:
if result_func == 404:
sys.exit(1)
- # Get research function from call stack
- research_func = next((
- f for f in get_call_stack()
- if f['function'] == 'search' and f['script'] == '__init__.py'
- ), None)
-
# Handle pagination and user input
if self.slice_end < total_items:
- self.console.print("\n[green]Press [red]Enter [green]for next page, [red]'q' [green]to quit, or [red]'back' [green]to search.")
+ self.console.print("\n[green]Press [red]Enter [green]for next page, [red]'q' [green]to quit.")
if not force_int_input:
prompt_msg = ("\n[cyan]Insert media index [yellow](e.g., 1), [red]* [cyan]to download all media, [yellow](e.g., 1-2) [cyan]for a range of media, or [yellow](e.g., 3-*) [cyan]to download from a specific index to the end")
@@ -195,7 +147,7 @@ def run(self, force_int_input: bool = False, max_int_input: int = 0) -> str:
else:
# Include empty string in choices to allow pagination with Enter key
- choices = [""] + [str(i) for i in range(max_int_input + 1)] + ["q", "quit", "b", "back"]
+ choices = [""] + [str(i) for i in range(max_int_input + 1)] + ["q", "quit"]
prompt_msg = "[cyan]Insert media [red]index"
key = Prompt.ask(prompt_msg, choices=choices, show_choices=False)
@@ -203,28 +155,24 @@ def run(self, force_int_input: bool = False, max_int_input: int = 0) -> str:
if key.lower() in ["q", "quit"]:
break
-
elif key == "":
self.slice_start += self.step
self.slice_end += self.step
if self.slice_end > total_items:
self.slice_end = total_items
-
- elif (key.lower() in ["b", "back"]) and research_func:
- TVShowManager.run_back_command(research_func)
else:
break
else:
# Last page handling
- self.console.print("\n[green]You've reached the end. [red]Enter [green]for first page, [red]'q' [green]to quit, or [red]'back' [green]to search.")
+ self.console.print("\n[green]You've reached the end. [red]Enter [green]for first page, [red]'q' [green]to quit.")
if not force_int_input:
prompt_msg = ("\n[cyan]Insert media index [yellow](e.g., 1), [red]* [cyan]to download all media, [yellow](e.g., 1-2) [cyan]for a range of media, or [yellow](e.g., 3-*) [cyan]to download from a specific index to the end")
key = Prompt.ask(prompt_msg)
else:
# Include empty string in choices to allow pagination with Enter key
- choices = [""] + [str(i) for i in range(max_int_input + 1)] + ["q", "quit", "b", "back"]
+ choices = [""] + [str(i) for i in range(max_int_input + 1)] + ["q", "quit"]
prompt_msg = "[cyan]Insert media [red]index"
key = Prompt.ask(prompt_msg, choices=choices, show_choices=False)
@@ -236,9 +184,6 @@ def run(self, force_int_input: bool = False, max_int_input: int = 0) -> str:
elif key == "":
self.slice_start = 0
self.slice_end = self.step
-
- elif (key.lower() in ["b", "back"]) and research_func:
- TVShowManager.run_back_command(research_func)
else:
break
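With the back-command gone, the pager reduces to window arithmetic; a sketch of the two Enter behaviours (advance and clamp, wrap on the last page):

```python
def advance(slice_start: int, slice_end: int, step: int, total_items: int):
    if slice_end >= total_items:                 # last page: wrap to the first
        return 0, step
    return slice_start + step, min(slice_end + step, total_items)

assert advance(0, 10, 10, 25) == (10, 20)
assert advance(10, 20, 10, 25) == (20, 25)      # clamped to total_items
assert advance(20, 25, 10, 25) == (0, 10)
```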
diff --git a/StreamingCommunity/__init__.py b/StreamingCommunity/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/config.json b/config.json
index 702b4b936..c86e2b86e 100644
--- a/config.json
+++ b/config.json
@@ -2,7 +2,7 @@
"DEFAULT": {
"debug": false,
"show_message": true,
- "fetch_domain_online": true
+ "fetch_domain_online": false
},
"OUT_FOLDER": {
"root_path": "Video",
@@ -23,7 +23,7 @@
"merge_subs": true,
"specific_list_subtitles": [
"ita",
- "eng"
+ "it-IT"
],
"limit_segment": 0,
"cleanup_tmp_folder": true,
diff --git a/dockerfile b/dockerfile
index c48a44e36..58898133e 100644
--- a/dockerfile
+++ b/dockerfile
@@ -8,6 +8,9 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
+RUN groupadd -r appuser && \
+ useradd -r -g appuser -u 1000 -m -d /home/appuser -s /bin/bash appuser
+
WORKDIR /app
COPY requirements.txt ./
@@ -16,10 +19,19 @@ RUN pip install --no-cache-dir -r requirements.txt
COPY GUI/requirements.txt ./GUI/requirements.txt
RUN pip install --no-cache-dir -r GUI/requirements.txt
-COPY . .
+COPY . .
+
+RUN mkdir -p /app/Video /app/logs /app/data \
+ /home/appuser/.local/bin/binary \
+ /home/appuser/.config && \
+ chown -R appuser:appuser /app /home/appuser && \
+ chmod -R 755 /app /home/appuser
+
+USER appuser
-ENV PYTHONPATH="/app:${PYTHONPATH}"
+ENV PYTHONPATH="/app:${PYTHONPATH}" \
+ HOME=/home/appuser
EXPOSE 8000
-CMD ["python", "GUI/manage.py", "runserver", "0.0.0.0:8000"]
\ No newline at end of file
+CMD ["python", "GUI/manage.py", "runserver", "0.0.0.0:8000"]
diff --git a/requirements.txt b/requirements.txt
index 46c02d212..ca7d202c0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,9 @@ httpx
bs4
rich
tqdm
-m3u8
+m3u8
+lxml
+isodate
psutil
unidecode
curl_cffi
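A plausible reason for the two new dependencies (an assumption, not shown in this patch): lxml parses the DASH MPD, isodate decodes its ISO 8601 durations.

```python
import isodate
from lxml import etree

mpd = etree.fromstring(b'<MPD mediaPresentationDuration="PT1H24M9.88S"/>')
duration = isodate.parse_duration(mpd.get("mediaPresentationDuration"))
print(duration.total_seconds())  # 5049.88
```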
diff --git a/setup.py b/setup.py
index 7e97cb043..5c78b7ca5 100644
--- a/setup.py
+++ b/setup.py
@@ -2,12 +2,20 @@
import re
from setuptools import setup, find_packages
+# Variable
+base_path = os.path.abspath(os.path.dirname(__file__))
+
def read_readme():
- with open("README.md", "r", encoding="utf-8") as fh:
- return fh.read()
-
-with open(os.path.join(os.path.dirname(__file__), "requirements.txt"), "r", encoding="utf-8-sig") as f:
- required_packages = f.read().splitlines()
+ readme_path = os.path.join(base_path, "README.md")
+ if os.path.exists(readme_path):
+ with open(readme_path, "r", encoding="utf-8") as fh:
+ return fh.read()
+ return ""
+
+def read_requirements():
+ req_path = os.path.join(base_path, "requirements.txt")
+ if os.path.exists(req_path):
+ with open(req_path, "r", encoding="utf-8-sig") as f:
+ return [line.strip() for line in f if line.strip() and not line.startswith("#")]
+ return []
def get_version():
try:
@@ -25,20 +33,38 @@ def get_version():
setup(
name="StreamingCommunity",
version=get_version(),
+ description="Download content from streaming platforms",
long_description=read_readme(),
long_description_content_type="text/markdown",
author="Arrowar",
url="https://github.com/Arrowar/StreamingCommunity",
- packages=find_packages(include=["StreamingCommunity", "StreamingCommunity.*"]),
- install_requires=required_packages,
+ packages=find_packages(
+ exclude=["tests", "tests.*", "docs", "docs.*", "GUI", "GUI.*", "Test", "Test.*"]
+ ),
+ install_requires=read_requirements(),
python_requires='>=3.8',
entry_points={
"console_scripts": [
"streamingcommunity=StreamingCommunity.run:main",
],
},
- include_package_data=True,
+ include_package_data=False,
+ package_data={
+ '': ['*.txt', '*.md', '*.json', '*.yaml', '*.yml', '*.cfg'],
+ 'StreamingCommunity': ['**/*.txt', '**/*.json', '**/*.yaml'],
+ },
keywords="streaming community",
+ classifiers=[
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: End Users/Desktop",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Operating System :: OS Independent",
+ ],
project_urls={
"Bug Reports": "https://github.com/Arrowar/StreamingCommunity/issues",
"Source": "https://github.com/Arrowar/StreamingCommunity",
diff --git a/update.py b/update.py
index b7a1ceae5..05dd3de98 100644
--- a/update.py
+++ b/update.py
@@ -171,7 +171,7 @@ def print_commit_info(commit_info: dict):
# Add stats if available
if 'stats' in commit_info:
stats = commit_info['stats']
- table.add_row("Changes", f"+{stats['additions']} -[red]{stats['deletions']}[/red] ({stats['total']} total)")
+ table.add_row("Changes", f"+{stats['additions']} -[red]{stats['deletions']} ({stats['total']} total)")
# Add URL info
table.add_row("HTML URL", commit_info['html_url'])