Commit 8ea1c13

[script.module.youtube.dl@matrix] 23.04.01+matrix.1 (#2451)
Parent: 40bfcf5

151 files changed: +11691 / −3068 lines

script.module.youtube.dl/addon.xml

Lines changed: 2 additions & 2 deletions
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="script.module.youtube.dl" name="youtube-dl Control" version="21.303.0+matrix.1" provider-name="ytdl-org,ruuk,sy6sy2,wwark">
+<addon id="script.module.youtube.dl" name="youtube-dl Control" version="23.04.01+matrix.1" provider-name="ytdl-org,ruuk,sy6sy2,wwark">
     <requires>
         <import addon="xbmc.python" version="3.0.0"/>
         <import addon="script.module.addon.signals" version="0.0.5+matrix.1"/>
@@ -19,7 +19,7 @@
     <description lang="es_ES">Módulo que proporciona acceso a YouTube-dl para la extracción de flujos de video de cientos de sitios. La versión está basada en la versión de fecha youtube-dl: YY.MDD.V donde V es la subversión específica del complemento. También permite descargas con la opción de descarga en segundo plano con una cola y un administrador de colas.</description>
     <license>LGPL-2.1-only</license>
     <forum>https://forum.kodi.tv/showthread.php?tid=200877</forum>
-    <source>https://github.com/Catch-up-TV-and-More/script.module.youtube.dl</source>
+    <source>https://github.com/xbmc/repo-scripts/tree/matrix/script.module.youtube.dl</source>
     <assets>
         <icon>icon.png</icon>
     </assets>

script.module.youtube.dl/lib/youtube_dl/YoutubeDL.py

Lines changed: 74 additions & 25 deletions
@@ -39,6 +39,7 @@
     compat_str,
     compat_tokenize_tokenize,
     compat_urllib_error,
+    compat_urllib_parse,
     compat_urllib_request,
     compat_urllib_request_DataHandler,
 )
@@ -60,6 +61,7 @@
     format_bytes,
     formatSeconds,
     GeoRestrictedError,
+    HEADRequest,
     int_or_none,
     ISO3166Utils,
     locked_file,
@@ -73,6 +75,8 @@
     PostProcessingError,
     preferredencoding,
     prepend_extension,
+    process_communicate_or_kill,
+    PUTRequest,
     register_socks_protocols,
     render_table,
     replace_extension,
@@ -720,7 +724,7 @@ def prepare_filename(self, info_dict):
                 filename = encodeFilename(filename, True).decode(preferredencoding())
             return sanitize_path(filename)
         except ValueError as err:
-            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
+            self.report_error('Error in output template: ' + error_to_compat_str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
             return None

     def _match_entry(self, info_dict, incomplete):
@@ -773,11 +777,20 @@ def add_extra_info(info_dict, extra_info):

     def extract_info(self, url, download=True, ie_key=None, extra_info={},
                      process=True, force_generic_extractor=False):
-        '''
-        Returns a list with a dictionary for each video we find.
-        If 'download', also downloads the videos.
-        extra_info is a dict containing the extra values to add to each result
-        '''
+        """
+        Return a list with a dictionary for each video extracted.
+
+        Arguments:
+        url -- URL to extract
+
+        Keyword arguments:
+        download -- whether to download videos during extraction
+        ie_key -- extractor key hint
+        extra_info -- dictionary containing the extra values to add to each result
+        process -- whether to resolve all unresolved references (URLs, playlist items),
+            must be True for download to work.
+        force_generic_extractor -- force using the generic extractor
+        """

         if not ie_key and force_generic_extractor:
             ie_key = 'Generic'
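
For context on the extract_info docstring reworked above, a minimal usage sketch (not part of this commit; it assumes youtube_dl is importable and uses a placeholder URL):

from youtube_dl import YoutubeDL

# download=False returns metadata only; process defaults to True, so formats
# and playlist entries are resolved and the result is ready for downloading.
with YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info('https://example.com/watch/12345', download=False)
    print(info.get('title'), info.get('upload_date'))
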
@@ -1511,14 +1524,18 @@ def sanitize_numeric_fields(info):
         if 'display_id' not in info_dict and 'id' in info_dict:
             info_dict['display_id'] = info_dict['id']

-        if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
-            # Working around out-of-range timestamp values (e.g. negative ones on Windows,
-            # see http://bugs.python.org/issue1646728)
-            try:
-                upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
-                info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
-            except (ValueError, OverflowError, OSError):
-                pass
+        for ts_key, date_key in (
+                ('timestamp', 'upload_date'),
+                ('release_timestamp', 'release_date'),
+        ):
+            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
+                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
+                # see http://bugs.python.org/issue1646728)
+                try:
+                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
+                    info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
+                except (ValueError, OverflowError, OSError):
+                    pass

         # Auto generate title fields corresponding to the *_number fields when missing
         # in order to always have clean titles. This is very common for TV series.
@@ -1556,9 +1573,6 @@ def sanitize_numeric_fields(info):
         else:
             formats = info_dict['formats']

-        if not formats:
-            raise ExtractorError('No video formats found!')
-
         def is_wellformed(f):
             url = f.get('url')
             if not url:
@@ -1571,7 +1585,10 @@ def is_wellformed(f):
             return True

         # Filter out malformed formats for better extraction robustness
-        formats = list(filter(is_wellformed, formats))
+        formats = list(filter(is_wellformed, formats or []))
+
+        if not formats:
+            raise ExtractorError('No video formats found!')

         formats_dict = {}
@@ -1765,10 +1782,9 @@ def process_info(self, info_dict):

         assert info_dict.get('_type', 'video') == 'video'

-        max_downloads = self.params.get('max_downloads')
-        if max_downloads is not None:
-            if self._num_downloads >= int(max_downloads):
-                raise MaxDownloadsReached()
+        max_downloads = int_or_none(self.params.get('max_downloads')) or float('inf')
+        if self._num_downloads >= max_downloads:
+            raise MaxDownloadsReached()

         # TODO: backward compatibility, to be removed
         info_dict['fulltitle'] = info_dict['title']
@@ -1893,8 +1909,17 @@ def ensure_dir_exists(path):

         if not self.params.get('skip_download', False):
             try:
+                def checked_get_suitable_downloader(info_dict, params):
+                    ed_args = params.get('external_downloader_args')
+                    dler = get_suitable_downloader(info_dict, params)
+                    if ed_args and not params.get('external_downloader_args'):
+                        # external_downloader_args was cleared because external_downloader was rejected
+                        self.report_warning('Requested external downloader cannot be used: '
+                                            'ignoring --external-downloader-args.')
+                    return dler
+
                 def dl(name, info):
-                    fd = get_suitable_downloader(info, self.params)(self, self.params)
+                    fd = checked_get_suitable_downloader(info, self.params)(self, self.params)
                     for ph in self._progress_hooks:
                         fd.add_progress_hook(ph)
                     if self.params.get('verbose'):
@@ -2036,9 +2061,12 @@ def compatible_formats(formats):
            try:
                self.post_process(filename, info_dict)
            except (PostProcessingError) as err:
-                self.report_error('postprocessing: %s' % str(err))
+                self.report_error('postprocessing: %s' % error_to_compat_str(err))
                return
        self.record_download_archive(info_dict)
+        # avoid possible nugatory search for further items (PR #26638)
+        if self._num_downloads >= max_downloads:
+            raise MaxDownloadsReached()

     def download(self, url_list):
         """Download a given list of URLs."""
@@ -2272,6 +2300,27 @@ def urlopen(self, req):
         """ Start an HTTP download """
         if isinstance(req, compat_basestring):
             req = sanitized_Request(req)
+        # an embedded /../ sequence is not automatically handled by urllib2
+        # see https://github.com/yt-dlp/yt-dlp/issues/3355
+        url = req.get_full_url()
+        parts = url.partition('/../')
+        if parts[1]:
+            url = compat_urllib_parse.urljoin(parts[0] + parts[1][:1], parts[1][1:] + parts[2])
+        if url:
+            # worse, URL path may have initial /../ against RFCs: work-around
+            # by stripping such prefixes, like eg Firefox
+            parts = compat_urllib_parse.urlsplit(url)
+            path = parts.path
+            while path.startswith('/../'):
+                path = path[3:]
+            url = parts._replace(path=path).geturl()
+            # get a new Request with the munged URL
+            if url != req.get_full_url():
+                req_type = {'HEAD': HEADRequest, 'PUT': PUTRequest}.get(
+                    req.get_method(), compat_urllib_request.Request)
+                req = req_type(
+                    url, data=req.data, headers=dict(req.header_items()),
+                    origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
         return self._opener.open(req, timeout=self._socket_timeout)

     def print_debug_header(self):
@@ -2301,7 +2350,7 @@ def print_debug_header(self):
                 ['git', 'rev-parse', '--short', 'HEAD'],
                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                 cwd=os.path.dirname(os.path.abspath(__file__)))
-            out, err = sp.communicate()
+            out, err = process_communicate_or_kill(sp)
             out = out.decode().strip()
             if re.match('[0-9a-f]+', out):
                 self._write_string('[debug] Git HEAD: ' + out + '\n')
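
The urlopen change above collapses an embedded /../ before the request is issued, since urllib does not do this by itself. A standalone sketch of the same idea using only the Python 3 stdlib (the real code goes through compat_urllib_parse and rebuilds the Request object; the helper name normalize_dotdot_url is made up here):

from urllib.parse import urljoin, urlsplit


def normalize_dotdot_url(url):
    # Collapse the first embedded /../ via urljoin, as the patch does.
    parts = url.partition('/../')
    if parts[1]:
        url = urljoin(parts[0] + parts[1][:1], parts[1][1:] + parts[2])
    # Strip any /../ still left at the start of the path (against the RFCs,
    # but seen in the wild), mirroring the while-loop in the patch.
    split = urlsplit(url)
    path = split.path
    while path.startswith('/../'):
        path = path[3:]
    return split._replace(path=path).geturl()


print(normalize_dotdot_url('https://example.com/a/b/../c.mp4'))
# https://example.com/a/c.mp4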

script.module.youtube.dl/lib/youtube_dl/aes.py

Lines changed: 36 additions & 3 deletions
@@ -8,6 +8,18 @@
 BLOCK_SIZE_BYTES = 16


+def pkcs7_padding(data):
+    """
+    PKCS#7 padding
+
+    @param {int[]} data        cleartext
+    @returns {int[]}           padding data
+    """
+
+    remaining_length = BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES
+    return data + [remaining_length] * remaining_length
+
+
 def aes_ctr_decrypt(data, key, counter):
     """
     Decrypt with aes in counter mode
@@ -76,8 +88,7 @@ def aes_cbc_encrypt(data, key, iv):
     previous_cipher_block = iv
     for i in range(block_count):
         block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
-        remaining_length = BLOCK_SIZE_BYTES - len(block)
-        block += [remaining_length] * remaining_length
+        block = pkcs7_padding(block)
         mixed_block = xor(block, previous_cipher_block)

         encrypted_block = aes_encrypt(mixed_block, expanded_key)
@@ -88,6 +99,28 @@ def aes_cbc_encrypt(data, key, iv):
     return encrypted_data


+def aes_ecb_encrypt(data, key):
+    """
+    Encrypt with aes in ECB mode. Using PKCS#7 padding
+
+    @param {int[]} data        cleartext
+    @param {int[]} key         16/24/32-Byte cipher key
+    @returns {int[]}           encrypted data
+    """
+    expanded_key = key_expansion(key)
+    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
+
+    encrypted_data = []
+    for i in range(block_count):
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block = pkcs7_padding(block)
+
+        encrypted_block = aes_encrypt(block, expanded_key)
+        encrypted_data += encrypted_block
+
+    return encrypted_data
+
+
 def key_expansion(data):
     """
     Generate key schedule
@@ -303,7 +336,7 @@ def xor(data1, data2):


 def rijndael_mul(a, b):
-    if(a == 0 or b == 0):
+    if (a == 0 or b == 0):
         return 0
     return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
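
The pkcs7_padding helper factored out above is self-contained, so its behaviour is easy to check in isolation. A small standalone illustration (BLOCK_SIZE_BYTES and pkcs7_padding are copied from the diff; the sample input is made up):

BLOCK_SIZE_BYTES = 16


def pkcs7_padding(data):
    # Pad a list of byte values up to the next 16-byte boundary.
    remaining_length = BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES
    return data + [remaining_length] * remaining_length


# A 5-byte block gains eleven padding bytes, each with value 11 (0x0B).
print(pkcs7_padding([0x61, 0x62, 0x63, 0x64, 0x65]))
# [97, 98, 99, 100, 101, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11]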

script.module.youtube.dl/lib/youtube_dl/cache.py

Lines changed: 23 additions & 5 deletions
@@ -10,20 +10,29 @@

 from .compat import compat_getenv
 from .utils import (
+    error_to_compat_str,
     expand_path,
+    is_outdated_version,
+    try_get,
     write_json_file,
 )
+from .version import __version__


 class Cache(object):
+
+    _YTDL_DIR = 'youtube-dl'
+    _VERSION_KEY = _YTDL_DIR + '_version'
+    _DEFAULT_VERSION = '2021.12.17'
+
     def __init__(self, ydl):
         self._ydl = ydl

     def _get_root_dir(self):
         res = self._ydl.params.get('cachedir')
         if res is None:
             cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
-            res = os.path.join(cache_root, 'youtube-dl')
+            res = os.path.join(cache_root, self._YTDL_DIR)
         return expand_path(res)

     def _get_cache_fn(self, section, key, dtype):
@@ -50,13 +59,22 @@ def store(self, section, key, data, dtype='json'):
             except OSError as ose:
                 if ose.errno != errno.EEXIST:
                     raise
-            write_json_file(data, fn)
+            write_json_file({self._VERSION_KEY: __version__, 'data': data}, fn)
         except Exception:
             tb = traceback.format_exc()
             self._ydl.report_warning(
                 'Writing cache to %r failed: %s' % (fn, tb))

-    def load(self, section, key, dtype='json', default=None):
+    def _validate(self, data, min_ver):
+        version = try_get(data, lambda x: x[self._VERSION_KEY])
+        if not version:  # Backward compatibility
+            data, version = {'data': data}, self._DEFAULT_VERSION
+        if not is_outdated_version(version, min_ver or '0', assume_new=False):
+            return data['data']
+        self._ydl.to_screen(
+            'Discarding old cache from version {version} (needs {min_ver})'.format(**locals()))
+
+    def load(self, section, key, dtype='json', default=None, min_ver=None):
         assert dtype in ('json',)

         if not self.enabled:
@@ -66,12 +84,12 @@ def load(self, section, key, dtype='json', default=None):
         try:
             try:
                 with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
-                    return json.load(cachef)
+                    return self._validate(json.load(cachef), min_ver)
             except ValueError:
                 try:
                     file_size = os.path.getsize(cache_fn)
                 except (OSError, IOError) as oe:
-                    file_size = str(oe)
+                    file_size = error_to_compat_str(oe)
                 self._ydl.report_warning(
                     'Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
         except IOError:
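
The store/load changes above wrap every cache entry with a version stamp so entries written by an older youtube-dl can be discarded. A hedged sketch of the resulting on-disk JSON shape and a min_ver lookup (the section name, key and threshold below are illustrative, not taken from this commit):

import json

entry = {
    'youtube-dl_version': '2021.12.17',   # _VERSION_KEY, filled from youtube_dl.version.__version__
    'data': ['whatever', 'was', 'stored'],
}
print(json.dumps(entry))

# A caller can then demand a minimum writer version; entries that are too old
# are discarded (the load call then returns None), e.g.:
#     self.cache.load('some-section', 'some-key', min_ver='2021.12.17')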
