2022-04-29 03:48:36 +02:00
|
|
|
import concurrent.futures
|
2022-04-17 22:58:28 +02:00
|
|
|
import contextlib
|
2021-12-20 07:06:46 +01:00
|
|
|
import json
|
|
|
|
import math
|
2015-07-28 22:26:16 +02:00
|
|
|
import os
|
2022-06-24 10:10:17 +02:00
|
|
|
import struct
|
2015-07-28 22:26:16 +02:00
|
|
|
import time
|
|
|
|
|
|
|
|
from .common import FileDownloader
|
|
|
|
from .http import HttpFD
|
2022-01-31 15:49:33 +01:00
|
|
|
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
|
2022-06-24 10:10:17 +02:00
|
|
|
from ..compat import compat_os_name
|
2023-07-09 09:53:02 +02:00
|
|
|
from ..networking import Request
|
|
|
|
from ..networking.exceptions import HTTPError, IncompleteRead
|
|
|
|
from ..utils import DownloadError, RetryManager, encodeFilename, traverse_obj
|
|
|
|
from ..utils.networking import HTTPHeaderDict
|
2015-07-28 22:26:16 +02:00
|
|
|
|
|
|
|
|
|
|
|
class HttpQuietDownloader(HttpFD):
    """An HttpFD that silently discards all screen and console-title output.

    FragmentFD uses this internally to fetch individual fragments without
    interleaving their progress output with the parent download's display.
    """

    def to_screen(self, *args, **kargs):
        # Deliberately swallow all output
        pass

    # Console-title updates are suppressed the same way
    to_console_title = to_screen
|
2022-04-29 03:48:36 +02:00
|
|
|
|
2015-07-28 22:26:16 +02:00
|
|
|
|
|
|
|
class FragmentFD(FileDownloader):
    """
    A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests).

    Available options:

    fragment_retries:   Number of times to retry a fragment for HTTP error
                        (DASH and hlsnative only). Default is 0 for API, but 10 for CLI
    skip_unavailable_fragments:
                        Skip unavailable fragments (DASH and hlsnative only)
    keep_fragments:     Keep downloaded fragments on disk after downloading is
                        finished
    concurrent_fragment_downloads:  The number of threads to use for native hls and dash downloads
    _no_ytdl_file:      Don't use .ytdl file

    For each incomplete fragment download yt-dlp keeps on disk a special
    bookkeeping file with download state and metadata (in future such files will
    be used for any incomplete download handled by yt-dlp). This file is
    used to properly handle resuming, check download file consistency and detect
    potential errors. The file has a .ytdl extension and represents a standard
    JSON file of the following format:

    extractor:
        Dictionary of extractor related data. TBD.

    downloader:
        Dictionary of downloader related data. May contain following data:
            current_fragment:
                Dictionary with current (being downloaded) fragment data:
                index:  0-based index of current fragment among all fragments
            fragment_count:
                Total count of fragments

    This feature is experimental and file format may change in future.
    """
|
|
|
|
|
2016-06-28 19:07:50 +02:00
|
|
|
def report_retry_fragment(self, err, frag_index, count, retries):
|
2022-08-30 17:28:28 +02:00
|
|
|
self.deprecation_warning('yt_dlp.downloader.FragmentFD.report_retry_fragment is deprecated. '
|
|
|
|
'Use yt_dlp.downloader.FileDownloader.report_retry instead')
|
2022-08-01 22:13:18 +02:00
|
|
|
return self.report_retry(err, count, retries, frag_index)
|
2016-03-19 15:41:24 +01:00
|
|
|
|
2021-10-19 18:21:33 +02:00
|
|
|
def report_skip_fragment(self, frag_index, err=None):
|
|
|
|
err = f' {err};' if err else ''
|
|
|
|
self.to_screen(f'[download]{err} Skipping fragment {frag_index:d} ...')
|
2016-08-26 23:52:18 +02:00
|
|
|
|
2016-11-13 16:06:16 +01:00
|
|
|
def _prepare_url(self, info_dict, url):
|
|
|
|
headers = info_dict.get('http_headers')
|
2023-07-09 09:53:02 +02:00
|
|
|
return Request(url, None, headers) if headers else url
|
2016-11-13 16:06:16 +01:00
|
|
|
|
2021-07-21 19:28:43 +02:00
|
|
|
    def _prepare_and_start_frag_download(self, ctx, info_dict):
        """Convenience wrapper: initialize the fragment download context, then start it."""
        self._prepare_frag_download(ctx)
        self._start_frag_download(ctx, info_dict)
|
2015-07-28 22:26:16 +02:00
|
|
|
|
2021-05-04 17:54:00 +02:00
|
|
|
def __do_ytdl_file(self, ctx):
|
2021-12-20 07:06:46 +01:00
|
|
|
return ctx['live'] is not True and ctx['tmpfilename'] != '-' and not self.params.get('_no_ytdl_file')
|
2017-04-24 18:05:56 +02:00
|
|
|
|
2017-04-23 21:54:17 +02:00
|
|
|
def _read_ytdl_file(self, ctx):
|
2018-04-28 19:33:31 +02:00
|
|
|
assert 'ytdl_corrupt' not in ctx
|
2021-12-23 03:29:03 +01:00
|
|
|
stream, _ = self.sanitize_open(self.ytdl_filename(ctx['filename']), 'r')
|
2018-04-28 19:33:31 +02:00
|
|
|
try:
|
2021-04-23 09:11:28 +02:00
|
|
|
ytdl_data = json.loads(stream.read())
|
|
|
|
ctx['fragment_index'] = ytdl_data['downloader']['current_fragment']['index']
|
|
|
|
if 'extra_state' in ytdl_data['downloader']:
|
|
|
|
ctx['extra_state'] = ytdl_data['downloader']['extra_state']
|
2018-04-28 19:33:31 +02:00
|
|
|
except Exception:
|
|
|
|
ctx['ytdl_corrupt'] = True
|
|
|
|
finally:
|
|
|
|
stream.close()
|
2017-04-23 21:54:17 +02:00
|
|
|
|
|
|
|
def _write_ytdl_file(self, ctx):
|
2021-12-23 03:29:03 +01:00
|
|
|
frag_index_stream, _ = self.sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
|
2021-08-09 14:10:24 +02:00
|
|
|
try:
|
|
|
|
downloader = {
|
|
|
|
'current_fragment': {
|
|
|
|
'index': ctx['fragment_index'],
|
|
|
|
},
|
|
|
|
}
|
|
|
|
if 'extra_state' in ctx:
|
|
|
|
downloader['extra_state'] = ctx['extra_state']
|
|
|
|
if ctx.get('fragment_count') is not None:
|
|
|
|
downloader['fragment_count'] = ctx['fragment_count']
|
|
|
|
frag_index_stream.write(json.dumps({'downloader': downloader}))
|
|
|
|
finally:
|
|
|
|
frag_index_stream.close()
|
2017-04-23 21:54:17 +02:00
|
|
|
|
2021-02-15 10:57:21 +01:00
|
|
|
def _download_fragment(self, ctx, frag_url, info_dict, headers=None, request_data=None):
|
2017-04-23 21:54:17 +02:00
|
|
|
fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], ctx['fragment_index'])
|
2020-11-26 18:27:34 +01:00
|
|
|
fragment_info_dict = {
|
2016-06-28 19:07:50 +02:00
|
|
|
'url': frag_url,
|
|
|
|
'http_headers': headers or info_dict.get('http_headers'),
|
2021-02-15 10:57:21 +01:00
|
|
|
'request_data': request_data,
|
2021-09-22 16:12:04 +02:00
|
|
|
'ctx_id': ctx.get('ctx_id'),
|
2020-11-26 18:27:34 +01:00
|
|
|
}
|
2023-05-24 20:00:43 +02:00
|
|
|
frag_resume_len = 0
|
|
|
|
if ctx['dl'].params.get('continuedl', True):
|
|
|
|
frag_resume_len = self.filesize_or_none(self.temp_name(fragment_filename))
|
|
|
|
fragment_info_dict['frag_resume_len'] = ctx['frag_resume_len'] = frag_resume_len
|
|
|
|
|
2022-04-29 18:02:31 +02:00
|
|
|
success, _ = ctx['dl'].download(fragment_filename, fragment_info_dict)
|
2016-06-28 19:07:50 +02:00
|
|
|
if not success:
|
2022-03-15 04:27:41 +01:00
|
|
|
return False
|
2020-11-26 18:27:34 +01:00
|
|
|
if fragment_info_dict.get('filetime'):
|
|
|
|
ctx['fragment_filetime'] = fragment_info_dict.get('filetime')
|
2021-06-21 20:59:50 +02:00
|
|
|
ctx['fragment_filename_sanitized'] = fragment_filename
|
2022-03-15 04:27:41 +01:00
|
|
|
return True
|
2021-06-21 20:59:50 +02:00
|
|
|
|
|
|
|
def _read_fragment(self, ctx):
|
2022-04-15 20:13:07 +02:00
|
|
|
if not ctx.get('fragment_filename_sanitized'):
|
|
|
|
return None
|
2022-03-15 04:27:41 +01:00
|
|
|
try:
|
|
|
|
down, frag_sanitized = self.sanitize_open(ctx['fragment_filename_sanitized'], 'rb')
|
|
|
|
except FileNotFoundError:
|
|
|
|
if ctx.get('live'):
|
|
|
|
return None
|
|
|
|
raise
|
2017-04-23 21:54:17 +02:00
|
|
|
ctx['fragment_filename_sanitized'] = frag_sanitized
|
|
|
|
frag_content = down.read()
|
2016-06-28 19:07:50 +02:00
|
|
|
down.close()
|
2021-06-21 20:59:50 +02:00
|
|
|
return frag_content
|
2016-06-28 19:07:50 +02:00
|
|
|
|
|
|
|
def _append_fragment(self, ctx, frag_content):
|
2017-04-23 21:54:17 +02:00
|
|
|
try:
|
|
|
|
ctx['dest_stream'].write(frag_content)
|
2017-12-02 15:15:45 +01:00
|
|
|
ctx['dest_stream'].flush()
|
2017-04-23 21:54:17 +02:00
|
|
|
finally:
|
2017-04-24 18:05:56 +02:00
|
|
|
if self.__do_ytdl_file(ctx):
|
2017-04-23 21:54:17 +02:00
|
|
|
self._write_ytdl_file(ctx)
|
2017-04-23 22:09:08 +02:00
|
|
|
if not self.params.get('keep_fragments', False):
|
2022-03-03 15:33:32 +01:00
|
|
|
self.try_remove(encodeFilename(ctx['fragment_filename_sanitized']))
|
2017-04-23 21:54:17 +02:00
|
|
|
del ctx['fragment_filename_sanitized']
|
2016-06-28 19:07:50 +02:00
|
|
|
|
2015-07-28 22:26:16 +02:00
|
|
|
    def _prepare_frag_download(self, ctx):
        """Set up ``ctx`` for a native fragment download.

        Announces the fragment count, creates the internal quiet HTTP
        downloader, establishes resume state (including reconciling it with
        the .ytdl bookkeeping file) and opens the destination stream.
        """
        if not ctx.setdefault('live', False):
            total_frags_str = '%d' % ctx['total_frags']
            ad_frags = ctx.get('ad_frags', 0)
            if ad_frags:
                total_frags_str += ' (not including %d ad)' % ad_frags
        else:
            # For live streams the total number of fragments is unknowable
            total_frags_str = 'unknown (live)'
        self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}')
        self.report_destination(ctx['filename'])
        # Per-fragment downloads must not produce their own progress output
        # or honour sleep intervals (sleeping between fragments would stall
        # the stream); hence the overridden params below.
        dl = HttpQuietDownloader(self.ydl, {
            **self.params,
            'noprogress': True,
            'test': False,
            'sleep_interval': 0,
            'max_sleep_interval': 0,
            'sleep_interval_subtitles': 0,
        })
        tmpfilename = self.temp_name(ctx['filename'])
        open_mode = 'wb'

        # Establish possible resume length
        resume_len = self.filesize_or_none(tmpfilename)
        if resume_len > 0:
            open_mode = 'ab'

        # Should be initialized before ytdl file check
        ctx.update({
            'tmpfilename': tmpfilename,
            'fragment_index': 0,
        })

        if self.__do_ytdl_file(ctx):
            ytdl_file_exists = os.path.isfile(encodeFilename(self.ytdl_filename(ctx['filename'])))
            continuedl = self.params.get('continuedl', True)
            if continuedl and ytdl_file_exists:
                self._read_ytdl_file(ctx)
                is_corrupt = ctx.get('ytdl_corrupt') is True
                # A positive fragment index with an empty temp file means the
                # .ytdl file and the data on disk disagree
                is_inconsistent = ctx['fragment_index'] > 0 and resume_len == 0
                if is_corrupt or is_inconsistent:
                    message = (
                        '.ytdl file is corrupt' if is_corrupt else
                        'Inconsistent state of incomplete fragment download')
                    self.report_warning(
                        '%s. Restarting from the beginning ...' % message)
                    ctx['fragment_index'] = resume_len = 0
                    if 'ytdl_corrupt' in ctx:
                        del ctx['ytdl_corrupt']
                    self._write_ytdl_file(ctx)

            else:
                if not continuedl:
                    if ytdl_file_exists:
                        self._read_ytdl_file(ctx)
                    # Resume is disabled: discard any previous state
                    ctx['fragment_index'] = resume_len = 0
                self._write_ytdl_file(ctx)
                assert ctx['fragment_index'] == 0

        dest_stream, tmpfilename = self.sanitize_open(tmpfilename, open_mode)

        ctx.update({
            'dl': dl,
            'dest_stream': dest_stream,
            'tmpfilename': tmpfilename,
            # Total complete fragments downloaded so far in bytes
            'complete_frags_downloaded_bytes': resume_len,
        })
|
|
|
|
|
2021-07-21 19:28:43 +02:00
|
|
|
    def _start_frag_download(self, ctx, info_dict):
        """Install the aggregating progress hook on the fragment downloader.

        The hook translates per-fragment progress reports into whole-download
        figures (downloaded bytes, ETA, speed, size estimate) shared through
        ``ctx``/``state``. Returns the download start timestamp.
        """
        resume_len = ctx['complete_frags_downloaded_bytes']
        total_frags = ctx['total_frags']
        ctx_id = ctx.get('ctx_id')
        # This dict stores the download progress, it's updated by the progress
        # hook
        state = {
            'status': 'downloading',
            'downloaded_bytes': resume_len,
            'fragment_index': ctx['fragment_index'],
            'fragment_count': total_frags,
            'filename': ctx['filename'],
            'tmpfilename': ctx['tmpfilename'],
        }

        start = time.time()
        ctx.update({
            'started': start,
            'fragment_started': start,
            # Amount of fragment's bytes downloaded by the time of the previous
            # frag progress hook invocation
            'prev_frag_downloaded_bytes': 0,
        })

        def frag_progress_hook(s):
            if s['status'] not in ('downloading', 'finished'):
                return

            # Pick up a fragment count discovered mid-download (e.g. live)
            if not total_frags and ctx.get('fragment_count'):
                state['fragment_count'] = ctx['fragment_count']

            # Ignore events from other concurrent downloads sharing this dl
            if ctx_id is not None and s.get('ctx_id') != ctx_id:
                return

            state['max_progress'] = ctx.get('max_progress')
            state['progress_idx'] = ctx.get('progress_idx')

            time_now = time.time()
            state['elapsed'] = time_now - start
            frag_total_bytes = s.get('total_bytes') or 0
            s['fragment_info_dict'] = s.pop('info_dict', {})
            if not ctx['live']:
                # Extrapolate total size from the average fragment size so far
                estimated_size = (
                    (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes)
                    / (state['fragment_index'] + 1) * total_frags)
                state['total_bytes_estimate'] = estimated_size

            if s['status'] == 'finished':
                state['fragment_index'] += 1
                ctx['fragment_index'] = state['fragment_index']
                # Add only the bytes not already counted by earlier 'downloading' events
                state['downloaded_bytes'] += frag_total_bytes - ctx['prev_frag_downloaded_bytes']
                ctx['complete_frags_downloaded_bytes'] = state['downloaded_bytes']
                ctx['speed'] = state['speed'] = self.calc_speed(
                    ctx['fragment_started'], time_now, frag_total_bytes)
                ctx['fragment_started'] = time.time()
                ctx['prev_frag_downloaded_bytes'] = 0
            else:
                frag_downloaded_bytes = s['downloaded_bytes']
                state['downloaded_bytes'] += frag_downloaded_bytes - ctx['prev_frag_downloaded_bytes']
                # Exclude resumed bytes so speed reflects this session only
                ctx['speed'] = state['speed'] = self.calc_speed(
                    ctx['fragment_started'], time_now, frag_downloaded_bytes - ctx.get('frag_resume_len', 0))
                if not ctx['live']:
                    state['eta'] = self.calc_eta(state['speed'], estimated_size - state['downloaded_bytes'])
                ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
            self._hook_progress(state, info_dict)

        ctx['dl'].add_progress_hook(frag_progress_hook)

        return start
|
|
|
|
|
2021-07-21 19:28:43 +02:00
|
|
|
    def _finish_frag_download(self, ctx, info_dict):
        """Finalize a fragment download: close streams, clean up the .ytdl
        file, move the temp file into place and fire the 'finished' hook.

        Returns False (after reporting an error) if nothing was downloaded.
        """
        ctx['dest_stream'].close()
        if self.__do_ytdl_file(ctx):
            self.try_remove(self.ytdl_filename(ctx['filename']))
        elapsed = time.time() - ctx['started']

        # '-' means we streamed to stdout; there is no file to rename
        to_file = ctx['tmpfilename'] != '-'
        if to_file:
            downloaded_bytes = self.filesize_or_none(ctx['tmpfilename'])
        else:
            downloaded_bytes = ctx['complete_frags_downloaded_bytes']

        if not downloaded_bytes:
            if to_file:
                self.try_remove(ctx['tmpfilename'])
            self.report_error('The downloaded file is empty')
            return False
        elif to_file:
            self.try_rename(ctx['tmpfilename'], ctx['filename'])
            filetime = ctx.get('fragment_filetime')
            if self.params.get('updatetime', True) and filetime:
                # Best effort: setting the mtime may fail on some filesystems
                with contextlib.suppress(Exception):
                    os.utime(ctx['filename'], (time.time(), filetime))

        self._hook_progress({
            'downloaded_bytes': downloaded_bytes,
            'total_bytes': downloaded_bytes,
            'filename': ctx['filename'],
            'status': 'finished',
            'elapsed': elapsed,
            'ctx_id': ctx.get('ctx_id'),
            'max_progress': ctx.get('max_progress'),
            'progress_idx': ctx.get('progress_idx'),
        }, info_dict)
        return True
|
2021-02-08 17:46:01 +01:00
|
|
|
|
|
|
|
def _prepare_external_frag_download(self, ctx):
|
|
|
|
if 'live' not in ctx:
|
|
|
|
ctx['live'] = False
|
|
|
|
if not ctx['live']:
|
|
|
|
total_frags_str = '%d' % ctx['total_frags']
|
|
|
|
ad_frags = ctx.get('ad_frags', 0)
|
|
|
|
if ad_frags:
|
|
|
|
total_frags_str += ' (not including %d ad)' % ad_frags
|
|
|
|
else:
|
|
|
|
total_frags_str = 'unknown (live)'
|
2022-04-11 17:10:28 +02:00
|
|
|
self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}')
|
2021-02-08 17:46:01 +01:00
|
|
|
|
|
|
|
tmpfilename = self.temp_name(ctx['filename'])
|
|
|
|
|
|
|
|
# Should be initialized before ytdl file check
|
|
|
|
ctx.update({
|
|
|
|
'tmpfilename': tmpfilename,
|
|
|
|
'fragment_index': 0,
|
|
|
|
})
|
2021-06-21 20:59:50 +02:00
|
|
|
|
2021-09-22 01:57:07 +02:00
|
|
|
    def decrypter(self, info_dict):
        """Return a ``decrypt_fragment(fragment, frag_content)`` callable.

        The callable transparently passes through unencrypted fragments and
        decrypts AES-128 (CBC) fragments, fetching and caching the key per URL.
        """
        # Cache of key URL -> key bytes, shared by all fragments of this download
        _key_cache = {}

        def _get_key(url):
            if url not in _key_cache:
                _key_cache[url] = self.ydl.urlopen(self._prepare_url(info_dict, url)).read()
            return _key_cache[url]

        def decrypt_fragment(fragment, frag_content):
            if frag_content is None:
                return
            decrypt_info = fragment.get('decrypt_info')
            if not decrypt_info or decrypt_info['METHOD'] != 'AES-128':
                return frag_content
            # Default IV per the HLS spec: big-endian media sequence number
            iv = decrypt_info.get('IV') or struct.pack('>8xq', fragment['media_sequence'])
            # Cache the resolved key on decrypt_info for subsequent fragments
            decrypt_info['KEY'] = (decrypt_info.get('KEY')
                                   or _get_key(traverse_obj(info_dict, ('hls_aes', 'uri')) or decrypt_info['URI']))
            # Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
            # size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
            # not what it decrypts to.
            if self.params.get('test', False):
                return frag_content
            return unpad_pkcs7(aes_cbc_decrypt_bytes(frag_content, decrypt_info['KEY'], iv))

        return decrypt_fragment
|
|
|
|
|
2022-10-18 15:03:00 +02:00
|
|
|
    def download_and_append_fragments_multiple(self, *args, **kwargs):
        '''
        @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
        all args must be either tuple or list
        '''
        # Shared flag the worker iterators poll; flipped on KeyboardInterrupt
        interrupt_trigger = [True]
        max_progress = len(args)
        if max_progress == 1:
            return self.download_and_append_fragments(*args[0], **kwargs)
        max_workers = self.params.get('concurrent_fragment_downloads', 1)
        if max_progress > 1:
            self._prepare_multiline_status(max_progress)
        is_live = any(traverse_obj(args, (..., 2, 'is_live')))

        def thread_func(idx, ctx, fragments, info_dict, tpe):
            ctx['max_progress'] = max_progress
            ctx['progress_idx'] = idx
            return self.download_and_append_fragments(
                ctx, fragments, info_dict, **kwargs, tpe=tpe, interrupt_trigger=interrupt_trigger)

        class FTPE(concurrent.futures.ThreadPoolExecutor):
            # has to stop this or it's going to wait on the worker thread itself
            def __exit__(self, exc_type, exc_val, exc_tb):
                pass

        if compat_os_name == 'nt':
            # On Windows, poll with a short timeout so Ctrl-C can be
            # delivered while waiting on the future
            def future_result(future):
                while True:
                    try:
                        return future.result(0.1)
                    except KeyboardInterrupt:
                        raise
                    except concurrent.futures.TimeoutError:
                        continue
        else:
            def future_result(future):
                return future.result()

        def interrupt_trigger_iter(fg):
            # Stop yielding fragments once an interrupt has been requested
            for f in fg:
                if not interrupt_trigger[0]:
                    break
                yield f

        spins = []
        for idx, (ctx, fragments, info_dict) in enumerate(args):
            tpe = FTPE(math.ceil(max_workers / max_progress))
            job = tpe.submit(thread_func, idx, ctx, interrupt_trigger_iter(fragments), info_dict, tpe)
            spins.append((tpe, job))

        result = True
        for tpe, job in spins:
            try:
                result = result and future_result(job)
            except KeyboardInterrupt:
                interrupt_trigger[0] = False
            finally:
                tpe.shutdown(wait=True)
        if not interrupt_trigger[0] and not is_live:
            raise KeyboardInterrupt()
        # we expect the user wants to stop and DO WANT the preceding postprocessors to run;
        # so returning a intermediate result here instead of KeyboardInterrupt on live
        return result
|
2021-09-22 16:12:04 +02:00
|
|
|
|
2021-12-20 07:06:46 +01:00
|
|
|
    def download_and_append_fragments(
            self, ctx, fragments, info_dict, *, is_fatal=(lambda idx: False),
            pack_func=(lambda content, idx: content), finish_func=None,
            tpe=None, interrupt_trigger=(True, )):
        """Download each fragment (with retries), decrypt it if needed and
        append it to the destination stream; then finalize the download.

        is_fatal(idx) decides whether a missing fragment aborts the download;
        pack_func transforms fragment bytes before appending; finish_func, if
        given, produces trailing bytes written after the last fragment.
        """

        if not self.params.get('skip_unavailable_fragments', True):
            # Every missing fragment is then a hard failure
            is_fatal = lambda _: True

        def download_fragment(fragment, ctx):
            if not interrupt_trigger[0]:
                return

            frag_index = ctx['fragment_index'] = fragment['frag_index']
            ctx['last_error'] = None
            headers = HTTPHeaderDict(info_dict.get('http_headers'))
            byte_range = fragment.get('byte_range')
            if byte_range:
                headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)

            # Never skip the first fragment
            fatal = is_fatal(fragment.get('index') or (frag_index - 1))

            def error_callback(err, count, retries):
                if fatal and count > retries:
                    # Out of retries on a fatal fragment: abort the output file
                    ctx['dest_stream'].close()
                self.report_retry(err, count, retries, frag_index, fatal)
                ctx['last_error'] = err

            for retry in RetryManager(self.params.get('fragment_retries'), error_callback):
                try:
                    ctx['fragment_count'] = fragment.get('fragment_count')
                    if not self._download_fragment(
                            ctx, fragment['url'], info_dict, headers, info_dict.get('request_data')):
                        return
                except (HTTPError, IncompleteRead) as err:
                    retry.error = err
                    continue
                except DownloadError:  # has own retry settings
                    if fatal:
                        raise

        def append_fragment(frag_content, frag_index, ctx):
            # Returns False only when a missing fragment is fatal
            if frag_content:
                self._append_fragment(ctx, pack_func(frag_content, frag_index))
            elif not is_fatal(frag_index - 1):
                self.report_skip_fragment(frag_index, 'fragment not found')
            else:
                ctx['dest_stream'].close()
                self.report_error(f'fragment {frag_index} not found, unable to continue')
                return False
            return True

        decrypt_fragment = self.decrypter(info_dict)

        # Divide the configured concurrency among simultaneous downloads
        max_workers = math.ceil(
            self.params.get('concurrent_fragment_downloads', 1) / ctx.get('max_progress', 1))
        if max_workers > 1:
            def _download_fragment(fragment):
                # Each worker gets its own ctx copy; results are merged back below
                ctx_copy = ctx.copy()
                download_fragment(fragment, ctx_copy)
                return fragment, fragment['frag_index'], ctx_copy.get('fragment_filename_sanitized')

            self.report_warning('The download speed shown is only of one thread. This is a known issue')
            with tpe or concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
                try:
                    for fragment, frag_index, frag_filename in pool.map(_download_fragment, fragments):
                        ctx.update({
                            'fragment_filename_sanitized': frag_filename,
                            'fragment_index': frag_index,
                        })
                        if not append_fragment(decrypt_fragment(fragment, self._read_fragment(ctx)), frag_index, ctx):
                            return False
                except KeyboardInterrupt:
                    self._finish_multiline_status()
                    self.report_error(
                        'Interrupted by user. Waiting for all threads to shutdown...', is_error=False, tb=False)
                    pool.shutdown(wait=False)
                    raise
        else:
            for fragment in fragments:
                if not interrupt_trigger[0]:
                    break
                try:
                    download_fragment(fragment, ctx)
                    result = append_fragment(
                        decrypt_fragment(fragment, self._read_fragment(ctx)), fragment['frag_index'], ctx)
                except KeyboardInterrupt:
                    # For live streams, treat Ctrl-C as "stop and keep what we have"
                    if info_dict.get('is_live'):
                        break
                    raise
                if not result:
                    return False

        if finish_func is not None:
            ctx['dest_stream'].write(finish_func())
            ctx['dest_stream'].flush()
        return self._finish_frag_download(ctx, info_dict)
|