mirror of https://github.com/ytdl-org/youtube-dl.git
Merge remote-tracking branch 'jtwaleson/master'
commit 784b6d3a9b
@@ -9,16 +9,17 @@ import youtube_dl
 BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
 BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"

 def build_completion(opt_parser):
     opts_flag = []
     for group in opt_parser.option_groups:
         for option in group.option_list:
-            #for every long flag
+            # for every long flag
             opts_flag.append(option.get_opt_string())
     with open(BASH_COMPLETION_TEMPLATE) as f:
         template = f.read()
     with open(BASH_COMPLETION_FILE, "w") as f:
-        #just using the special char
+        # just using the special char
         filled_template = template.replace("{{flags}}", " ".join(opts_flag))
         f.write(filled_template)
@@ -233,6 +233,7 @@ def rmtree(path):
 #==============================================================================

 class BuildError(Exception):
     def __init__(self, output, code=500):
         self.output = output
@@ -369,7 +370,7 @@ class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, Clea
 class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
-    actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching.
+    actionDict = {'build': Builder, 'download': Builder}  # They're the same, no more caching.

     def do_GET(self):
         path = urlparse.urlparse(self.path)
@@ -23,6 +23,7 @@ EXTRA_ARGS = {
     'batch-file': ['--require-parameter'],
 }

 def build_completion(opt_parser):
     commands = []
@@ -1,8 +1,5 @@
 #!/usr/bin/env python3
 import hashlib
 import shutil
 import subprocess
 import tempfile
 import urllib.request
 import json
@@ -11,22 +11,22 @@ except NameError:
 versions_info = json.load(open('update/versions.json'))
 if 'signature' in versions_info:
     del versions_info['signature']

 print('Enter the PKCS1 private key, followed by a blank line:')
 privkey = b''
 while True:
     try:
         line = input()
     except EOFError:
         break
     if line == '':
         break
     privkey += line.encode('ascii') + b'\n'
 privkey = rsa.PrivateKey.load_pkcs1(privkey)

 signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
 print('signature: ' + signature)

 versions_info['signature'] = signature
 json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
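The script above signs the sorted JSON dump of versions.json with PKCS#1 v1.5/SHA-256 and stores the hex signature back into the file. A minimal companion sketch of the verifying side, assuming the matching public key is available as 'versions.pub' (that filename is illustrative, not from the commit):

    import json
    from binascii import unhexlify

    import rsa

    versions_info = json.load(open('update/versions.json'))
    signature = unhexlify(versions_info.pop('signature'))
    payload = json.dumps(versions_info, sort_keys=True).encode('utf-8')

    pubkey = rsa.PublicKey.load_pkcs1(open('versions.pub', 'rb').read())
    try:
        rsa.verify(payload, signature, pubkey)  # raises on a bad signature
        print('signature OK')
    except rsa.pkcs1.VerificationError:
        print('signature INVALID')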
@@ -5,7 +5,7 @@ from __future__ import with_statement

 import datetime
 import glob
 import io  # For Python 2 compatibilty
 import os
 import re
@@ -73,4 +73,3 @@ atom_template = atom_template.replace('@ENTRIES@', entries_str)

 with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
     atom_file.write(atom_template)
@@ -9,6 +9,7 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(
 import youtube_dl

 def main():
     with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
         template = tmplf.read()
@@ -4,7 +4,7 @@ import sys, os

 try:
     import urllib.request as compat_urllib_request
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib2 as compat_urllib_request

 sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
@@ -12,9 +12,9 @@ sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorr
 sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')

 try:
-    raw_input()
-except NameError: # Python 3
-    input()
+    raw_input()
+except NameError:  # Python 3
+    input()

 filename = sys.argv[0]
@@ -9,4 +9,4 @@ py2exe_options = {
     "dll_excludes": ['w9xpopen.exe']
 }

-setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)
+setup(console=['youtube-dl.py'], options={"py2exe": py2exe_options}, zipfile=None)
@@ -4,13 +4,17 @@ import sys, os
 import urllib2
 import json, hashlib

 def rsa_verify(message, signature, key):
     from struct import pack
     from hashlib import sha256
     from sys import version_info

     def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
+        if version_info[0] == 2:
+            return x
+        else:
+            return x.encode('latin1')
     assert(type(message) == type(b('')))
     block_size = 0
     n = key[0]
@@ -23,13 +27,17 @@ def rsa_verify(message, signature, key):
         raw_bytes.insert(0, pack("B", signature & 0xFF))
         signature >>= 8
     signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
+    if signature[0:2] != b('\x00\x01'):
+        return False
     signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+    if not b('\x00') in signature:
+        return False
+    signature = signature[signature.index(b('\x00')) + 1:]
+    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
+        return False
     signature = signature[19:]
-    if signature != sha256(message).digest(): return False
+    if signature != sha256(message).digest():
+        return False
     return True

 sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
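rsa_verify re-implements PKCS#1 v1.5 signature checking by hand: the recovered block must be 00 01, FF padding, 00, the SHA-256 DigestInfo prefix (the \x30\x31... constant), then the digest of the message. A hedged cross-check against the python-rsa library (Python 3 syntax; it assumes, as in the original script, that the unshown top of the function parses the hex signature and applies pow(sig, e, n)):

    import rsa

    pub, priv = rsa.newkeys(512)
    message = b'hello'
    sig = rsa.sign(message, priv, 'SHA-256')  # standard PKCS#1 v1.5 signature
    sig_hex = sig.hex()                       # the function starts from a hex string
    print(rsa_verify(message, sig_hex, (pub.n, pub.e)))  # expected: True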
@@ -92,7 +100,7 @@ echo Updating youtube-dl...
 ping 127.0.0.1 -n 5 -w 1000 > NUL
 move /Y "%s.new" "%s"
 del "%s"
-\n""" %(exe, exe, bat))
+\n""" % (exe, exe, bat))
     b.close()

     os.startfile(bat)
@@ -59,7 +59,7 @@ class FakeYDL(YoutubeDL):
         params = get_params(override=override)
         super(FakeYDL, self).__init__(params, auto_init=False)
         self.result = []

     def to_screen(self, s, skip_eol=None):
         print(s)
@@ -72,8 +72,10 @@ class FakeYDL(YoutubeDL):
     def expect_warning(self, regex):
         # Silence an expected warning matching a regex
         old_report_warning = self.report_warning

         def report_warning(self, message):
-            if re.match(regex, message): return
+            if re.match(regex, message):
+                return
             old_report_warning(message)
         self.report_warning = types.MethodType(report_warning, self)
@@ -266,6 +266,7 @@ class TestFormatSelection(unittest.TestCase):
             'ext': 'mp4',
             'width': None,
         }

         def fname(templ):
             ydl = YoutubeDL({'outtmpl': templ})
             return ydl.prepare_filename(info)
@@ -32,19 +32,19 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_youtube_playlist_matching(self):
         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585
+        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585
         assertPlaylist('PL63F0C78739B09958')
         assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
+        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
         # Top tracks
         assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')

     def test_youtube_matching(self):
         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
-        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
+        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012'))  # 668
         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
@@ -40,18 +40,22 @@ from youtube_dl.extractor import get_info_extractor
 RETRIES = 3

 class YoutubeDL(youtube_dl.YoutubeDL):
     def __init__(self, *args, **kwargs):
         self.to_stderr = self.to_screen
         self.processed_info_dicts = []
         super(YoutubeDL, self).__init__(*args, **kwargs)

     def report_warning(self, message):
         # Don't accept warnings during tests
         raise ExtractorError(message)

     def process_info(self, info_dict):
         self.processed_info_dicts.append(info_dict)
         return super(YoutubeDL, self).process_info(info_dict)

 def _file_md5(fn):
     with open(fn, 'rb') as f:
         return hashlib.md5(f.read()).hexdigest()
@@ -61,10 +65,13 @@ defs = gettestcases()

 class TestDownload(unittest.TestCase):
     maxDiff = None

     def setUp(self):
         self.defs = defs

-### Dynamically generate tests
+# Dynamically generate tests

 def generator(test_case):

     def test_template(self):
@@ -101,6 +108,7 @@ def generator(test_case):
         ydl = YoutubeDL(params, auto_init=False)
         ydl.add_default_info_extractors()
         finished_hook_called = set()

         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
@@ -111,6 +119,7 @@ def generator(test_case):
             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))

         res_dict = None

         def try_rm_tcs_files(tcs=None):
             if tcs is None:
                 tcs = test_cases
@@ -206,7 +215,7 @@ def generator(test_case):

     return test_template

-### And add them to TestDownload
+# And add them to TestDownload
 for n, test_case in enumerate(defs):
     test_method = generator(test_case)
     tname = 'test_' + str(test_case['name'])
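The generator/setattr pattern above builds one unittest method per download test case at import time. A standalone sketch of the same idiom, with made-up names:

    import unittest

    class TestDownload(unittest.TestCase):
        pass

    def generator(case):
        def test_template(self):
            self.assertEqual(case, case)  # a real test would exercise `case` here
        return test_template

    for n, case in enumerate(['a', 'b', 'c']):
        setattr(TestDownload, 'test_%d' % n, generator(case))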
@@ -23,6 +23,7 @@ from youtube_dl.extractor import (
 class BaseTestSubtitles(unittest.TestCase):
     url = None
     IE = None

     def setUp(self):
         self.DL = FakeYDL()
         self.ie = self.IE(self.DL)
@@ -45,7 +45,6 @@ from youtube_dl.utils import (
     escape_rfc3986,
     escape_url,
     js_to_json,
     get_filesystem_encoding,
     intlist_to_bytes,
     args_to_str,
 )
@@ -120,16 +119,16 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
         self.assertEqual(orderedSet([]), [])
         self.assertEqual(orderedSet([1]), [1])
-        #keep the list ordered
+        # keep the list ordered
         self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])

     def test_unescape_html(self):
         self.assertEqual(unescapeHTML('%20;'), '%20;')
         self.assertEqual(
             unescapeHTML('&eacute;'), 'é')

     def test_daterange(self):
-        _20century = DateRange("19000101","20000101")
+        _20century = DateRange("19000101", "20000101")
         self.assertFalse("17890714" in _20century)
         _ac = DateRange("00010101")
         self.assertTrue("19690721" in _ac)
@@ -31,19 +31,18 @@ params = get_params({
 })

 TEST_ID = 'gr51aVj-mLg'
 ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
 EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']

 class TestAnnotations(unittest.TestCase):
     def setUp(self):
         # Clear old files
         self.tearDown()

     def test_info_json(self):
-        expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text.
+        expected = list(EXPECTED_ANNOTATIONS)  # Two annotations could have the same text.
         ie = youtube_dl.extractor.YoutubeIE()
         ydl = YoutubeDL(params)
         ydl.add_info_extractor(ie)
@@ -51,7 +50,7 @@ class TestAnnotations(unittest.TestCase):
         self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
         annoxml = None
         with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
             annoxml = xml.etree.ElementTree.parse(annof)
         self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
         root = annoxml.getroot()
         self.assertEqual(root.tag, 'document')
@@ -59,18 +58,17 @@
         self.assertEqual(annotationsTag.tag, 'annotations')
         annotations = annotationsTag.findall('annotation')

-        #Not all the annotations have TEXT children and the annotations are returned unsorted.
+        # Not all the annotations have TEXT children and the annotations are returned unsorted.
         for a in annotations:
-            self.assertEqual(a.tag, 'annotation')
-            if a.get('type') == 'text':
-                textTag = a.find('TEXT')
-                text = textTag.text
-                self.assertTrue(text in expected) #assertIn only added in python 2.7
-                #remove the first occurance, there could be more than one annotation with the same text
-                expected.remove(text)
-        #We should have seen (and removed) all the expected annotation texts.
+            self.assertEqual(a.tag, 'annotation')
+            if a.get('type') == 'text':
+                textTag = a.find('TEXT')
+                text = textTag.text
+                self.assertTrue(text in expected)  # assertIn only added in python 2.7
+                # remove the first occurance, there could be more than one annotation with the same text
+                expected.remove(text)
+        # We should have seen (and removed) all the expected annotation texts.
         self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')

     def tearDown(self):
         try_rm(ANNOTATIONS_FILE)
@@ -12,10 +12,6 @@ from test.helper import FakeYDL
 from youtube_dl.extractor import (
     YoutubePlaylistIE,
     YoutubeIE,
     YoutubeChannelIE,
     YoutubeShowIE,
     YoutubeTopListIE,
     YoutubeSearchURLIE,
 )
@@ -31,7 +27,7 @@ class TestYoutubeLists(unittest.TestCase):
         result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
         self.assertEqual(result['_type'], 'url')
         self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')

     def test_youtube_course(self):
         dl = FakeYDL()
         ie = YoutubePlaylistIE(dl)
@@ -29,7 +29,6 @@ from .compat import (
     compat_str,
     compat_urllib_error,
     compat_urllib_request,
     shlex_quote,
 )
 from .utils import (
     escape_url,
@@ -552,7 +551,7 @@ class YoutubeDL(object):

             try:
                 ie_result = ie.extract(url)
-                if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
+                if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                     break
                 if isinstance(ie_result, list):
                     # Backwards compatibility: old IE result format
@@ -565,7 +564,7 @@ class YoutubeDL(object):
                     return self.process_ie_result(ie_result, download, extra_info)
                 else:
                     return ie_result
-            except ExtractorError as de: # An error we somewhat expected
+            except ExtractorError as de:  # An error we somewhat expected
                 self.report_error(compat_str(de), de.format_traceback())
                 break
             except MaxDownloadsReached:
@@ -700,6 +699,7 @@ class YoutubeDL(object):
             self.report_warning(
                 'Extractor %s returned a compat_list result. '
                 'It needs to be updated.' % ie_result.get('extractor'))

             def _fixup(r):
                 self.add_extra_info(r,
                     {
@@ -1010,7 +1010,7 @@ class YoutubeDL(object):
                 else:
                     self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                     with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                         subfile.write(sub)
             except (OSError, IOError):
                 self.report_error('Cannot write subtitles file ' + sub_filename)
                 return
@@ -1111,7 +1111,7 @@ class YoutubeDL(object):

         for url in url_list:
             try:
-                #It also downloads the videos
+                # It also downloads the videos
                 res = self.extract_info(url)
             except UnavailableVideoError:
                 self.report_error('unable to download video')
@@ -1428,4 +1428,3 @@ class YoutubeDL(object):
         if encoding is None:
             encoding = preferredencoding()
         return encoding
@@ -76,10 +76,10 @@ def _real_main(argv=None):
     if opts.headers is not None:
         for h in opts.headers:
             if h.find(':', 1) < 0:
-                parser.error('wrong header formatting, it should be key:value, not "%s"'%h)
+                parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
             key, value = h.split(':', 2)
             if opts.verbose:
-                write_string('[debug] Adding header from command line option %s:%s\n'%(key, value))
+                write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
             std_headers[key] = value

     # Dump user agent
@@ -128,7 +128,6 @@ def _real_main(argv=None):
             compat_print(desc)
         sys.exit(0)

     # Conflicting, missing and erroneous options
     if opts.usenetrc and (opts.username is not None or opts.password is not None):
         parser.error('using .netrc conflicts with giving username/password')
@@ -197,14 +196,14 @@ def _real_main(argv=None):
     # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
     if opts.outtmpl is not None:
         opts.outtmpl = opts.outtmpl.decode(preferredencoding())
-    outtmpl =((opts.outtmpl is not None and opts.outtmpl)
-            or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
-            or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
-            or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
-            or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
-            or (opts.useid and '%(id)s.%(ext)s')
-            or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
-            or DEFAULT_OUTTMPL)
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
+               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
+               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
+               or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
+               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
+               or (opts.useid and '%(id)s.%(ext)s')
+               or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
+               or DEFAULT_OUTTMPL)
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
         parser.error('Cannot download a video and extract audio into the same'
                      ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
@@ -317,7 +316,7 @@ def _real_main(argv=None):
             ydl.add_post_processor(FFmpegAudioFixPP())
             ydl.add_post_processor(AtomicParsleyPP())

         # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
         # So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
         if opts.exec_cmd:
@@ -7,10 +7,11 @@ from .utils import bytes_to_intlist, intlist_to_bytes

 BLOCK_SIZE_BYTES = 16

 def aes_ctr_decrypt(data, key, counter):
     """
     Decrypt with aes in counter mode

     @param {int[]} data cipher
     @param {int[]} key 16/24/32-Byte cipher key
     @param {instance} counter Instance whose next_value function (@returns {int[]} 16-Byte block)
@@ -19,23 +20,24 @@ def aes_ctr_decrypt(data, key, counter):
     """
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

-    decrypted_data=[]
+    decrypted_data = []
     for i in range(block_count):
         counter_block = counter.next_value()
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))

         cipher_counter_block = aes_encrypt(counter_block, expanded_key)
         decrypted_data += xor(block, cipher_counter_block)
     decrypted_data = decrypted_data[:len(data)]

     return decrypted_data

 def aes_cbc_decrypt(data, key, iv):
     """
     Decrypt with aes in CBC mode

     @param {int[]} data cipher
     @param {int[]} key 16/24/32-Byte cipher key
     @param {int[]} iv 16-Byte IV
@@ -43,94 +45,98 @@ def aes_cbc_decrypt(data, key, iv):
     """
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

-    decrypted_data=[]
+    decrypted_data = []
     previous_cipher_block = iv
     for i in range(block_count):
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))

         decrypted_block = aes_decrypt(block, expanded_key)
         decrypted_data += xor(decrypted_block, previous_cipher_block)
         previous_cipher_block = block
     decrypted_data = decrypted_data[:len(data)]

     return decrypted_data

 def key_expansion(data):
     """
     Generate key schedule

     @param {int[]} data 16/24/32-Byte cipher key
     @returns {int[]} 176/208/240-Byte expanded key
     """
-    data = data[:] # copy
+    data = data[:]  # copy
     rcon_iteration = 1
     key_size_bytes = len(data)
     expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES

     while len(data) < expanded_key_size_bytes:
         temp = data[-4:]
         temp = key_schedule_core(temp, rcon_iteration)
         rcon_iteration += 1
-        data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

         for _ in range(3):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

         if key_size_bytes == 32:
             temp = data[-4:]
             temp = sub_bytes(temp)
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

         for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
     data = data[:expanded_key_size_bytes]

     return data

 def aes_encrypt(data, expanded_key):
     """
     Encrypt one block with aes

     @param {int[]} data 16-Byte state
     @param {int[]} expanded_key 176/208/240-Byte expanded key
     @returns {int[]} 16-Byte cipher
     """
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-    for i in range(1, rounds+1):
+    for i in range(1, rounds + 1):
         data = sub_bytes(data)
         data = shift_rows(data)
         if i != rounds:
             data = mix_columns(data)
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])

     return data

 def aes_decrypt(data, expanded_key):
     """
     Decrypt one block with aes

     @param {int[]} data 16-Byte cipher
     @param {int[]} expanded_key 176/208/240-Byte expanded key
     @returns {int[]} 16-Byte state
     """
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

     for i in range(rounds, 0, -1):
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
         if i != rounds:
             data = mix_columns_inv(data)
         data = shift_rows_inv(data)
         data = sub_bytes_inv(data)
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])

     return data

 def aes_decrypt_text(data, password, key_size_bytes):
     """
     Decrypt text
@@ -138,33 +144,34 @@ def aes_decrypt_text(data, password, key_size_bytes):
     - The cipher key is retrieved by encrypting the first 16 Byte of 'password'
       with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
     - Mode of operation is 'counter'

     @param {str} data Base64 encoded string
     @param {str,unicode} password Password (will be encoded with utf-8)
     @param {int} key_size_bytes Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
     @returns {str} Decrypted data
     """
     NONCE_LENGTH_BYTES = 8

     data = bytes_to_intlist(base64.b64decode(data))
     password = bytes_to_intlist(password.encode('utf-8'))

-    key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
+    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
     key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)

     nonce = data[:NONCE_LENGTH_BYTES]
     cipher = data[NONCE_LENGTH_BYTES:]

     class Counter:
-        __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+        __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)

         def next_value(self):
             temp = self.__value
             self.__value = inc(self.__value)
             return temp

     decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
     plaintext = intlist_to_bytes(decrypted_data)

     return plaintext

 RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
@@ -200,14 +207,14 @@ SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x
             0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
             0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
             0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
-MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
-                     (0x1,0x2,0x3,0x1),
-                     (0x1,0x1,0x2,0x3),
-                     (0x3,0x1,0x1,0x2))
-MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
-                         (0x9,0xE,0xB,0xD),
-                         (0xD,0x9,0xE,0xB),
-                         (0xB,0xD,0x9,0xE))
+MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
+                     (0x1, 0x2, 0x3, 0x1),
+                     (0x1, 0x1, 0x2, 0x3),
+                     (0x3, 0x1, 0x1, 0x2))
+MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
+                         (0x9, 0xE, 0xB, 0xD),
+                         (0xD, 0x9, 0xE, 0xB),
+                         (0xB, 0xD, 0x9, 0xE))
 RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
                       0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
                       0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
@@ -241,30 +248,37 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7
                       0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
                       0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)

 def sub_bytes(data):
     return [SBOX[x] for x in data]

 def sub_bytes_inv(data):
     return [SBOX_INV[x] for x in data]

 def rotate(data):
     return data[1:] + [data[0]]

 def key_schedule_core(data, rcon_iteration):
     data = rotate(data)
     data = sub_bytes(data)
     data[0] = data[0] ^ RCON[rcon_iteration]

     return data

 def xor(data1, data2):
-    return [x^y for x, y in zip(data1, data2)]
+    return [x ^ y for x, y in zip(data1, data2)]

 def rijndael_mul(a, b):
-    if(a==0 or b==0):
+    if(a == 0 or b == 0):
         return 0
     return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]

 def mix_column(data, matrix):
     data_mixed = []
     for row in range(4):
@@ -275,33 +289,38 @@ def mix_column(data, matrix):
         data_mixed.append(mixed)
     return data_mixed

 def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
     data_mixed = []
     for i in range(4):
-        column = data[i*4 : (i+1)*4]
+        column = data[i * 4: (i + 1) * 4]
         data_mixed += mix_column(column, matrix)
     return data_mixed

 def mix_columns_inv(data):
     return mix_columns(data, MIX_COLUMN_MATRIX_INV)

 def shift_rows(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column + row) & 0b11) * 4 + row])
     return data_shifted

 def shift_rows_inv(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column - row) & 0b11) * 4 + row])
     return data_shifted

 def inc(data):
-    data = data[:] # copy
-    for i in range(len(data)-1,-1,-1):
+    data = data[:]  # copy
+    for i in range(len(data) - 1, -1, -1):
         if data[i] == 255:
             data[i] = 0
         else:
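The helpers above are plain list-of-int AES primitives; aes_decrypt_text is the entry point some extractors use to deobfuscate URLs. A minimal usage sketch (not from the diff; the blob below is dummy data of the right shape, an 8-byte nonce followed by one ciphertext block):

    import base64

    from youtube_dl.aes import aes_decrypt_text

    blob = base64.b64encode(b'\x00' * 8 + b'\x00' * 16).decode('ascii')
    # 32 -> AES-256; a real caller derives the password from page data
    plaintext = aes_decrypt_text(blob, 'secret-password', 32)
    print(repr(plaintext))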
@@ -10,47 +10,47 @@ import sys

 try:
     import urllib.request as compat_urllib_request
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib2 as compat_urllib_request

 try:
     import urllib.error as compat_urllib_error
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib2 as compat_urllib_error

 try:
     import urllib.parse as compat_urllib_parse
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib as compat_urllib_parse

 try:
     from urllib.parse import urlparse as compat_urllib_parse_urlparse
-except ImportError: # Python 2
+except ImportError:  # Python 2
     from urlparse import urlparse as compat_urllib_parse_urlparse

 try:
     import urllib.parse as compat_urlparse
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urlparse as compat_urlparse

 try:
     import http.cookiejar as compat_cookiejar
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import cookielib as compat_cookiejar

 try:
     import html.entities as compat_html_entities
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import htmlentitydefs as compat_html_entities

 try:
     import html.parser as compat_html_parser
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import HTMLParser as compat_html_parser

 try:
     import http.client as compat_http_client
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import httplib as compat_http_client

 try:
@@ -111,7 +111,7 @@ except ImportError:

 try:
     from urllib.parse import parse_qs as compat_parse_qs
-except ImportError: # Python 2
+except ImportError:  # Python 2
     # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
     # Python 2's version is apparently totally broken
@@ -157,12 +157,12 @@ except ImportError: # Python 2
     return parsed_result

 try:
-    compat_str = unicode # Python 2
+    compat_str = unicode  # Python 2
 except NameError:
     compat_str = str

 try:
-    compat_chr = unichr # Python 2
+    compat_chr = unichr  # Python 2
 except NameError:
     compat_chr = chr
@@ -182,8 +182,10 @@ except ImportError: # Python < 3.3

 def compat_ord(c):
-    if type(c) is int: return c
-    else: return ord(c)
+    if type(c) is int:
+        return c
+    else:
+        return ord(c)

 if sys.version_info >= (3, 0):
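compat_ord papers over the str/bytes indexing difference: indexing bytes yields an int on Python 3 but a one-character str on Python 2. For example:

    print(compat_ord(b'\xff'[0]))  # 255 on both Python 2 and Python 3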
@@ -254,7 +256,7 @@ else:
             drive = ''
         userhome = os.path.join(drive, compat_getenv('HOMEPATH'))

-        if i != 1: #~user
+        if i != 1:  # ~user
             userhome = os.path.join(os.path.dirname(userhome), path[1:i])

         return userhome + path[i:]
@@ -81,7 +81,7 @@ class FileDownloader(object):
         if total is None:
             return None
         dif = now - start
-        if current == 0 or dif < 0.001: # One millisecond
+        if current == 0 or dif < 0.001:  # One millisecond
             return None
         rate = float(current) / dif
         return int((float(total) - float(current)) / rate)
@@ -95,7 +95,7 @@ class FileDownloader(object):
     @staticmethod
     def calc_speed(start, now, bytes):
         dif = now - start
-        if bytes == 0 or dif < 0.001: # One millisecond
+        if bytes == 0 or dif < 0.001:  # One millisecond
             return None
         return float(bytes) / dif
@@ -108,7 +108,7 @@ class FileDownloader(object):
     @staticmethod
     def best_block_size(elapsed_time, bytes):
         new_min = max(bytes / 2.0, 1.0)
-        new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
+        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
         if elapsed_time < 0.001:
             return int(new_max)
         rate = bytes / elapsed_time
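calc_speed and calc_eta are simple rate arithmetic: bytes over elapsed seconds, and remaining bytes over that rate. A worked example using standalone copies of the formulas (values invented for illustration):

    start, now = 0.0, 2.0                 # two seconds elapsed
    current, total = 1048576, 4194304     # 1 MiB of 4 MiB downloaded

    rate = float(current) / (now - start)              # 524288.0 B/s
    eta = int((float(total) - float(current)) / rate)  # 6 seconds
    print(rate, eta)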
@@ -55,7 +55,7 @@ class FlvReader(io.BytesIO):
         if size == 1:
             real_size = self.read_unsigned_long_long()
             header_end = 16
-        return real_size, box_type, self.read(real_size-header_end)
+        return real_size, box_type, self.read(real_size - header_end)

     def read_asrt(self):
         # version
@@ -180,7 +180,7 @@ def build_fragments_list(boot_info):
     n_frags = segment_run_entry[1]
     fragment_run_entry_table = boot_info['fragments'][0]['fragments']
     first_frag_number = fragment_run_entry_table[0]['first']
-    for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
+    for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)):
         res.append((1, frag_number))
     return res
@@ -101,4 +101,3 @@ class NativeHlsFD(FileDownloader):
         })
         self.try_rename(tmpfilename, filename)
         return True
@@ -46,13 +46,13 @@ class RtmpFD(FileDownloader):
                     continue
                 mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                 if mobj:
-                    downloaded_data_len = int(float(mobj.group(1))*1024)
+                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                     percent = float(mobj.group(2))
                     if not resume_percent:
                         resume_percent = percent
                         resume_downloaded_data_len = downloaded_data_len
-                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
-                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
+                    eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
+                    speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
                     data_len = None
                     if percent > 0:
                         data_len = int(downloaded_data_len * 100 / percent)
@@ -72,7 +72,7 @@ class RtmpFD(FileDownloader):
                 # no percent for live streams
                 mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                 if mobj:
-                    downloaded_data_len = int(float(mobj.group(1))*1024)
+                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                     time_now = time.time()
                     speed = self.calc_speed(start, time_now, downloaded_data_len)
                     self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
@@ -88,7 +88,7 @@ class RtmpFD(FileDownloader):
                 if not cursor_in_new_line:
                     self.to_screen('')
                     cursor_in_new_line = True
-                self.to_screen('[rtmpdump] '+line)
+                self.to_screen('[rtmpdump] ' + line)
             proc.wait()
             if not cursor_in_new_line:
                 self.to_screen('')
@@ -180,7 +180,7 @@ class RtmpFD(FileDownloader):
         while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
             prevsize = os.path.getsize(encodeFilename(tmpfilename))
             self.to_screen('[rtmpdump] %s bytes' % prevsize)
-            time.sleep(5.0) # This seems to be needed
+            time.sleep(5.0)  # This seems to be needed
             retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED])
             cursize = os.path.getsize(encodeFilename(tmpfilename))
             if prevsize == cursize and retval == RD_FAILED:
@@ -529,4 +529,4 @@ def gen_extractors():

 def get_info_extractor(ie_name):
     """Returns the info extractor class with the given ie_name"""
-    return globals()[ie_name+'IE']
+    return globals()[ie_name + 'IE']
@@ -5,6 +5,7 @@ import re

 from .common import InfoExtractor

 class AdultSwimIE(InfoExtractor):
     _VALID_URL = r'https?://video\.adultswim\.com/(?P<path>.+?)(?:\.html)?(?:\?.*)?(?:#.*)?$'
     _TEST = {
@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8

 from __future__ import unicode_literals
@@ -70,11 +70,13 @@ class AppleTrailersIE(InfoExtractor):
         uploader_id = mobj.group('company')

         playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')

         def fix_html(s):
             s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
             s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
             # The ' in the onClick attributes are not escaped, it couldn't be parsed
             # like: http://trailers.apple.com/trailers/wb/gravity/

             def _clean_json(m):
                 return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
             s = re.sub(self._JSON_RE, _clean_json, s)
@@ -192,4 +192,3 @@ class ARDIE(InfoExtractor):
             'upload_date': upload_date,
             'thumbnail': thumbnail,
         }
@@ -13,7 +13,7 @@ from ..utils import (
     qualities,
 )

 # There are different sources of video in arte.tv, the extraction process
 # is different for each one. The videos usually expire in 7 days, so we can't
 # add tests.
@@ -12,17 +12,17 @@ class AudiomackIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
     IE_NAME = 'audiomack'
     _TESTS = [
-        #hosted on audiomack
+        # hosted on audiomack
        {
            'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary',
            'info_dict':
            {
-                'id' : 'roosh-williams/extraordinary',
+                'id': 'roosh-williams/extraordinary',
                'ext': 'mp3',
                'title': 'Roosh Williams - Extraordinary'
            }
        },
-        #hosted on soundcloud via audiomack
+        # hosted on soundcloud via audiomack
        {
            'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare',
            'file': '172419696.mp3',
@@ -49,7 +49,7 @@ class AudiomackIE(InfoExtractor):
             raise ExtractorError("Unable to deduce api url of song")
         realurl = api_response["url"]

-        #Audiomack wraps a lot of soundcloud tracks in their branded wrapper
+        # Audiomack wraps a lot of soundcloud tracks in their branded wrapper
         # - if so, pass the work off to the soundcloud extractor
         if SoundcloudIE.suitable(realurl):
             return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'}
@@ -18,7 +18,7 @@ class BambuserIE(InfoExtractor):
     _TEST = {
         'url': 'http://bambuser.com/v/4050584',
         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
-        #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+        # u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
         'info_dict': {
             'id': '4050584',
             'ext': 'flv',
@@ -83,12 +83,12 @@ class BandcampIE(InfoExtractor):
         initial_url = mp3_info['url']
         re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
         m_url = re.match(re_url, initial_url)
-        #We build the url we will use to get the final track url
+        # We build the url we will use to get the final track url
         # This url is build in Bandcamp in the script download_bunde_*.js
         request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
         final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
         # If we could correctly generate the .rand field the url would be
-        #in the "download_url" key
+        # in the "download_url" key
         final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)

         return {
@@ -195,7 +195,7 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
         duration = int(item.get('duration'))

         media_selection = self._download_xml(
             'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
             programme_id, 'Downloading media selection XML')

         for media in self._extract_medias(media_selection):
@@ -220,4 +220,4 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
             'duration': duration,
             'formats': formats,
             'subtitles': subtitles,
         }
@@ -40,7 +40,7 @@ class BeegIE(InfoExtractor):

         title = self._html_search_regex(
             r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')

         description = self._html_search_regex(
             r'<meta name="description" content="([^"]*)"',
             webpage, 'description', fatal=False)
@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals

 from .common import InfoExtractor
@@ -112,4 +112,4 @@ class CanalplusIE(InfoExtractor):
             'like_count': int(infos.find('NB_LIKES').text),
             'comment_count': int(infos.find('NB_COMMENTS').text),
             'formats': formats,
         }
@@ -84,4 +84,4 @@ class CBSNewsIE(InfoExtractor):
             'thumbnail': thumbnail,
             'duration': duration,
             'formats': formats,
         }
@@ -92,7 +92,7 @@ class CeskaTelevizeIE(InfoExtractor):
         req.add_header('Referer', url)

         playlist = self._download_xml(req, video_id)

         formats = []
         for i in playlist.find('smilRoot/body'):
             if 'AD' not in i.attrib['id']:
@@ -5,6 +5,7 @@ import re
 from .common import InfoExtractor
 from ..utils import ExtractorError

 class Channel9IE(InfoExtractor):
     '''
     Common extractor for channel9.msdn.com.
@@ -31,7 +32,7 @@ class Channel9IE(InfoExtractor):
             'session_code': 'KOS002',
             'session_day': 'Day 1',
             'session_room': 'Arena 1A',
-            'session_speakers': [ 'Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen' ],
+            'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
         },
     },
     {
@@ -44,7 +45,7 @@ class Channel9IE(InfoExtractor):
             'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
             'duration': 1540,
             'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
-            'authors': [ 'Mike Wilmot' ],
+            'authors': ['Mike Wilmot'],
         },
     }
 ]
@@ -83,7 +84,7 @@ class Channel9IE(InfoExtractor):
             'format_id': x.group('quality'),
             'format_note': x.group('note'),
             'format': '%s (%s)' % (x.group('quality'), x.group('note')),
-            'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
+            'filesize': self._restore_bytes(x.group('filesize')),  # File size is approximate
             'preference': self._known_formats.index(x.group('quality')),
             'vcodec': 'none' if x.group('note') == 'Audio only' else None,
         } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
@@ -202,17 +203,17 @@ class Channel9IE(InfoExtractor):

         if slides is not None:
             d = common.copy()
-            d.update({ 'title': title + '-Slides', 'url': slides })
+            d.update({'title': title + '-Slides', 'url': slides})
             result.append(d)

         if zip_ is not None:
             d = common.copy()
-            d.update({ 'title': title + '-Zip', 'url': zip_ })
+            d.update({'title': title + '-Zip', 'url': zip_})
             result.append(d)

         if len(formats) > 0:
             d = common.copy()
-            d.update({ 'title': title, 'formats': formats })
+            d.update({'title': title, 'formats': formats})
             result.append(d)

         return result
@@ -270,5 +271,5 @@ class Channel9IE(InfoExtractor):
             else:
                 raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)

-        else: # Assuming list
+        else:  # Assuming list
             return self._extract_list(content_path)
@@ -77,7 +77,7 @@ class CinemassacreIE(InfoExtractor):
         if videolist_url:
             videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
             formats = []
-            baseurl = vidurl[:vidurl.rfind('/')+1]
+            baseurl = vidurl[:vidurl.rfind('/') + 1]
             for video in videolist.findall('.//video'):
                 src = video.get('src')
                 if not src:
@@ -39,6 +39,7 @@ class ClipsyndicateIE(InfoExtractor):
             transform_source=fix_xml_ampersands)

         track_doc = pdoc.find('trackList/track')

         def find_param(name):
             node = find_xpath_attr(track_doc, './/param', 'name', name)
             if node is not None:
@@ -423,17 +423,18 @@ class InfoExtractor(object):
         """Report attempt to log in."""
         self.to_screen('Logging in')

-    #Methods for following #608
+    # Methods for following #608
     @staticmethod
     def url_result(url, ie=None, video_id=None):
         """Returns a url that points to a page that should be processed"""
-        #TODO: ie should be the class used for getting the info
+        # TODO: ie should be the class used for getting the info
         video_info = {'_type': 'url',
                       'url': url,
                       'ie_key': ie}
         if video_id is not None:
             video_info['id'] = video_id
         return video_info

     @staticmethod
     def playlist_result(entries, playlist_id=None, playlist_title=None):
         """Returns a playlist"""
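As an aside, a tiny illustration (not part of the diff) of what url_result returns when an extractor delegates a URL to another one; the values here are hypothetical:

    info = InfoExtractor.url_result('http://blip.tv/a/a-123', ie='BlipTV', video_id='123')
    # -> {'_type': 'url', 'url': 'http://blip.tv/a/a-123', 'ie_key': 'BlipTV', 'id': '123'}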
@@ -517,7 +518,7 @@ class InfoExtractor(object):
                 raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
             except (IOError, netrc.NetrcParseError) as err:
                 self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))

         return (username, password)

     def _get_tfa_info(self):
@@ -54,7 +54,7 @@ class CrackedIE(InfoExtractor):

         return {
             'id': video_id,
-            'url':video_url,
+            'url': video_url,
             'title': title,
             'description': description,
             'timestamp': timestamp,
@@ -62,4 +62,4 @@ class CrackedIE(InfoExtractor):
             'comment_count': comment_count,
             'height': height,
             'width': width,
         }
@@ -69,11 +69,9 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
         login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self._download_webpage(login_request, None, False, 'Wrong login info')

     def _real_initialize(self):
         self._login()

     def _decrypt_subtitles(self, data, iv, id):
         data = bytes_to_intlist(data)
         iv = bytes_to_intlist(iv)
@@ -99,8 +97,10 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
             return shaHash + [0] * 12

         key = obfuscate_key(id)

         class Counter:
             __value = iv

             def next_value(self):
                 temp = self.__value
                 self.__value = inc(self.__value)
@@ -183,7 +183,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text

         return output

-    def _real_extract(self,url):
+    def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('video_id')
@@ -226,10 +226,10 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         formats = []
         for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
             stream_quality, stream_format = self._FORMAT_IDS[fmt]
-            video_format = fmt+'p'
+            video_format = fmt + 'p'
             streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
             # urlencode doesn't work!
-            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
+            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
             streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
             streamdata = self._download_xml(
@@ -248,8 +248,8 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         subtitles = {}
         sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
-            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
-                                              video_id, note='Downloading subtitles for '+sub_name)
+            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,\
+                                              video_id, note='Downloading subtitles for ' + sub_name)
             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
@@ -274,14 +274,14 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             return

         return {
             'id': video_id,
             'title': video_title,
             'description': video_description,
             'thumbnail': video_thumbnail,
             'uploader': video_uploader,
             'upload_date': video_upload_date,
             'subtitles': subtitles,
             'formats': formats,
         }
@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals

 import re
@@ -18,6 +18,7 @@ from ..utils import (
     unescapeHTML,
 )

 class DailymotionBaseInfoExtractor(InfoExtractor):
     @staticmethod
     def _build_request(url):

@@ -27,6 +28,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
         request.add_header('Cookie', 'ff=off')
         return request

 class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
     """Information Extractor for Dailymotion"""
@@ -26,13 +26,13 @@ class DefenseGouvFrIE(InfoExtractor):
         video_id = self._search_regex(
             r"flashvars.pvg_id=\"(\d+)\";",
             webpage, 'ID')

         json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/'
                     + video_id)
         info = self._download_webpage(json_url, title,
                                       'Downloading JSON config')
         video_url = json.loads(info)['renditions'][0]['url']

         return {'id': video_id,
                 'ext': 'mp4',
                 'url': video_url,
@@ -27,7 +27,7 @@ class DotsubIE(InfoExtractor):
         video_id = mobj.group('id')
         info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
         info = self._download_json(info_url, video_id)
-        date = time.gmtime(info['dateCreated']/1000) # The timestamp is in miliseconds
+        date = time.gmtime(info['dateCreated'] / 1000)  # The timestamp is in miliseconds

         return {
             'id': video_id,
@@ -40,7 +40,7 @@ class FC2IE(InfoExtractor):

         info_url = (
             "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
-            format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.','%2E')))
+            format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E')))

         info_webpage = self._download_webpage(
             info_url, video_id, note='Downloading info page')
@@ -57,4 +57,4 @@ class FirstTVIE(InfoExtractor):
             'duration': int_or_none(duration),
             'like_count': int_or_none(like_count),
             'dislike_count': int_or_none(dislike_count),
         }
@@ -17,8 +17,8 @@ class FlickrIE(InfoExtractor):
         'info_dict': {
             'id': '5645318632',
             'ext': 'mp4',
             "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
             "uploader_id": "forestwander-nature-pictures",
             "title": "Dark Hollow Waterfalls"
         }
     }
@ -92,4 +92,4 @@ class FourTubeIE(InfoExtractor):
            'duration': duration,
            'age_limit': 18,
            'webpage_url': webpage_url,
        }

@ -733,7 +733,7 @@ class GenericIE(InfoExtractor):
                'title': video_title,
                'id': video_id,
            }

        match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
        if match:
            return {
@ -748,7 +748,7 @@ class GenericIE(InfoExtractor):
        # Look for embedded blip.tv player
        mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
        if mobj:
            return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV')
            return self.url_result('http://blip.tv/a/a-' + mobj.group(1), 'BlipTV')
        mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage)
        if mobj:
            return self.url_result(mobj.group(1), 'BlipTV')
@ -1025,4 +1025,3 @@ class GenericIE(InfoExtractor):
        '_type': 'playlist',
        'entries': entries,
    }

@ -397,4 +397,4 @@ class GloboIE(InfoExtractor):
            'uploader_id': uploader_id,
            'like_count': like_count,
            'formats': formats
        }

@ -69,7 +69,7 @@ class GorillaVidIE(InfoExtractor):
        (?:id="[^"]+"\s+)?
        value="([^"]*)"
        ''', webpage))

        if fields['op'] == 'download1':
            post = compat_urllib_parse.urlencode(fields)

@ -37,7 +37,7 @@ class HornBunnyIE(InfoExtractor):
        webpage2 = self._download_webpage(redirect_url, video_id)
        video_url = self._html_search_regex(
            r'flvMask:(.*?);', webpage2, 'video_url')

        duration = parse_duration(self._search_regex(
            r'<strong>Runtime:</strong>\s*([0-9:]+)</div>',
            webpage, 'duration', fatal=False))

@ -13,7 +13,7 @@ class HowcastIE(InfoExtractor):
        'info_dict': {
            'id': '390161',
            'ext': 'mp4',
            'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',
            'title': 'How to Tie a Square Knot Properly',
        }
    }

@ -71,7 +71,7 @@ class ImdbListIE(InfoExtractor):
        },
        'playlist_count': 7,
    }

    def _real_extract(self, url):
        list_id = self._match_id(url)
        webpage = self._download_webpage(url, list_id)

@ -32,7 +32,7 @@ class InternetVideoArchiveIE(InfoExtractor):
    def _clean_query(query):
        NEEDED_ARGS = ['publishedid', 'customerid']
        query_dic = compat_urlparse.parse_qs(query)
        cleaned_dic = dict((k,v[0]) for (k,v) in query_dic.items() if k in NEEDED_ARGS)
        cleaned_dic = dict((k, v[0]) for (k, v) in query_dic.items() if k in NEEDED_ARGS)
        # Other player ids return m3u8 urls
        cleaned_dic['playerid'] = '247'
        cleaned_dic['videokbrate'] = '100000'

@ -54,7 +54,7 @@ class IPrimaIE(InfoExtractor):

        player_url = (
            'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
            (floor(random()*1073741824), floor(random()*1073741824))
            (floor(random() * 1073741824), floor(random() * 1073741824))
        )

        req = compat_urllib_request.Request(player_url)

@ -102,7 +102,7 @@ class IviIE(InfoExtractor):
        compilation = result['compilation']
        title = result['title']

        title = '%s - %s' % (compilation, title) if compilation is not None else title

        previews = result['preview']
        previews.sort(key=lambda fmt: self._known_thumbnails.index(fmt['content_format']))
@ -152,17 +152,17 @@ class IviCompilationIE(InfoExtractor):
        compilation_id = mobj.group('compilationid')
        season_id = mobj.group('seasonid')

        if season_id is not None: # Season link
        if season_id is not None:  # Season link
            season_page = self._download_webpage(url, compilation_id, 'Downloading season %s web page' % season_id)
            playlist_id = '%s/season%s' % (compilation_id, season_id)
            playlist_title = self._html_search_meta('title', season_page, 'title')
            entries = self._extract_entries(season_page, compilation_id)
        else: # Compilation link
        else:  # Compilation link
            compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
            playlist_id = compilation_id
            playlist_title = self._html_search_meta('title', compilation_page, 'title')
            seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
            if len(seasons) == 0: # No seasons in this compilation
            if len(seasons) == 0:  # No seasons in this compilation
                entries = self._extract_entries(compilation_page, compilation_id)
            else:
                entries = []
@ -172,4 +172,4 @@ class IviCompilationIE(InfoExtractor):
                    compilation_id, 'Downloading season %s web page' % season_id)
                entries.extend(self._extract_entries(season_page, compilation_id))

        return self.playlist_result(entries, playlist_id, playlist_title)

@ -45,4 +45,3 @@ class JadoreCettePubIE(InfoExtractor):
            'title': title,
            'description': description,
        }

@ -29,7 +29,7 @@ class JeuxVideoIE(InfoExtractor):
        xml_link = self._html_search_regex(
            r'<param name="flashvars" value="config=(.*?)" />',
            webpage, 'config URL')

        video_id = self._search_regex(
            r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
            xml_link, 'video ID')
@ -38,7 +38,7 @@ class JeuxVideoIE(InfoExtractor):
            xml_link, title, 'Downloading XML config')
        info_json = config.find('format.json').text
        info = json.loads(info_json)['versions'][0]

        video_url = 'http://video720.jeuxvideo.com/' + info['file']

        return {

@ -10,7 +10,7 @@ _md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()

class KankanIE(InfoExtractor):
    _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'

    _TEST = {
        'url': 'http://yinyue.kankan.com/vod/48/48863.shtml',
        'file': '48863.flv',

@ -63,4 +63,4 @@ class KontrTubeIE(InfoExtractor):
            'duration': duration,
            'view_count': int_or_none(view_count),
            'comment_count': int_or_none(comment_count),
        }

@ -30,4 +30,3 @@ class Ku6IE(InfoExtractor):
            'title': title,
            'url': downloadUrl
        }

@ -75,4 +75,3 @@ class Laola1TvIE(InfoExtractor):
            'categories': categories,
            'ext': 'mp4',
        }

@ -52,7 +52,7 @@ class LifeNewsIE(InfoExtractor):
            r'<div class=\'comments\'>\s*<span class=\'counter\'>(\d+)</span>', webpage, 'comment count', fatal=False)

        upload_date = self._html_search_regex(
            r'<time datetime=\'([^\']+)\'>', webpage, 'upload date',fatal=False)
            r'<time datetime=\'([^\']+)\'>', webpage, 'upload date', fatal=False)
        if upload_date is not None:
            upload_date = unified_strdate(upload_date)

@ -71,4 +71,4 @@ class LifeNewsIE(InfoExtractor):
        if len(videos) == 1:
            return make_entry(video_id, videos[0])
        else:
            return [make_entry(video_id, media, video_number+1) for video_number, media in enumerate(videos)]
            return [make_entry(video_id, media, video_number + 1) for video_number, media in enumerate(videos)]

@ -19,8 +19,7 @@ class LiveLeakIE(InfoExtractor):
            'uploader': 'ljfriel2',
            'title': 'Most unlucky car accident'
        }
    },
    {
    }, {
        'url': 'http://www.liveleak.com/view?i=f93_1390833151',
        'md5': 'd3f1367d14cc3c15bf24fbfbe04b9abf',
        'info_dict': {
@ -30,8 +29,7 @@ class LiveLeakIE(InfoExtractor):
            'uploader': 'ARD_Stinkt',
            'title': 'German Television does first Edward Snowden Interview (ENGLISH)',
        }
    },
    {
    }, {
        'url': 'http://www.liveleak.com/view?i=4f7_1392687779',
        'md5': '42c6d97d54f1db107958760788c5f48f',
        'info_dict': {

@ -109,7 +109,7 @@ class LyndaIE(SubtitlesInfoExtractor):
            'password': password,
            'remember': 'false',
            'stayPut': 'false'
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
        login_page = self._download_webpage(request, None, 'Logging in as %s' % username)

@ -117,7 +117,7 @@ class LyndaIE(SubtitlesInfoExtractor):
        m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page)
        if m is not None:
            response = m.group('json')
            response_json = json.loads(response)
            state = response_json['state']

            if state == 'notlogged':
@ -187,7 +187,7 @@ class LyndaCourseIE(InfoExtractor):
        mobj = re.match(self._VALID_URL, url)
        course_path = mobj.group('coursepath')
        course_id = mobj.group('courseid')

        page = self._download_webpage('http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
                                      course_id, 'Downloading course JSON')
        course_json = json.loads(page)
@ -221,4 +221,4 @@ class LyndaCourseIE(InfoExtractor):

        course_title = course_json['Title']

        return self.playlist_result(entries, course_id, course_title)

@ -53,4 +53,4 @@ class M6IE(InfoExtractor):
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
        }

@ -7,6 +7,7 @@ from ..utils import (
    compat_urllib_parse,
)


class MalemotionIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
    _TEST = {

@ -7,7 +7,7 @@ from .common import InfoExtractor

class MDRIE(InfoExtractor):
    _VALID_URL = r'^(?P<domain>https?://(?:www\.)?mdr\.de)/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)(?:_|\.html)'

    # No tests, MDR regularily deletes its videos
    _TEST = {
        'url': 'http://www.mdr.de/fakt/video189002.html',

@ -22,7 +22,7 @@ class MetacafeIE(InfoExtractor):
        # Youtube video
        {
            'add_ie': ['Youtube'],
            'url': 'http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/',
            'info_dict': {
                'id': '_aUehQsCQtM',
                'ext': 'mp4',

@ -55,4 +55,4 @@ class MojvideoIE(InfoExtractor):
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
        }

@ -54,7 +54,7 @@ class MonikerIE(InfoExtractor):

        title = os.path.splitext(data['fname'])[0]

        #Could be several links with different quality
        # Could be several links with different quality
        links = re.findall(r'"file" : "?(.+?)",', webpage)
        # Assume the links are ordered in quality
        formats = [{

@ -111,4 +111,4 @@ class MooshareIE(InfoExtractor):
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }

@ -72,7 +72,7 @@ class MotherlessIE(InfoExtractor):
        like_count = str_to_int(self._html_search_regex(
            r'<strong>Favorited</strong>\s+([^<]+)<',
            webpage, 'like count', fatal=False))

        upload_date = self._html_search_regex(
            r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date')
        if 'Ago' in upload_date:

@ -27,7 +27,7 @@ class MoviezineIE(InfoExtractor):
        webpage = self._download_webpage(url, video_id)
        jsplayer = self._download_webpage('http://www.moviezine.se/api/player.js?video=%s' % video_id, video_id, 'Downloading js api player')

        formats =[{
        formats = [{
            'format_id': 'sd',
            'url': self._html_search_regex(r'file: "(.+?)",', jsplayer, 'file'),
            'quality': 0,

@ -24,4 +24,4 @@ class MovShareIE(NovaMovIE):
            'title': 'dissapeared image',
            'description': 'optical illusion dissapeared image magic illusion',
        }
    }

@ -44,7 +44,7 @@ class MporaIE(InfoExtractor):
                r'_([0-9]+)\.[a-zA-Z0-9]+$', src['src'],
                False, default=None)
            vcodec = src['type'].partition('/')[2]

            formats.append({
                'format_id': encoding_id + '-' + vcodec,
                'url': src['src'],

@ -60,7 +60,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
            url = response.geturl()
            # Transform the url to get the best quality:
            url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1)
            return [{'url': url,'ext': 'mp4'}]
            return [{'url': url, 'ext': 'mp4'}]

    def _extract_video_formats(self, mdoc, mtvn_id):
        if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4)$', mdoc.find('.//src').text) is not None:
@ -240,15 +240,15 @@ class MTVIE(MTVServicesInfoExtractor):
        uri = mobj.groupdict().get('mgid')
        if uri is None:
            webpage = self._download_webpage(url, video_id)

            # Some videos come from Vevo.com
            m_vevo = re.search(r'isVevoVideo = true;.*?vevoVideoId = "(.*?)";',
                webpage, re.DOTALL)
            if m_vevo:
                vevo_id = m_vevo.group(1);
                vevo_id = m_vevo.group(1)
                self.to_screen('Vevo video detected: %s' % vevo_id)
                return self.url_result('vevo:%s' % vevo_id, ie='Vevo')

            uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri')
        return self._get_videos_info(uri)

@ -73,4 +73,3 @@ class MuenchenTVIE(InfoExtractor):
            'is_live': True,
            'thumbnail': thumbnail,
        }

@ -72,4 +72,4 @@ class MusicPlayOnIE(InfoExtractor):
            'duration': int_or_none(duration),
            'view_count': int_or_none(view_count),
            'formats': formats,
        }

@ -37,7 +37,7 @@ class MuzuTVIE(InfoExtractor):
        player_info_page = self._download_webpage('http://player.muzu.tv/player/playerInit?ai=%s' % video_id,
                                                  video_id, u'Downloading player info')
        video_info = json.loads(player_info_page)['videos'][0]
        for quality in ['1080' , '720', '480', '360']:
        for quality in ['1080', '720', '480', '360']:
            if video_info.get('v%s' % quality):
                break

@ -33,7 +33,7 @@ class MyVideoIE(InfoExtractor):
    # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
    # Released into the Public Domain by Tristan Fischer on 2013-05-19
    # https://github.com/rg3/youtube-dl/pull/842
    def __rc4crypt(self,data, key):
    def __rc4crypt(self, data, key):
        x = 0
        box = list(range(256))
        for i in list(range(256)):
@ -49,10 +49,10 @@ class MyVideoIE(InfoExtractor):
            out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
        return out

    def __md5(self,s):
    def __md5(self, s):
        return hashlib.md5(s).hexdigest().encode()

    def _real_extract(self,url):
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

@ -173,4 +173,3 @@ class MyVideoIE(InfoExtractor):
            'play_path': video_playpath,
            'player_url': video_swfobj,
        }

@ -40,7 +40,7 @@ class NaverIE(InfoExtractor):
            raise ExtractorError('couldn\'t extract vid and key')
        vid = m_id.group(1)
        key = m_id.group(2)
        query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key,})
        query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key, })
        query_urls = compat_urllib_parse.urlencode({
            'masterVid': vid,
            'protocol': 'p2p',
@ -65,7 +65,7 @@ class NaverIE(InfoExtractor):
            if domain.startswith('rtmp'):
                f.update({
                    'ext': 'flv',
                    'rtmp_protocol': '1', # rtmpt
                    'rtmp_protocol': '1',  # rtmpt
                })
            formats.append(f)
        self._sort_formats(formats)

@ -39,7 +39,6 @@ class NBAIE(InfoExtractor):
        duration = parse_duration(
            self._html_search_meta('duration', webpage, 'duration', fatal=False))

        return {
            'id': shortened_video_id,
            'url': video_url,

@ -91,4 +91,4 @@ class NDRIE(InfoExtractor):
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }

@ -23,12 +23,12 @@ class NewgroundsIE(InfoExtractor):
        mobj = re.match(self._VALID_URL, url)
        music_id = mobj.group('id')
        webpage = self._download_webpage(url, music_id)

        title = self._html_search_regex(
            r',"name":"([^"]+)",', webpage, 'music title')
        uploader = self._html_search_regex(
            r',"artist":"([^"]+)",', webpage, 'music uploader')

        music_url_json_string = self._html_search_regex(
            r'({"url":"[^"]+"),', webpage, 'music url') + '}'
        music_url_json = json.loads(music_url_json_string)

@ -89,4 +89,4 @@ class NewstubeIE(InfoExtractor):
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }

@ -93,4 +93,4 @@ class NFBIE(InfoExtractor):
            'uploader': uploader,
            'uploader_id': uploader_id,
            'formats': formats,
        }

@ -31,7 +31,7 @@ class NHLBaseInfoExtractor(InfoExtractor):
                path_url, video_id, 'Downloading final video url')
            video_url = path_doc.find('path').text
        else:
            video_url = initial_video_url

        join = compat_urlparse.urljoin
        return {

@ -163,4 +163,4 @@ class NocoIE(InfoExtractor):
            'uploader_id': uploader_id,
            'duration': duration,
            'formats': formats,
        }

@ -66,4 +66,4 @@ class NovaMovIE(InfoExtractor):
            'url': video_url,
            'title': title,
            'description': description
        }

@ -25,4 +25,4 @@ class NowVideoIE(NovaMovIE):
            'title': 'youtubedl test video _BaW_jenozKc.mp4',
            'description': 'Description',
        }
    }

@ -145,4 +145,4 @@ class NTVIE(InfoExtractor):
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
        }

Some files were not shown because too many files have changed in this diff