diff --git a/test/parameters.json b/test/parameters.json
index 8215d25c5..96998b5c3 100644
--- a/test/parameters.json
+++ b/test/parameters.json
@@ -29,6 +29,7 @@
     "simulate": false,
     "skip_download": false,
     "subtitleslang": null,
+    "subtitlesformat": "srt",
     "test": true,
     "updatetime": true,
     "usenetrc": false,
@@ -36,5 +37,8 @@
     "verbose": true,
     "writedescription": false,
     "writeinfojson": true,
-    "writesubtitles": false
-}
\ No newline at end of file
+    "writesubtitles": false,
+    "onlysubtitles": false,
+    "allsubtitles": false,
+    "listsubtitles": false
+}
diff --git a/test/test_youtube_subtitles.py b/test/test_youtube_subtitles.py
index 5d3566a35..30f2246dd 100644
--- a/test/test_youtube_subtitles.py
+++ b/test/test_youtube_subtitles.py
@@ -38,20 +38,63 @@ def download(self, x):
 md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
 
 class TestYoutubeSubtitles(unittest.TestCase):
+    def setUp(self):
+        DL = FakeDownloader()
+        DL.params['allsubtitles'] = False
+        DL.params['writesubtitles'] = False
+        DL.params['subtitlesformat'] = 'srt'
+        DL.params['listsubtitles'] = False
+    def test_youtube_no_subtitles(self):
+        DL = FakeDownloader()
+        DL.params['writesubtitles'] = False
+        IE = YoutubeIE(DL)
+        info_dict = IE.extract('QRS8MkLhQmM')
+        subtitles = info_dict[0]['subtitles']
+        self.assertEqual(subtitles, None)
     def test_youtube_subtitles(self):
         DL = FakeDownloader()
         DL.params['writesubtitles'] = True
         IE = YoutubeIE(DL)
         info_dict = IE.extract('QRS8MkLhQmM')
-        self.assertEqual(md5(info_dict[0]['subtitles']), 'c3228550d59116f3c29fba370b55d033')
-
+        sub = info_dict[0]['subtitles'][0]
+        self.assertEqual(md5(sub[2]), '4cd9278a35ba2305f47354ee13472260')
     def test_youtube_subtitles_it(self):
         DL = FakeDownloader()
         DL.params['writesubtitles'] = True
         DL.params['subtitleslang'] = 'it'
         IE = YoutubeIE(DL)
         info_dict = IE.extract('QRS8MkLhQmM')
-        self.assertEqual(md5(info_dict[0]['subtitles']), '132a88a0daf8e1520f393eb58f1f646a')
+        sub = info_dict[0]['subtitles'][0]
+        self.assertEqual(md5(sub[2]), '164a51f16f260476a05b50fe4c2f161d')
+    def test_youtube_onlysubtitles(self):
+        DL = FakeDownloader()
+        DL.params['writesubtitles'] = True
+        DL.params['onlysubtitles'] = True
+        IE = YoutubeIE(DL)
+        info_dict = IE.extract('QRS8MkLhQmM')
+        sub = info_dict[0]['subtitles'][0]
+        self.assertEqual(md5(sub[2]), '4cd9278a35ba2305f47354ee13472260')
+    def test_youtube_allsubtitles(self):
+        DL = FakeDownloader()
+        DL.params['allsubtitles'] = True
+        IE = YoutubeIE(DL)
+        info_dict = IE.extract('QRS8MkLhQmM')
+        subtitles = info_dict[0]['subtitles']
+        self.assertEqual(len(subtitles), 12)
+    def test_youtube_subtitles_format(self):
+        DL = FakeDownloader()
+        DL.params['writesubtitles'] = True
+        DL.params['subtitlesformat'] = 'sbv'
+        IE = YoutubeIE(DL)
+        info_dict = IE.extract('QRS8MkLhQmM')
+        sub = info_dict[0]['subtitles'][0]
+        self.assertEqual(md5(sub[2]), '13aeaa0c245a8bed9a451cb643e3ad8b')
+    def test_youtube_list_subtitles(self):
+        DL = FakeDownloader()
+        DL.params['listsubtitles'] = True
+        IE = YoutubeIE(DL)
+        info_dict = IE.extract('QRS8MkLhQmM')
+        self.assertEqual(info_dict, None)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/test/tests.json b/test/tests.json
index 7af3c2892..fd9d33332 100644
--- a/test/tests.json
+++ b/test/tests.json
@@ -293,5 +293,20 @@
     "info_dict": {
       "title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2"
     }
+  },
+  {
+    "name": "Generic",
+    "url": "http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html",
+    "file": "13601338388002.mp4",
+    "md5": "85b90ccc9d73b4acd9138d3af4c27f89"
+  },
+  {
"name": "Spiegel", + "url": "http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html", + "file": "1259285.mp4", + "md5": "2c2754212136f35fb4b19767d242f66e", + "info_dict": { + "title": "Vulkanausbruch in Ecuador: Der \"Feuerschlund\" ist wieder aktiv" + } } ] diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py index 68fad11bc..6af2acbee 100644 --- a/youtube_dl/FileDownloader.py +++ b/youtube_dl/FileDownloader.py @@ -78,7 +78,11 @@ class FileDownloader(object): updatetime: Use the Last-modified header to set output file timestamps. writedescription: Write the video description to a .description file writeinfojson: Write the video description to a .info.json file - writesubtitles: Write the video subtitles to a .srt file + writesubtitles: Write the video subtitles to a file + onlysubtitles: Downloads only the subtitles of the video + allsubtitles: Downloads all the subtitles of the video + listsubtitles: Lists all available subtitles for the video + subtitlesformat: Subtitle format [sbv/srt] (default=srt) subtitleslang: Language of the subtitles to download test: Download only first bytes to test the downloader. keepvideo: Keep the video file after post-processing @@ -301,9 +305,9 @@ def report_writedescription(self, descfn): """ Report that the description file is being written """ self.to_screen(u'[info] Writing video description to: ' + descfn) - def report_writesubtitles(self, srtfn): + def report_writesubtitles(self, sub_filename): """ Report that the subtitles file is being written """ - self.to_screen(u'[info] Writing video subtitles to: ' + srtfn) + self.to_screen(u'[info] Writing video subtitles to: ' + sub_filename) def report_writeinfojson(self, infofn): """ Report that the metadata file has been written """ @@ -372,8 +376,11 @@ def prepare_filename(self, info_dict): filename = self.params['outtmpl'] % template_dict return filename - except (ValueError, KeyError) as err: - self.trouble(u'ERROR: invalid system charset or erroneous output template') + except KeyError as err: + self.trouble(u'ERROR: Erroneous output template') + return None + except ValueError as err: + self.trouble(u'ERROR: Insufficient system charset ' + repr(preferredencoding())) return None def _match_entry(self, info_dict): @@ -519,14 +526,35 @@ def process_info(self, info_dict): if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']: # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE + subtitle = info_dict['subtitles'][0] + (sub_error, sub_lang, sub) = subtitle + sub_format = self.params.get('subtitlesformat') try: - srtfn = filename.rsplit('.', 1)[0] + u'.srt' - self.report_writesubtitles(srtfn) - with io.open(encodeFilename(srtfn), 'w', encoding='utf-8') as srtfile: - srtfile.write(info_dict['subtitles']) + sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' 
+                self.report_writesubtitles(sub_filename)
+                with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
+                    subfile.write(sub)
             except (OSError, IOError):
                 self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
                 return
+            if self.params.get('onlysubtitles', False):
+                return
+
+        if self.params.get('allsubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
+            subtitles = info_dict['subtitles']
+            sub_format = self.params.get('subtitlesformat')
+            for subtitle in subtitles:
+                (sub_error, sub_lang, sub) = subtitle
+                try:
+                    sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+                    self.report_writesubtitles(sub_filename)
+                    with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
+                        subfile.write(sub)
+                except (OSError, IOError):
+                    self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
+                    return
+            if self.params.get('onlysubtitles', False):
+                return
 
         if self.params.get('writeinfojson', False):
             infofn = filename + u'.info.json'
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index e714fa6b0..dd4a776e4 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -48,7 +48,7 @@ class InfoExtractor(object):
     uploader_id:    Nickname or id of the video uploader.
     location:       Physical location of the video.
     player_url:     SWF Player URL (used for rtmpdump).
-    subtitles:      The .srt file contents.
+    subtitles:      The subtitle file contents.
     urlhandle:      [internal] The urlHandle to be used to download the file,
                     like returned by urllib.request.urlopen
 
@@ -126,8 +126,14 @@ def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
     def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
         """ Returns the data of the page as a string """
         urlh = self._request_webpage(url_or_request, video_id, note, errnote)
+        content_type = urlh.headers.get('Content-Type', '')
+        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
+        if m:
+            encoding = m.group(1)
+        else:
+            encoding = 'utf-8'
         webpage_bytes = urlh.read()
-        return webpage_bytes.decode('utf-8', 'replace')
+        return webpage_bytes.decode(encoding, 'replace')
 
     #Methods for following #608
     #They set the correct value of the '_type' key
@@ -236,7 +242,16 @@ def report_video_info_webpage_download(self, video_id):
 
     def report_video_subtitles_download(self, video_id):
         """Report attempt to download video info webpage."""
-        self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id)
+        self._downloader.to_screen(u'[youtube] %s: Checking available subtitles' % video_id)
+
+    def report_video_subtitles_request(self, video_id, sub_lang, format):
+        """Report attempt to download video info webpage."""
+        self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
+
+    def report_video_subtitles_available(self, video_id, sub_lang_list):
+        """Report available subtitles."""
+        sub_lang = ",".join(list(sub_lang_list.keys()))
+        self._downloader.to_screen(u'[youtube] %s: Available subtitles for video: %s' % (video_id, sub_lang))
 
     def report_information_extraction(self, video_id):
         """Report attempt to extract video information."""
@@ -250,55 +265,63 @@ def report_rtmp_download(self):
         """Indicate the download will use the RTMP protocol."""
         self._downloader.to_screen(u'[youtube] RTMP download detected')
 
-    def _closed_captions_xml_to_srt(self, xml_string):
-        srt = ''
-        texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE)
-        # TODO parse xml instead of regex
-        for n, (start, dur_tag, dur, caption) in enumerate(texts):
-            if not dur: dur = '4'
-            start = float(start)
-            end = start + float(dur)
-            start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000)
-            end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000)
-            caption = unescapeHTML(caption)
-            caption = unescapeHTML(caption) # double cycle, intentional
-            srt += str(n+1) + '\n'
-            srt += start + ' --> ' + end + '\n'
-            srt += caption + '\n\n'
-        return srt
-
-    def _extract_subtitles(self, video_id):
+    def _get_available_subtitles(self, video_id):
         self.report_video_subtitles_download(video_id)
         request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
         try:
-            srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
-        srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
-        srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
-        if not srt_lang_list:
-            return (u'WARNING: video has no closed captions', None)
-        if self._downloader.params.get('subtitleslang', False):
-            srt_lang = self._downloader.params.get('subtitleslang')
-        elif 'en' in srt_lang_list:
-            srt_lang = 'en'
-        else:
-            srt_lang = list(srt_lang_list.keys())[0]
-        if not srt_lang in srt_lang_list:
-            return (u'WARNING: no closed captions found in the specified language', None)
+        sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
+        sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
+        if not sub_lang_list:
+            return (u'WARNING: video doesn\'t have subtitles', None)
+        return sub_lang_list
+
+    def _list_available_subtitles(self, video_id):
+        sub_lang_list = self._get_available_subtitles(video_id)
+        self.report_video_subtitles_available(video_id, sub_lang_list)
+
+    def _request_subtitle(self, sub_lang, sub_name, video_id, format):
+        self.report_video_subtitles_request(video_id, sub_lang, format)
         params = compat_urllib_parse.urlencode({
-            'lang': srt_lang,
-            'name': srt_lang_list[srt_lang].encode('utf-8'),
+            'lang': sub_lang,
+            'name': sub_name,
             'v': video_id,
+            'fmt': format,
         })
         url = 'http://www.youtube.com/api/timedtext?' + params
         try:
-            srt_xml = compat_urllib_request.urlopen(url).read().decode('utf-8')
+            sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
-        if not srt_xml:
+        if not sub:
             return (u'WARNING: Did not fetch video subtitles', None)
-        return (None, self._closed_captions_xml_to_srt(srt_xml))
+        return (None, sub_lang, sub)
+
+    def _extract_subtitle(self, video_id):
+        sub_lang_list = self._get_available_subtitles(video_id)
+        sub_format = self._downloader.params.get('subtitlesformat')
+        if self._downloader.params.get('subtitleslang', False):
+            sub_lang = self._downloader.params.get('subtitleslang')
+        elif 'en' in sub_lang_list:
+            sub_lang = 'en'
+        else:
+            sub_lang = list(sub_lang_list.keys())[0]
+        if not sub_lang in sub_lang_list:
+            return (u'WARNING: no closed captions found in the specified language "%s"' % sub_lang, None)
+
+        subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
+        return [subtitle]
+
+    def _extract_all_subtitles(self, video_id):
+        sub_lang_list = self._get_available_subtitles(video_id)
+        sub_format = self._downloader.params.get('subtitlesformat')
+        subtitles = []
+        for sub_lang in sub_lang_list:
+            subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
+            subtitles.append(subtitle)
+        return subtitles
 
     def _print_formats(self, formats):
         print('Available formats:')
@@ -519,12 +542,26 @@ def _real_extract(self, url):
         else:
             video_description = ''
 
-        # closed captions
+        # subtitles
         video_subtitles = None
+
         if self._downloader.params.get('writesubtitles', False):
-            (srt_error, video_subtitles) = self._extract_subtitles(video_id)
-            if srt_error:
-                self._downloader.trouble(srt_error)
+            video_subtitles = self._extract_subtitle(video_id)
+            if video_subtitles:
+                (sub_error, sub_lang, sub) = video_subtitles[0]
+                if sub_error:
+                    self._downloader.trouble(sub_error)
+
+        if self._downloader.params.get('allsubtitles', False):
+            video_subtitles = self._extract_all_subtitles(video_id)
+            for video_subtitle in video_subtitles:
+                (sub_error, sub_lang, sub) = video_subtitle
+                if sub_error:
+                    self._downloader.trouble(sub_error)
+
+        if self._downloader.params.get('listsubtitles', False):
+            sub_lang_list = self._list_available_subtitles(video_id)
+            return
 
         if 'length_seconds' not in video_info:
             self._downloader.trouble(u'WARNING: unable to extract video duration')
@@ -1299,7 +1336,8 @@ def __init__(self, downloader=None):
 
     def report_download_webpage(self, video_id):
         """Report webpage download."""
-        self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
+        if not self._downloader.params.get('test', False):
+            self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
         self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
 
     def report_extraction(self, video_id):
@@ -1369,13 +1407,8 @@ def _real_extract(self, url):
         if new_url: return [self.url_result(new_url)]
 
         video_id = url.split('/')[-1]
-        request = compat_urllib_request.Request(url)
         try:
-            self.report_download_webpage(video_id)
-            webpage = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
-            return
+            webpage = self._download_webpage(url, video_id)
         except ValueError as err:
             # since this is the last-resort InfoExtractor, if
             # this error is thrown, it'll be thrown here
@@ -2576,7 +2609,7 @@ def _real_extract(self, url):
             'uploader': showName,
             'upload_date': None,
             'title': showName,
-            'ext': 'flv',
+            'ext': 'mp4',
             'thumbnail': imgUrl,
             'description': description,
             'player_url': playerUrl,
@@ -3972,11 +4005,11 @@ def _real_extract(self, url):
         webpage = self._download_webpage(url, video_id)
         m = re.search(r'<meta property="og:title" content="(?P<title>.+)"', webpage)
         title = unescapeHTML(m.group('title'))
-        m = re.search(r'<div class="bio-names-and-report">[\s\n]+<h4>(?P<uploader>\w+)</h4>', webpage)
-        uploader = unescapeHTML(m.group('uploader'))
+        m = re.search(r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>', webpage)
+        uploader = clean_html(m.group('uploader'))
         info = {
-                'id':video_id,
-                'url':video_url,
+                'id': video_id,
+                'url': video_url,
                 'ext': 'mp4',
                 'title': title,
                 'thumbnail': thumbnail,
@@ -4113,6 +4146,40 @@ def _real_extract(self, url):
         }
         return [info]
 
+class SpiegelIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?$'
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('videoID')
+
+        webpage = self._download_webpage(url, video_id)
+        m = re.search(r'<div class="module-title">(.*?)</div>', webpage)
+        if not m:
+            raise ExtractorError(u'Cannot find title')
+        video_title = unescapeHTML(m.group(1))
+
+        xml_url = u'http://video2.spiegel.de/flash/' + video_id + u'.xml'
+        xml_code = self._download_webpage(xml_url, video_id,
+            note=u'Downloading XML', errnote=u'Failed to download XML')
+
+        idoc = xml.etree.ElementTree.fromstring(xml_code)
+        last_type = idoc[-1]
+        filename = last_type.findall('./filename')[0].text
+        duration = float(last_type.findall('./duration')[0].text)
+
+        video_url = 'http://video2.spiegel.de/flash/' + filename
+        video_ext = filename.rpartition('.')[2]
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'ext': video_ext,
+            'title': video_title,
+            'duration': duration,
+        }
+        return [info]
+
+
 def gen_extractors():
     """ Return a list of an instance of every supported extractor. The order
     does matter; the first extractor matched is the one handling the URL.
@@ -4161,6 +4228,7 @@ def gen_extractors():
         KeekIE(),
         TEDIE(),
         MySpassIE(),
+        SpiegelIE(),
         GenericIE()
     ]
 
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 3983e2f0e..807b73541 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -173,12 +173,24 @@ def _find_term_columns():
             action='store', dest='format_limit', metavar='FORMAT',
             help='highest quality format to download')
     video_format.add_option('-F', '--list-formats',
            action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
-    video_format.add_option('--write-srt',
+    video_format.add_option('--write-sub', '--write-srt',
            action='store_true', dest='writesubtitles',
-            help='write video closed captions to a .srt file (currently youtube only)', default=False)
-    video_format.add_option('--srt-lang',
+            help='write subtitle file (currently youtube only)', default=False)
+    video_format.add_option('--only-sub',
+            action='store_true', dest='onlysubtitles',
+            help='downloads only the subtitles (no video)', default=False)
+    video_format.add_option('--all-subs',
+            action='store_true', dest='allsubtitles',
+            help='downloads all the available subtitles of the video (currently youtube only)', default=False)
+    video_format.add_option('--list-subs',
+            action='store_true', dest='listsubtitles',
+            help='lists all available subtitles for the video (currently youtube only)', default=False)
+    video_format.add_option('--sub-format',
+            action='store', dest='subtitlesformat', metavar='LANG',
+            help='subtitle format [srt/sbv] (default=srt) (currently youtube only)', default='srt')
+    video_format.add_option('--sub-lang', '--srt-lang',
            action='store', dest='subtitleslang', metavar='LANG',
-            help='language of the closed captions to download (optional) use IETF language tags like \'en\'')
+            help='language of the subtitles to download (optional) use IETF language tags like \'en\'')
 
     verbosity.add_option('-q', '--quiet',
            action='store_true', dest='quiet', help='activates quiet mode', default=False)
@@ -274,12 +286,20 @@ def _find_term_columns():
 
     xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
     if xdg_config_home:
-        userConf = os.path.join(xdg_config_home, 'youtube-dl.conf')
+        userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
     else:
-        userConf = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
-    argv = _readOptions('/etc/youtube-dl.conf') + _readOptions(userConf) + sys.argv[1:]
+        userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
+    systemConf = _readOptions('/etc/youtube-dl.conf')
+    userConf = _readOptions(userConfFile)
+    commandLineConf = sys.argv[1:]
+    argv = systemConf + userConf + commandLineConf
     opts, args = parser.parse_args(argv)
+    if opts.verbose:
+        print(u'[debug] System config: ' + repr(systemConf))
+        print(u'[debug] User config: ' + repr(userConf))
+        print(u'[debug] Command-line args: ' + repr(commandLineConf))
+
     return parser, opts, args
 
 def _real_main():
@@ -450,6 +470,10 @@ def _real_main():
         'writedescription': opts.writedescription,
         'writeinfojson': opts.writeinfojson,
         'writesubtitles': opts.writesubtitles,
+        'onlysubtitles': opts.onlysubtitles,
+        'allsubtitles': opts.allsubtitles,
+        'listsubtitles': opts.listsubtitles,
+        'subtitlesformat': opts.subtitlesformat,
         'subtitleslang': opts.subtitleslang,
         'matchtitle': decodeOption(opts.matchtitle),
         'rejecttitle': decodeOption(opts.rejecttitle),
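
Usage sketch (illustrative only, not part of the patch): the new subtitle parameters can be exercised through the same API the updated test suite uses. FakeDownloader and YoutubeIE are the helpers imported in test/test_youtube_subtitles.py, and 'QRS8MkLhQmM' is the video id those tests download.

    # Minimal sketch, assuming the test helpers from test/test_youtube_subtitles.py
    DL = FakeDownloader()
    DL.params['writesubtitles'] = True     # --write-sub
    DL.params['subtitleslang'] = 'it'      # --sub-lang it
    DL.params['subtitlesformat'] = 'sbv'   # --sub-format sbv
    IE = YoutubeIE(DL)
    info_dict = IE.extract('QRS8MkLhQmM')
    # each entry of info_dict[0]['subtitles'] is a (sub_error, sub_lang, sub) tuple
    (sub_error, sub_lang, sub) = info_dict[0]['subtitles'][0]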