From 723e04d0be85fbdbbbda52512f322331d8fda760 Mon Sep 17 00:00:00 2001 From: Adam Date: Fri, 29 Aug 2014 22:32:03 +0100 Subject: [PATCH 1/5] Add login support to Crunchyroll extractor --- youtube_dl/extractor/crunchyroll.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py index 026a9177e..7642b868e 100644 --- a/youtube_dl/extractor/crunchyroll.py +++ b/youtube_dl/extractor/crunchyroll.py @@ -17,6 +17,7 @@ intlist_to_bytes, unified_strdate, clean_html, + urlencode_postdata, ) from ..aes import ( aes_cbc_decrypt, @@ -51,6 +52,24 @@ class CrunchyrollIE(InfoExtractor): '1080': ('80', '108'), } + def _login(self): + (username, password) = self._get_login_info() + if username is None: + return + self.report_login() + login_url = 'https://www.crunchyroll.com/?a=formhandler' + data = urlencode_postdata({ + 'formname': 'RpcApiUser_Login', + 'name': username, + 'password': password, + }) + login_request = compat_urllib_request.Request(login_url, data) + login_request.add_header('Content-Type', 'application/x-www-form-urlencoded') + self._download_webpage(login_request, None, False, 'Wrong login info') + + def _real_initialize(self): + self._login() + def _decrypt_subtitles(self, data, iv, id): data = bytes_to_intlist(data) iv = bytes_to_intlist(iv) From 78272a076e96bea48525e415e42fa4d95d4d66fa Mon Sep 17 00:00:00 2001 From: Adam Date: Sat, 30 Aug 2014 12:48:56 +0100 Subject: [PATCH 2/5] Add SSA (.ass) subtitle output for Crunchyroll --- youtube_dl/extractor/crunchyroll.py | 78 ++++++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py index 7642b868e..4903764f7 100644 --- a/youtube_dl/extractor/crunchyroll.py +++ b/youtube_dl/extractor/crunchyroll.py @@ -5,6 +5,7 @@ import json import base64 import zlib +import xml.etree.ElementTree from hashlib import sha1 from math import 
pow, sqrt, floor @@ -67,9 +68,11 @@ def _login(self): login_request.add_header('Content-Type', 'application/x-www-form-urlencoded') self._download_webpage(login_request, None, False, 'Wrong login info') + def _real_initialize(self): self._login() + def _decrypt_subtitles(self, data, iv, id): data = bytes_to_intlist(data) iv = bytes_to_intlist(iv) @@ -116,6 +119,75 @@ def _convert_subtitles_to_srt(self, subtitles): output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text) return output + def _convert_subtitles_to_ass(self, subtitles): + output = '' + + def ass_bool(strvalue): + assvalue = '0' + if strvalue == '1': + assvalue = '-1' + return assvalue + + sub_root = xml.etree.ElementTree.fromstring(subtitles) + if not sub_root: + return output + + output = '[Script Info]\n' + output += 'Title: %s\n' % sub_root.attrib["title"] + output += 'ScriptType: v4.00+\n' + output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"] + output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"] + output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"] + output += """ScaledBorderAndShadow: yes + +[V4+ Styles] +Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding +""" + for style in sub_root.findall('./styles/style'): + output += 'Style: ' + style.attrib["name"] + output += ',' + style.attrib["font_name"] + output += ',' + style.attrib["font_size"] + output += ',' + style.attrib["primary_colour"] + output += ',' + style.attrib["secondary_colour"] + output += ',' + style.attrib["outline_colour"] + output += ',' + style.attrib["back_colour"] + output += ',' + ass_bool(style.attrib["bold"]) + output += ',' + ass_bool(style.attrib["italic"]) + output += ',' + ass_bool(style.attrib["underline"]) + output += ',' + ass_bool(style.attrib["strikeout"]) + output += ',' + style.attrib["scale_x"] + output += 
',' + style.attrib["scale_y"] + output += ',' + style.attrib["spacing"] + output += ',' + style.attrib["angle"] + output += ',' + style.attrib["border_style"] + output += ',' + style.attrib["outline"] + output += ',' + style.attrib["shadow"] + output += ',' + style.attrib["alignment"] + output += ',' + style.attrib["margin_l"] + output += ',' + style.attrib["margin_r"] + output += ',' + style.attrib["margin_v"] + output += ',' + style.attrib["encoding"] + output += '\n' + + output += """ +[Events] +Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text +""" + for event in sub_root.findall('./events/event'): + output += 'Dialogue: 0' + output += ',' + event.attrib["start"] + output += ',' + event.attrib["end"] + output += ',' + event.attrib["style"] + output += ',' + event.attrib["name"] + output += ',' + event.attrib["margin_l"] + output += ',' + event.attrib["margin_r"] + output += ',' + event.attrib["margin_v"] + output += ',' + event.attrib["effect"] + output += ',' + event.attrib["text"] + output += '\n' + + return output + def _real_extract(self,url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('video_id') @@ -177,6 +249,7 @@ def _real_extract(self,url): }) subtitles = {} + sub_format = self._downloader.params.get('subtitlesformat', 'srt') for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage): sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\ video_id, note='Downloading subtitles for '+sub_name) @@ -193,7 +266,10 @@ def _real_extract(self,url): lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False) if not lang_code: continue - subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle) + if sub_format == 'ass': + subtitles[lang_code] = self._convert_subtitles_to_ass(subtitle) + else: + subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle) return { 'id': 
video_id, From bbc9dc56f663ba8a61ce2d5c8aa3fdde7992fb78 Mon Sep 17 00:00:00 2001 From: peugeot Date: Sat, 30 Aug 2014 20:43:03 +0200 Subject: [PATCH 3/5] Add support for HornBunny --- youtube_dl/extractor/__init__.py | 1 + youtube_dl/extractor/hornbunny.py | 44 +++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 youtube_dl/extractor/hornbunny.py diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py index 1479d998a..17891fb80 100644 --- a/youtube_dl/extractor/__init__.py +++ b/youtube_dl/extractor/__init__.py @@ -134,6 +134,7 @@ from .hark import HarkIE from .helsinki import HelsinkiIE from .hentaistigma import HentaiStigmaIE +from .hornbunny import HornBunnyIE from .hotnewhiphop import HotNewHipHopIE from .howcast import HowcastIE from .howstuffworks import HowStuffWorksIE diff --git a/youtube_dl/extractor/hornbunny.py b/youtube_dl/extractor/hornbunny.py new file mode 100644 index 000000000..a42fba0cb --- /dev/null +++ b/youtube_dl/extractor/hornbunny.py @@ -0,0 +1,44 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import int_or_none + +class HornBunnyIE(InfoExtractor): + _VALID_URL = r'http?://(?:www\.)?hornbunny\.com/videos/(?P<title_dash>[a-z-]+)-(?P<id>\d+)\.html' + _TEST = { + 'url': 'http://hornbunny.com/videos/panty-slut-jerk-off-instruction-5227.html', + 'md5': '95e40865aedd08eff60272b704852ad7', + 'info_dict': { + 'id': '5227', + 'ext': 'flv', + 'title': 'panty slut jerk off instruction', + 'duration': 550 + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + title = self._html_search_regex(r'class="title">(.*?)</h2>', webpage, 'title') + redirect_url = self._html_search_regex(r'pg&settings=(.*?)\|0"\);', webpage, 'title') + webpage2 = self._download_webpage(redirect_url, video_id) + video_url = 
self._html_search_regex(r'flvMask:(.*?);', webpage2, 'video_url') + + mobj = re.search(r'Runtime: (?P<minutes>\d+):(?P<seconds>\d+)', webpage) + duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None + + view_count = self._html_search_regex(r'Views: (\d+)', webpage, 'view count', fatal=False) + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'ext': 'flv', + 'duration': duration, + 'view_count': int_or_none(view_count), + } From 3e7c12240c7b23913a8a9df2ded21dfae7624d84 Mon Sep 17 00:00:00 2001 From: Philipp Hagemeister Date: Sun, 31 Aug 2014 18:10:05 +0200 Subject: [PATCH 4/5] [youtube] Test for like_count and dislike_count (#3633) --- youtube_dl/extractor/youtube.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index 13676c49f..bdabcaa27 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -316,6 +316,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor): u"upload_date": u"20121002", u"description": u"test chars: \"'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .", u"categories": [u'Science & Technology'], + 'like_count': int, + 'dislike_count': int, } }, { From 55f7bd2dccb9b0ff24074ba8e8dbe6d67ed49c0a Mon Sep 17 00:00:00 2001 From: Philipp Hagemeister Date: Sun, 31 Aug 2014 23:26:19 +0200 Subject: [PATCH 5/5] [youtube] Fix category extraction --- youtube_dl/extractor/youtube.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index bdabcaa27..3417c1275 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -786,7 +786,9 @@ def _real_extract(self, url): upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split()) upload_date = unified_strdate(upload_date) - m_cat_container = 
get_element_by_id("eow-category", video_webpage) + m_cat_container = self._search_regex( + r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>', + video_webpage, 'categories', fatal=False) if m_cat_container: category = self._html_search_regex( r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',