mirror of https://github.com/yt-dlp/yt-dlp.git
[VBox7] move into own file
This commit is contained in:
parent 45aef47281
commit 01c10ca26e
@@ -55,6 +55,7 @@
 from .extractor.ted import TEDIE
 from .extractor.tumblr import TumblrIE
 from .extractor.ustream import UstreamIE
+from .extractor.vbox7 import Vbox7IE
 from .extractor.vimeo import VimeoIE
 from .extractor.worldstarhiphop import WorldStarHipHopIE
 from .extractor.xnxx import XNXXIE
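
For context: this hunk only wires the new module into the existing import list; the class itself moves in the hunks below. A minimal, hypothetical sanity check of the new import path, assuming a youtube-dl checkout from this era on sys.path and a made-up video id:

import re

from youtube_dl.extractor.vbox7 import Vbox7IE

# _VALID_URL is the pattern shown in the diff; its single group captures the video id.
mobj = re.match(Vbox7IE._VALID_URL, 'http://vbox7.com/play:249bb972c2')
print(mobj.group(1))  # -> 249bb972c2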
@@ -413,41 +414,6 @@ def _real_extract(self, url):
             'artist': artist,
         }]
 
-class Vbox7IE(InfoExtractor):
-    """Information Extractor for Vbox7"""
-    _VALID_URL = r'(?:http://)?(?:www\.)?vbox7\.com/play:([^/]+)'
-
-    def _real_extract(self,url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group(1)
-
-        redirect_page, urlh = self._download_webpage_handle(url, video_id)
-        new_location = self._search_regex(r'window\.location = \'(.*)\';', redirect_page, u'redirect location')
-        redirect_url = urlh.geturl() + new_location
-        webpage = self._download_webpage(redirect_url, video_id, u'Downloading redirect page')
-
-        title = self._html_search_regex(r'<title>(.*)</title>',
-            webpage, u'title').split('/')[0].strip()
-
-        ext = "flv"
-        info_url = "http://vbox7.com/play/magare.do"
-        data = compat_urllib_parse.urlencode({'as3':'1','vid':video_id})
-        info_request = compat_urllib_request.Request(info_url, data)
-        info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
-        info_response = self._download_webpage(info_request, video_id, u'Downloading info webpage')
-        if info_response is None:
-            raise ExtractorError(u'Unable to extract the media url')
-        (final_url, thumbnail_url) = map(lambda x: x.split('=')[1], info_response.split('&'))
-
-        return [{
-            'id': video_id,
-            'url': final_url,
-            'ext': ext,
-            'title': title,
-            'thumbnail': thumbnail_url,
-        }]
 
 
 def gen_extractors():
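
The class body removed here reappears unchanged in the new file below. Its first step relies on a single regex: the play page assigns window.location, and the extractor captures the quoted value and concatenates it onto urlh.geturl(). A small sketch of just that regex, using a made-up page fragment:

import re

# Hypothetical fragment of the intermediate page the extractor fetches first;
# the real value of the assignment is whatever vbox7 serves at the time.
redirect_page = "<script>window.location = 'some/relative/target';</script>"

# Same regex as in the diff: capture everything between the quotes.
new_location = re.search(r'window\.location = \'(.*)\';', redirect_page).group(1)
print(new_location)  # some/relative/target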
youtube_dl/extractor/vbox7.py (new file, 46 lines)
@@ -0,0 +1,46 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    compat_urllib_request,
+
+    ExtractorError,
+)
+
+
+class Vbox7IE(InfoExtractor):
+    """Information Extractor for Vbox7"""
+    _VALID_URL = r'(?:http://)?(?:www\.)?vbox7\.com/play:([^/]+)'
+
+    def _real_extract(self,url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        video_id = mobj.group(1)
+
+        redirect_page, urlh = self._download_webpage_handle(url, video_id)
+        new_location = self._search_regex(r'window\.location = \'(.*)\';', redirect_page, u'redirect location')
+        redirect_url = urlh.geturl() + new_location
+        webpage = self._download_webpage(redirect_url, video_id, u'Downloading redirect page')
+
+        title = self._html_search_regex(r'<title>(.*)</title>',
+            webpage, u'title').split('/')[0].strip()
+
+        ext = "flv"
+        info_url = "http://vbox7.com/play/magare.do"
+        data = compat_urllib_parse.urlencode({'as3':'1','vid':video_id})
+        info_request = compat_urllib_request.Request(info_url, data)
+        info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        info_response = self._download_webpage(info_request, video_id, u'Downloading info webpage')
+        if info_response is None:
+            raise ExtractorError(u'Unable to extract the media url')
+        (final_url, thumbnail_url) = map(lambda x: x.split('=')[1], info_response.split('&'))
+
+        return [{
+            'id': video_id,
+            'url': final_url,
+            'ext': ext,
+            'title': title,
+            'thumbnail': thumbnail_url,
+        }]
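
After following the redirect, the extractor POSTs the video id to the magare.do endpoint and splits the URL-encoded reply into the media URL and the thumbnail URL, in that order. A minimal sketch of that parsing step, using a made-up response string (the key names here are hypothetical):

# Hypothetical info response in the format the extractor assumes:
# first pair carries the media URL, second pair the thumbnail URL.
info_response = 'src=http://media.example/video.flv&thumb=http://media.example/thumb.jpg'

final_url, thumbnail_url = map(lambda x: x.split('=')[1], info_response.split('&'))
print(final_url)      # http://media.example/video.flv
print(thumbnail_url)  # http://media.example/thumb.jpg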