mirror of https://github.com/yt-dlp/yt-dlp.git, synced 2024-12-12 14:26:49 +01:00
[viki] Convert to new subtitles system
This commit is contained in:
parent afbdd3acc3
commit 4f7cea6c53
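For orientation before the diff: the "new subtitles system" referred to in the title moves subtitle handling out of the SubtitlesInfoExtractor mixin and onto a plain InfoExtractor subclass, which implements _get_subtitles() and returns a dict mapping language codes to lists of {'url', 'ext'} entries; _real_extract() only calls the public extract_subtitles() wrapper. The sketch below is a minimal illustration of that contract assembled from this diff, not code from the repository; the ExampleIE class, the example.com URLs, and the assumption that the base-class wrapper now covers the --list-subs case are all illustrative.

# Minimal sketch of an extractor under the new subtitles contract
# (assumes a youtube-dl checkout of this era on the import path).
from youtube_dl.extractor.common import InfoExtractor


class ExampleIE(InfoExtractor):
    _VALID_URL = r'https?://example\.com/videos/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            # extract_subtitles() wraps _get_subtitles(); the explicit
            # 'listsubtitles' check removed from viki.py below is assumed
            # to live in this base-class wrapper now.
            'subtitles': self.extract_subtitles(video_id, webpage),
        }

    def _get_subtitles(self, video_id, webpage):
        # Language code -> list of candidate subtitle files.
        return {
            'en': [{'url': 'http://example.com/subs/en.vtt', 'ext': 'vtt'}],
        }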
@@ -23,6 +23,7 @@
     ComedyCentralIE,
     NRKTVIE,
     RaiIE,
+    VikiIE,
 )
 
 
@@ -372,5 +373,17 @@ def test_allsubtitles(self):
         self.assertEqual(md5(subtitles['it']), 'b1d90a98755126b61e667567a1f6680a')
 
 
+class TestVikiSubtitles(BaseTestSubtitles):
+    url = 'http://www.viki.com/videos/1060846v-punch-episode-18'
+    IE = VikiIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), 'b0b781eeb45efd3f6398a925b259150b')
+
+
 if __name__ == '__main__':
     unittest.main()
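The two hunks above extend the subtitle test suite (test/test_subtitles.py at the time); the remaining hunks rewrite the Viki extractor itself (youtube_dl/extractor/viki.py). Like the other cases in that file, the new test hits the network: getSubtitles() from BaseTestSubtitles presumably runs the extractor and downloads each track so the md5 of the English VTT can be checked. The class can be run on its own through the usual unittest entry point, e.g. python test/test_subtitles.py TestVikiSubtitles from the repository root.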
@@ -2,16 +2,17 @@
 
 import re
 
+from ..compat import compat_urlparse
 from ..utils import (
     ExtractorError,
     unescapeHTML,
     unified_strdate,
     US_RATINGS,
 )
-from .subtitles import SubtitlesInfoExtractor
+from .common import InfoExtractor
 
 
-class VikiIE(SubtitlesInfoExtractor):
+class VikiIE(InfoExtractor):
     IE_NAME = 'viki'
 
     _VALID_URL = r'^https?://(?:www\.)?viki\.com/videos/(?P<id>[0-9]+v)'
@@ -69,9 +70,6 @@ def _real_extract(self, url):
 
         # subtitles
         video_subtitles = self.extract_subtitles(video_id, info_webpage)
-        if self._downloader.params.get('listsubtitles', False):
-            self._list_available_subtitles(video_id, info_webpage)
-            return
 
         return {
             'id': video_id,
@@ -85,12 +83,15 @@ def _real_extract(self, url):
             'upload_date': upload_date,
         }
 
-    def _get_available_subtitles(self, video_id, info_webpage):
+    def _get_subtitles(self, video_id, info_webpage):
         res = {}
-        for sturl_html in re.findall(r'<track src="([^"]+)"/>', info_webpage):
+        for sturl_html in re.findall(r'<track src="([^"]+)"', info_webpage):
             sturl = unescapeHTML(sturl_html)
             m = re.search(r'/(?P<lang>[a-z]+)\.vtt', sturl)
             if not m:
                 continue
-            res[m.group('lang')] = sturl
+            res[m.group('lang')] = [{
+                'url': compat_urlparse.urljoin('http://www.viki.com', sturl),
+                'ext': 'vtt',
+            }]
         return res
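To make the new return shape concrete, here is a standalone re-creation of the rewritten loop (illustration only: the sample track path is invented, and the stdlib imports stand in for youtube-dl's compat_urlparse and unescapeHTML helpers). The adjusted regex no longer requires a self-closing "/>" at the end of the track tag, and each track URL is now joined against http://www.viki.com (making relative paths absolute) and wrapped in the list-of-dicts form the new system expects.

import re
from html import unescape as unescapeHTML        # stand-in for youtube_dl.utils.unescapeHTML
from urllib import parse as compat_urlparse      # stand-in for youtube_dl.compat.compat_urlparse

# Hypothetical fragment of a Viki info page; the real path layout may differ.
info_webpage = '<track src="/player/subtitles/1060846/en.vtt" kind="subtitles">'

res = {}
for sturl_html in re.findall(r'<track src="([^"]+)"', info_webpage):
    sturl = unescapeHTML(sturl_html)
    m = re.search(r'/(?P<lang>[a-z]+)\.vtt', sturl)
    if not m:
        continue
    res[m.group('lang')] = [{
        'url': compat_urlparse.urljoin('http://www.viki.com', sturl),
        'ext': 'vtt',
    }]

print(res)
# {'en': [{'url': 'http://www.viki.com/player/subtitles/1060846/en.vtt', 'ext': 'vtt'}]}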