Mirror of https://github.com/ytdl-org/youtube-dl.git (synced 2024-12-13 20:57:14 +01:00)
[sohu] Modernize and extract all formats and more metadata (Closes #4409, closes #2056, closes #3009)
commit 5ac71f0b27
parent 39ac7c9435
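With this change the extractor stops picking only the highest available quality and instead reports every Sohu quality variant as a separate youtube-dl format, together with filesize, resolution, fps and per-part duration. As a quick illustration (not part of the commit), the new behaviour can be exercised through youtube-dl's embedding API; the page URL below is a made-up placeholder of the usual tv.sohu.com form:

# Sketch: list the formats the reworked SohuIE extractor exposes.
# 'listformats' prints a format table instead of downloading anything.
from __future__ import unicode_literals

import youtube_dl

ydl = youtube_dl.YoutubeDL({'listformats': True})
ydl.extract_info('http://tv.sohu.com/20141204/n123456789.shtml',  # hypothetical URL
                 download=False)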
@@ -1,11 +1,10 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import json
 import re
 
 from .common import InfoExtractor
-from ..utils import ExtractorError
+from .common import compat_str
 
 
 class SohuIE(InfoExtractor):
@@ -29,60 +28,73 @@ class SohuIE(InfoExtractor):
                 base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
             else:
                 base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
-            data_url = base_data_url + str(vid_id)
-            data_json = self._download_webpage(
-                data_url, video_id,
-                note='Downloading JSON data for ' + str(vid_id))
-            return json.loads(data_json)
+            return self._download_json(
+                base_data_url + vid_id, video_id,
+                'Downloading JSON data for %s' % vid_id)
 
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         mytv = mobj.group('mytv') is not None
 
         webpage = self._download_webpage(url, video_id)
-        raw_title = self._html_search_regex(r'(?s)<title>(.+?)</title>',
-                                            webpage, 'video title')
+        raw_title = self._html_search_regex(
+            r'(?s)<title>(.+?)</title>',
+            webpage, 'video title')
         title = raw_title.partition('-')[0].strip()
 
-        vid = self._html_search_regex(r'var vid ?= ?["\'](\d+)["\']', webpage,
-                                      'video path')
-        data = _fetch_data(vid, mytv)
+        vid = self._html_search_regex(
+            r'var vid ?= ?["\'](\d+)["\']',
+            webpage, 'video path')
+        vid_data = _fetch_data(vid, mytv)
 
-        QUALITIES = ('ori', 'super', 'high', 'nor')
-        vid_ids = [data['data'][q + 'Vid']
-                   for q in QUALITIES
-                   if data['data'][q + 'Vid'] != 0]
-        if not vid_ids:
-            raise ExtractorError('No formats available for this video')
+        formats_json = {}
+        for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'):
+            vid_id = vid_data['data'].get('%sVid' % format_id)
+            if not vid_id:
+                continue
+            vid_id = compat_str(vid_id)
+            formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv)
 
-        # For now, we just pick the highest available quality
-        vid_id = vid_ids[-1]
-
-        format_data = data if vid == vid_id else _fetch_data(vid_id, mytv)
-        part_count = format_data['data']['totalBlocks']
-        allot = format_data['allot']
-        prot = format_data['prot']
-        clipsURL = format_data['data']['clipsURL']
-        su = format_data['data']['su']
+        part_count = vid_data['data']['totalBlocks']
 
         playlist = []
         for i in range(part_count):
-            part_url = ('http://%s/?prot=%s&file=%s&new=%s' %
-                        (allot, prot, clipsURL[i], su[i]))
-            part_str = self._download_webpage(
-                part_url, video_id,
-                note='Downloading part %d of %d' % (i + 1, part_count))
+            formats = []
+            for format_id, format_data in formats_json.items():
+                allot = format_data['allot']
+                prot = format_data['prot']
 
-            part_info = part_str.split('|')
-            video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
+                data = format_data['data']
+                clips_url = data['clipsURL']
+                su = data['su']
 
-            video_info = {
-                'id': '%s_part%02d' % (video_id, i + 1),
+                part_str = self._download_webpage(
+                    'http://%s/?prot=%s&file=%s&new=%s' %
+                    (allot, prot, clips_url[i], su[i]),
+                    video_id,
+                    'Downloading %s video URL part %d of %d'
+                    % (format_id, i + 1, part_count))
+
+                part_info = part_str.split('|')
+                video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
+
+                formats.append({
+                    'url': video_url,
+                    'format_id': format_id,
+                    'filesize': data['clipsBytes'][i],
+                    'width': data['width'],
+                    'height': data['height'],
+                    'fps': data['fps'],
+                })
+            self._sort_formats(formats)
+
+            playlist.append({
+                'id': '%s_part%d' % (video_id, i + 1),
                 'title': title,
-                'url': video_url,
-                'ext': 'mp4',
-            }
-            playlist.append(video_info)
+                'duration': vid_data['data']['clipsDuration'][i],
+                'formats': formats,
+            })
 
         if len(playlist) == 1:
             info = playlist[0]
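The per-format loop above relies on only a handful of keys in the JSON that _fetch_data returns for each quality. A rough sketch of that structure, reconstructed purely from the fields the new code reads (the key names come from the diff; the values are invented examples, and real responses contain more keys):

# Hypothetical shape of one _fetch_data() result; values are illustrative only.
vid_data = {
    'allot': 'data.vod.itc.cn',   # host used to build each part URL
    'prot': 9,                    # protocol parameter appended to the part URL
    'data': {
        'norVid': 1234567,        # one '<format_id>Vid' entry per quality; 0 or absent = unavailable
        'highVid': 0,
        'totalBlocks': 2,         # how many parts the video is split into
        'clipsURL': ['/clip-1.mp4', '/clip-2.mp4'],
        'su': ['/su-1.mp4', '/su-2.mp4'],
        'clipsBytes': [10000000, 9000000],
        'clipsDuration': [300.0, 280.5],
        'width': 1280,
        'height': 720,
        'fps': 25,
    },
}

Each quality's JSON is fetched once (reusing the already downloaded vid_data when the ids match) and then indexed per part inside the loop, so a two-part video with three available qualities yields two playlist entries carrying three formats each.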