Merge remote-tracking branch 'origin/master'

Conflicts:
	youtube_dl/YoutubeDL.py
Branch: master
Author: Philipp Hagemeister
Commit: 7853cc5ae1

@@ -94,6 +94,40 @@ class TestFormatSelection(unittest.TestCase):
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded[u'format_id'], u'excellent')
 
+    def test_format_selection(self):
+        formats = [
+            {u'format_id': u'35', u'ext': u'mp4'},
+            {u'format_id': u'45', u'ext': u'webm'},
+            {u'format_id': u'47', u'ext': u'webm'},
+            {u'format_id': u'2', u'ext': u'flv'},
+        ]
+        info_dict = {u'formats': formats, u'extractor': u'test'}
+
+        ydl = YDL({'format': u'20/47'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'47')
+
+        ydl = YDL({'format': u'20/71/worst'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'35')
+
+        ydl = YDL()
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'2')
+
+        ydl = YDL({'format': u'webm/mp4'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'47')
+
+        ydl = YDL({'format': u'3gp/40/mp4'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'35')
 
 if __name__ == '__main__':
     unittest.main()

@@ -91,7 +91,7 @@ class YoutubeDL(object):
     downloadarchive:   File name of a file where all downloads are recorded.
                        Videos already present in the file are not downloaded
                        again.
 
     The following parameters are not used by YoutubeDL itself, they are used by
     the FileDownloader:
     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
@@ -216,10 +216,10 @@ class YoutubeDL(object):
         If stderr is a tty file the 'WARNING:' will be colored
         '''
         if sys.stderr.isatty() and os.name != 'nt':
-            _msg_header=u'\033[0;33mWARNING:\033[0m'
+            _msg_header = u'\033[0;33mWARNING:\033[0m'
         else:
-            _msg_header=u'WARNING:'
-        warning_message=u'%s %s' % (_msg_header,message)
+            _msg_header = u'WARNING:'
+        warning_message = u'%s %s' % (_msg_header, message)
         self.to_stderr(warning_message)
 
     def report_error(self, message, tb=None):
@@ -234,19 +234,6 @@ class YoutubeDL(object):
         error_message = u'%s %s' % (_msg_header, message)
         self.trouble(error_message, tb)
 
-    def slow_down(self, start_time, byte_counter):
-        """Sleep if the download speed is over the rate limit."""
-        rate_limit = self.params.get('ratelimit', None)
-        if rate_limit is None or byte_counter == 0:
-            return
-        now = time.time()
-        elapsed = now - start_time
-        if elapsed <= 0.0:
-            return
-        speed = float(byte_counter) / elapsed
-        if speed > rate_limit:
-            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
-
     def report_writedescription(self, descfn):
         """ Report that the description file is being written """
         self.to_screen(u'[info] Writing video description to: ' + descfn)
@@ -330,14 +317,14 @@ class YoutubeDL(object):
             return (u'%(title)s has already been recorded in archive'
                     % info_dict)
         return None
 
     def extract_info(self, url, download=True, ie_key=None, extra_info={}):
         '''
         Returns a list with a dictionary for each video we find.
         If 'download', also downloads the videos.
         extra_info is a dict containing the extra values to add to each result
         '''
         if ie_key:
             ies = [self.get_info_extractor(ie_key)]
         else:
@@ -379,7 +366,7 @@ class YoutubeDL(object):
                     raise
         else:
             self.report_error(u'no suitable InfoExtractor: %s' % url)
 
     def process_ie_result(self, ie_result, download=True, extra_info={}):
         """
         Take the result of the ie(may be modified) and resolve all unresolved
@@ -403,7 +390,7 @@ class YoutubeDL(object):
         elif result_type == 'playlist':
             # We process each entry in the playlist
             playlist = ie_result.get('title', None) or ie_result.get('id', None)
 
             self.to_screen(u'[download] Downloading playlist: %s' % playlist)
 
             playlist_results = []
@@ -421,12 +408,12 @@ class YoutubeDL(object):
             self.to_screen(u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
                 (ie_result['extractor'], playlist, n_all_entries, n_entries))
 
-            for i,entry in enumerate(entries,1):
-                self.to_screen(u'[download] Downloading video #%s of %s' %(i, n_entries))
+            for i, entry in enumerate(entries, 1):
+                self.to_screen(u'[download] Downloading video #%s of %s' % (i, n_entries))
                 extra = {
                     'playlist': playlist,
                     'playlist_index': i + playliststart,
                 }
                 if not 'extractor' in entry:
                     # We set the extractor, if it's an url it will be set then to
                     # the new extractor, but if it's already a video we must make
@@ -450,6 +437,22 @@ class YoutubeDL(object):
         else:
             raise Exception('Invalid result type: %s' % result_type)
 
+    def select_format(self, format_spec, available_formats):
+        if format_spec == 'best' or format_spec is None:
+            return available_formats[-1]
+        elif format_spec == 'worst':
+            return available_formats[0]
+        else:
+            extensions = [u'mp4', u'flv', u'webm', u'3gp']
+            if format_spec in extensions:
+                filter_f = lambda f: f['ext'] == format_spec
+            else:
+                filter_f = lambda f: f['format_id'] == format_spec
+            matches = list(filter(filter_f, available_formats))
+            if matches:
+                return matches[-1]
+            return None
+
     def process_video_result(self, info_dict, download=True):
         assert info_dict.get('_type', 'video') == 'video'
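
Note: as a reading aid for the selection rule introduced above, here is a small, self-contained sketch (not part of the commit) that mirrors select_format and the '/'-separated fallback handling, using the same made-up formats list as the new test. Formats are assumed to be already sorted from worst to best, as in process_video_result.

    formats = [
        {u'format_id': u'35', u'ext': u'mp4'},
        {u'format_id': u'45', u'ext': u'webm'},
        {u'format_id': u'47', u'ext': u'webm'},
        {u'format_id': u'2', u'ext': u'flv'},
    ]

    def select_format(format_spec, available_formats):
        if format_spec == 'best' or format_spec is None:
            return available_formats[-1]   # last entry = best
        elif format_spec == 'worst':
            return available_formats[0]    # first entry = worst
        else:
            extensions = [u'mp4', u'flv', u'webm', u'3gp']
            if format_spec in extensions:
                filter_f = lambda f: f['ext'] == format_spec
            else:
                filter_f = lambda f: f['format_id'] == format_spec
            matches = list(filter(filter_f, available_formats))
            return matches[-1] if matches else None

    def resolve(spec, available_formats):
        # '/'-separated fallback chain: the first spec that matches wins.
        for rf in spec.split('/'):
            selected = select_format(rf, available_formats)
            if selected is not None:
                return selected
        return None

    print(resolve(u'20/47', formats)['format_id'])        # 47
    print(resolve(u'webm/mp4', formats)['format_id'])     # 47 (best webm)
    print(resolve(u'20/71/worst', formats)['format_id'])  # 35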
@@ -460,7 +463,8 @@ class YoutubeDL(object):
 
         # This extractors handle format selection themselves
         if info_dict['extractor'] in [u'youtube', u'Youku', u'YouPorn', u'mixcloud']:
-            self.process_info(info_dict)
+            if download:
+                self.process_info(info_dict)
             return info_dict
 
         # We now pick which formats have to be downloaded
@@ -472,17 +476,14 @@ class YoutubeDL(object):
         # We check that all the formats have the format and format_id fields
         for (i, format) in enumerate(formats):
-            if format.get('format') is None:
-                if format.get('height') is not None:
-                    if format.get('width') is not None:
-                        format_desc = u'%sx%s' % (format['width'], format['height'])
-                    else:
-                        format_desc = u'%sp' % format['height']
-                else:
-                    format_desc = '???'
-                format['format'] = format_desc
             if format.get('format_id') is None:
                 format['format_id'] = compat_str(i)
+            if format.get('format') is None:
+                format['format'] = u'{id} - {res}{note}'.format(
+                    id=format['format_id'],
+                    res=self.format_resolution(format),
+                    note=u' ({})'.format(format['format_note']) if format.get('format_note') is not None else '',
+                )
 
         if self.params.get('listformats', None):
             self.list_formats(info_dict)
@@ -504,22 +505,20 @@ class YoutubeDL(object):
         formats = sorted(formats, key=_free_formats_key)
 
         req_format = self.params.get('format', 'best')
+        if req_format is None:
+            req_format = 'best'
         formats_to_download = []
-        if req_format == 'best' or req_format is None:
-            formats_to_download = [formats[-1]]
-        elif req_format == 'worst':
-            formats_to_download = [formats[0]]
         # The -1 is for supporting YoutubeIE
-        elif req_format in ('-1', 'all'):
+        if req_format in ('-1', 'all'):
             formats_to_download = formats
         else:
-            # We can accept formats requestd in the format: 34/10/5, we pick
+            # We can accept formats requestd in the format: 34/5/best, we pick
             # the first that is available, starting from left
             req_formats = req_format.split('/')
             for rf in req_formats:
-                matches = filter(lambda f:f['format_id'] == rf ,formats)
-                if matches:
-                    formats_to_download = [matches[0]]
+                selected_format = self.select_format(rf, formats)
+                if selected_format is not None:
+                    formats_to_download = [selected_format]
                     break
         if not formats_to_download:
             raise ExtractorError(u'requested format not available')
@@ -610,20 +609,20 @@ class YoutubeDL(object):
         if self.params.get('writeannotations', False):
             try:
                 annofn = filename + u'.annotations.xml'
                 self.report_writeannotations(annofn)
                 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                     annofile.write(info_dict['annotations'])
             except (KeyError, TypeError):
                 self.report_warning(u'There are no annotations to write.')
             except (OSError, IOError):
                 self.report_error(u'Cannot write annotations file: ' + annofn)
                 return
 
         subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                        self.params.get('writeautomaticsub')])
 
         if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
             # subtitles download errors are already managed as troubles in relevant IE
             # that way it will silently go on when used with unsupporting IE
             subtitles = info_dict['subtitles']
@@ -645,7 +644,7 @@ class YoutubeDL(object):
             infofn = filename + u'.info.json'
             self.report_writeinfojson(infofn)
             try:
-                json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle'])
+                json_info_dict = dict((k, v) for k, v in info_dict.items() if not k in ['urlhandle'])
                 write_json_file(json_info_dict, encodeFilename(infofn))
             except (OSError, IOError):
                 self.report_error(u'Cannot write metadata to JSON file ' + infofn)
@@ -715,7 +714,7 @@ class YoutubeDL(object):
         keep_video = None
         for pp in self._pps:
             try:
-                keep_video_wish,new_info = pp.run(info)
+                keep_video_wish, new_info = pp.run(info)
                 if keep_video_wish is not None:
                     if keep_video_wish:
                         keep_video = keep_video_wish
@@ -754,16 +753,31 @@ class YoutubeDL(object):
             with locked_file(fn, 'a', encoding='utf-8') as archive_file:
                 archive_file.write(vid_id + u'\n')
 
+    @staticmethod
+    def format_resolution(format):
+        if format.get('height') is not None:
+            if format.get('width') is not None:
+                res = u'%sx%s' % (format['width'], format['height'])
+            else:
+                res = u'%sp' % format['height']
+        else:
+            res = '???'
+        return res
+
     def list_formats(self, info_dict):
         formats_s = []
         for format in info_dict.get('formats', [info_dict]):
-            formats_s.append("%s\t:\t%s\t[%s]" % (format['format_id'],
-                                                  format['ext'],
-                                                  format.get('format', '???'),
-                                                  )
-                             )
+            formats_s.append(u'%-15s: %-5s %-15s[%s]' % (
+                format['format_id'],
+                format['ext'],
+                format.get('format_note') or '-',
+                self.format_resolution(format),
+                )
+            )
         if len(formats_s) != 1:
             formats_s[0] += ' (worst)'
             formats_s[-1] += ' (best)'
         formats_s = "\n".join(formats_s)
-        self.to_screen(u"[info] Available formats for %s:\nformat code\textension\n%s" % (info_dict['id'], formats_s))
+        self.to_screen(u'[info] Available formats for %s:\n'
+                       u'format code extension note resolution\n%s' % (
+                           info_dict['id'], formats_s))
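
Note: a short sketch (values made up) of what the new format_resolution helper and the reworked list_formats row template produce:

    def format_resolution(fmt):
        if fmt.get('height') is not None:
            if fmt.get('width') is not None:
                return u'%sx%s' % (fmt['width'], fmt['height'])
            return u'%sp' % fmt['height']
        return '???'

    fmt = {'format_id': u'137', 'ext': u'mp4',
           'format_note': u'DASH video', 'width': 1920, 'height': 1080}

    print(format_resolution(fmt))              # 1920x1080
    print(format_resolution({'height': 720}))  # 720p
    print(format_resolution({}))               # ???
    print(u'%-15s: %-5s %-15s[%s]' % (
        fmt['format_id'], fmt['ext'],
        fmt.get('format_note') or '-',
        format_resolution(fmt)))
    # 137            : mp4   DASH video     [1920x1080]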

@@ -174,12 +174,27 @@ class ArteTVPlus7IE(InfoExtractor):
         # Some formats use the m3u8 protocol
         formats = filter(lambda f: f.get('videoFormat') != 'M3U8', formats)
         # We order the formats by quality
-        formats = sorted(formats, key=lambda f: int(f.get('height',-1)))
+        formats = list(formats) # in python3 filter returns an iterator
+        if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
+            sort_key = lambda f: ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
+        else:
+            sort_key = lambda f: int(f.get('height',-1))
+        formats = sorted(formats, key=sort_key)
         # Prefer videos without subtitles in the same language
         formats = sorted(formats, key=lambda f: re.match(r'VO(F|A)-STM\1', f.get('versionCode', '')) is None)
         # Pick the best quality
         def _format(format_info):
+            quality = format_info['quality']
+            m_quality = re.match(r'\w*? - (\d*)p', quality)
+            if m_quality is not None:
+                quality = m_quality.group(1)
+            if format_info.get('versionCode') is not None:
+                format_id = u'%s-%s' % (quality, format_info['versionCode'])
+            else:
+                format_id = quality
             info = {
+                'format_id': format_id,
+                'format_note': format_info.get('versionLibelle'),
                 'width': format_info.get('width'),
                 'height': format_info.get('height'),
             }
@@ -192,8 +207,6 @@ class ArteTVPlus7IE(InfoExtractor):
             info['ext'] = determine_ext(info['url'])
             return info
         info_dict['formats'] = [_format(f) for f in formats]
-        # TODO: Remove when #980 has been merged
-        info_dict.update(info_dict['formats'][-1])
 
         return info_dict
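
Note: the new _format code derives format_id from the quality label and the version code. A hedged illustration with hypothetical Arte metadata values (the real strings may differ):

    import re

    format_info = {'quality': u'HD - 720p', 'versionCode': u'VF'}  # made-up values
    quality = format_info['quality']
    m_quality = re.match(r'\w*? - (\d*)p', quality)
    if m_quality is not None:
        quality = m_quality.group(1)                               # u'720'
    format_id = u'%s-%s' % (quality, format_info['versionCode'])
    print(format_id)                                               # 720-VF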
@@ -207,7 +220,7 @@ class ArteTVCreativeIE(ArteTVPlus7IE):
         u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
         u'file': u'050489-002.mp4',
         u'info_dict': {
-            u'title': u'Agentur Amateur #2 - Corporate Design',
+            u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design',
         },
     }

@@ -61,9 +61,12 @@ class InfoExtractor(object):
                     * ext         Will be calculated from url if missing
                     * format      A human-readable description of the format
                                   ("mp4 container with h264/opus").
-                                  Calculated from width and height if missing.
+                                  Calculated from the format_id, width, height
+                                  and format_note fields if missing.
                     * format_id   A short description of the format
                                   ("mp4_h264_opus" or "19")
+                    * format_note Additional info about the format
+                                  ("3D" or "DASH video")
                     * width       Width of the video, if known
                     * height      Height of the video, if known
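
Note: with the new format_note field, an extractor-supplied format entry and the 'format' string that YoutubeDL.process_video_result now derives from it would look roughly like this (values are illustrative only):

    fmt = {u'format_id': u'22', u'ext': u'mp4',
           u'format_note': u'3D', u'width': 1280, u'height': 720}
    derived = u'{id} - {res}{note}'.format(
        id=fmt['format_id'],
        res=u'%sx%s' % (fmt['width'], fmt['height']),  # what format_resolution(fmt) returns
        note=u' ({})'.format(fmt['format_note']) if fmt.get('format_note') is not None else '')
    print(derived)  # 22 - 1280x720 (3D)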

@@ -41,9 +41,9 @@ class GooglePlusIE(InfoExtractor):
         # Extract update date
         upload_date = self._html_search_regex(
-            r'''(?x)<a.+?class="o-T-s\s[^"]+"\s+style="display:\s*none"\s*>
+            r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                     ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
-            webpage, u'upload date', fatal=False)
+            webpage, u'upload date', fatal=False, flags=re.VERBOSE)
         if upload_date:
             # Convert timestring to a format suitable for filename
             upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")

@@ -19,7 +19,7 @@ class InternetVideoArchiveIE(InfoExtractor):
         u'info_dict': {
             u'title': u'SKYFALL',
             u'description': u'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
-            u'duration': 156,
+            u'duration': 153,
         },
     }
@@ -74,7 +74,7 @@ class InternetVideoArchiveIE(InfoExtractor):
             })
         formats = sorted(formats, key=lambda f: f['bitrate'])
 
-        info = {
+        return {
             'id': video_id,
             'title': item.find('title').text,
             'formats': formats,
@@ -82,6 +82,3 @@ class InternetVideoArchiveIE(InfoExtractor):
             'description': item.find('description').text,
             'duration': int(attr['duration']),
         }
-        # TODO: Remove when #980 has been merged
-        info.update(formats[-1])
-        return info

@@ -90,8 +90,8 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):
              r'{statusIndex:0,index:0,.*?id:(.*?),'],
             webpage, u'category id')
         playlist_title = self._html_search_regex(
-            r'\?catid=%s">(.*?)</a>' % cat_id,
-            webpage, u'playlist title', flags=re.DOTALL)
+            r'tab0"[^>]*?>(.*?)</td>',
+            webpage, u'playlist title', flags=re.DOTALL).lower().capitalize()
         data = compat_urllib_parse.urlencode({
             'cid': cat_id,
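
Note: the added .lower().capitalize() normalises the casing of the scraped playlist title; with a made-up page value:

    print(u'NHL TONIGHT'.lower().capitalize())  # Nhl tonight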

@@ -16,7 +16,7 @@ class VideoDetectiveIE(InfoExtractor):
         u'info_dict': {
             u'title': u'KICK-ASS 2',
             u'description': u'md5:65ba37ad619165afac7d432eaded6013',
-            u'duration': 138,
+            u'duration': 135,
         },
     }

@@ -179,46 +179,45 @@ class VimeoIE(InfoExtractor):
         # Vimeo specific: extract video codec and quality information
         # First consider quality, then codecs, then take everything
-        # TODO bind to format param
-        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
+        codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
         files = { 'hd': [], 'sd': [], 'other': []}
         config_files = config["video"].get("files") or config["request"].get("files")
         for codec_name, codec_extension in codecs:
-            if codec_name in config_files:
-                if 'hd' in config_files[codec_name]:
-                    files['hd'].append((codec_name, codec_extension, 'hd'))
-                elif 'sd' in config_files[codec_name]:
-                    files['sd'].append((codec_name, codec_extension, 'sd'))
+            for quality in config_files.get(codec_name, []):
+                format_id = '-'.join((codec_name, quality)).lower()
+                key = quality if quality in files else 'other'
+                video_url = None
+                if isinstance(config_files[codec_name], dict):
+                    file_info = config_files[codec_name][quality]
+                    video_url = file_info.get('url')
                 else:
-                    files['other'].append((codec_name, codec_extension, config_files[codec_name][0]))
-
-        for quality in ('hd', 'sd', 'other'):
-            if len(files[quality]) > 0:
-                video_quality = files[quality][0][2]
-                video_codec = files[quality][0][0]
-                video_extension = files[quality][0][1]
-                self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
-                break
-        else:
+                    file_info = {}
+                if video_url is None:
+                    video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
+                        %(video_id, sig, timestamp, quality, codec_name.upper())
+
+                files[key].append({
+                    'ext': codec_extension,
+                    'url': video_url,
+                    'format_id': format_id,
+                    'width': file_info.get('width'),
+                    'height': file_info.get('height'),
+                })
+
+        formats = []
+        for key in ('other', 'sd', 'hd'):
+            formats += files[key]
+        if len(formats) == 0:
             raise ExtractorError(u'No known codec found')
 
-        video_url = None
-        if isinstance(config_files[video_codec], dict):
-            video_url = config_files[video_codec][video_quality].get("url")
-        if video_url is None:
-            video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
-                %(video_id, sig, timestamp, video_quality, video_codec.upper())
-
         return [{
             'id': video_id,
-            'url': video_url,
             'uploader': video_uploader,
             'uploader_id': video_uploader_id,
             'upload_date': video_upload_date,
             'title': video_title,
-            'ext': video_extension,
             'thumbnail': video_thumbnail,
             'description': video_description,
+            'formats': formats,
         }]
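
Note: with the rewritten loop, every entry appended to files[key] is a format dict, and format_id is built from the codec name and the quality key, for example:

    print('-'.join(('h264', 'hd')).lower())  # h264-hd
    # so an appended entry looks roughly like (width/height values illustrative):
    # {'ext': 'mp4', 'url': '...', 'format_id': 'h264-hd', 'width': 1280, 'height': 720}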

@@ -1150,7 +1150,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         list_page = self._download_webpage(list_url, video_id)
         caption_list = xml.etree.ElementTree.fromstring(list_page.encode('utf-8'))
         original_lang_node = caption_list.find('track')
-        if not original_lang_node or original_lang_node.attrib.get('kind') != 'asr' :
+        if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr' :
             self._downloader.report_warning(u'Video doesn\'t have automatic captions')
             return {}
         original_lang = original_lang_node.attrib['lang_code']
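
Note: the "is None" check matters because ElementTree elements are truth-tested by their number of child elements, so a <track> node that exists but has no children is falsy, and the old "not original_lang_node" test treated it as missing. A quick demonstration:

    import xml.etree.ElementTree as ET

    track = ET.fromstring('<track lang_code="en" kind="asr"/>')
    print(track is None)    # False: the node was found
    print(len(track) == 0)  # True: no children, which is what made the old check misfire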
