Skip to content

Commit

Permalink
Merge branch 'master' of https://github.com/yt-dlp/yt-dlp into ytdlp
Browse files Browse the repository at this point in the history
* 'master' of https://github.com/yt-dlp/yt-dlp:
  Improve error handling of bad config files
  [cleanup] Deprecate `YoutubeDL.parse_outtmpl`
  [cleanup, utils] Don't use kwargs for `format_field`
  • Loading branch information
Lesmiscore committed Jun 18, 2022
2 parents dc7cc81 + 44a6fcf commit 0612117
Show file tree
Hide file tree
Showing 27 changed files with 60 additions and 56 deletions.
34 changes: 16 additions & 18 deletions yt_dlp/YoutubeDL.py
Expand Up @@ -724,7 +724,7 @@ def check_deprecated(param, option, suggestion):
'Set the LC_ALL environment variable to fix this.')
self.params['restrictfilenames'] = True

self.outtmpl_dict = self.parse_outtmpl()
self._parse_outtmpl()

# Creating format selector here allows us to catch syntax errors before the extraction
self.format_selector = (
Expand Down Expand Up @@ -1054,21 +1054,19 @@ def raise_no_formats(self, info, forced=False, *, msg=None):
self.report_warning(msg)

def parse_outtmpl(self):
    """Deprecated public API: parse and return the output-template dict.

    Kept only for backward compatibility; callers should read
    ``self.params['outtmpl']`` after construction instead.
    """
    self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
    self._parse_outtmpl()
    return self.params['outtmpl']

def _parse_outtmpl(self):
    """Normalize ``self.params['outtmpl']`` in place.

    Ensures the param is a dict (a bare template string becomes
    ``{'default': template}``) and fills in any missing keys from
    DEFAULT_OUTTMPL. When 'restrictfilenames' is set, spaces in the
    default templates are removed before they are applied.
    """
    sanitize = lambda x: x
    if self.params.get('restrictfilenames'):  # Remove spaces in the default template
        sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

    outtmpl = self.params.setdefault('outtmpl', {})
    if not isinstance(outtmpl, dict):
        # A plain string/bytes template applies to the 'default' type
        self.params['outtmpl'] = outtmpl = {'default': outtmpl}
    # Only fill keys the user has not set (None counts as unset)
    outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

def get_output_path(self, dir_type='', filename=None):
paths = self.params.get('paths', {})
Expand Down Expand Up @@ -1309,7 +1307,7 @@ def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
if outtmpl is None:
outtmpl = self.outtmpl_dict.get(tmpl_type or 'default', self.outtmpl_dict['default'])
outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
try:
outtmpl = self._outtmpl_expandpath(outtmpl)
filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
Expand Down Expand Up @@ -1955,7 +1953,7 @@ def can_merge():
and (
not can_merge()
or info_dict.get('is_live') and not self.params.get('live_from_start')
or self.outtmpl_dict['default'] == '-'))
or self.params['outtmpl']['default'] == '-'))
compat = (
prefer_best
or self.params.get('allow_multiple_audio_streams', False)
Expand Down Expand Up @@ -3391,7 +3389,7 @@ def wrapper(*args, **kwargs):
def download(self, url_list):
"""Download a given list of URLs."""
url_list = variadic(url_list) # Passing a single URL is a common mistake
outtmpl = self.outtmpl_dict['default']
outtmpl = self.params['outtmpl']['default']
if (len(url_list) > 1
and outtmpl != '-'
and '%' not in outtmpl
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/arnes.py
Expand Up @@ -90,7 +90,7 @@ def _real_extract(self, url):
'timestamp': parse_iso8601(video.get('creationTime')),
'channel': channel.get('name'),
'channel_id': channel_id,
'channel_url': format_field(channel_id, template=f'{self._BASE_URL}/?channel=%s'),
'channel_url': format_field(channel_id, None, f'{self._BASE_URL}/?channel=%s'),
'duration': float_or_none(video.get('duration'), 1000),
'view_count': int_or_none(video.get('views')),
'tags': video.get('hashtags'),
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/awaan.py
Expand Up @@ -41,7 +41,7 @@ def _parse_video_data(self, video_data, video_id, is_live):
'id': video_id,
'title': title,
'description': video_data.get('description_en') or video_data.get('description_ar'),
'thumbnail': format_field(img, template='http://admin.mangomolo.com/analytics/%s'),
'thumbnail': format_field(img, None, 'http://admin.mangomolo.com/analytics/%s'),
'duration': int_or_none(video_data.get('duration')),
'timestamp': parse_iso8601(video_data.get('create_time'), ' '),
'is_live': is_live,
Expand Down
4 changes: 2 additions & 2 deletions yt_dlp/extractor/common.py
Expand Up @@ -1106,7 +1106,7 @@ def _download_webpage(
self._sleep(timeout, video_id)

def report_warning(self, msg, video_id=None, *args, only_once=False, **kwargs):
idstr = format_field(video_id, template='%s: ')
idstr = format_field(video_id, None, '%s: ')
msg = f'[{self.IE_NAME}] {idstr}{msg}'
if only_once:
if f'WARNING: {msg}' in self._printed_messages:
Expand Down Expand Up @@ -1152,7 +1152,7 @@ def raise_login_required(
self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
self.report_warning(msg)
return
msg += format_field(self._login_hint(method), template='. %s')
msg += format_field(self._login_hint(method), None, '. %s')
raise ExtractorError(msg, expected=True)

def raise_geo_restricted(
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/flickr.py
Expand Up @@ -94,7 +94,7 @@ def _real_extract(self, url):
owner = video_info.get('owner', {})
uploader_id = owner.get('nsid')
uploader_path = owner.get('path_alias') or uploader_id
uploader_url = format_field(uploader_path, template='https://www.flickr.com/photos/%s/')
uploader_url = format_field(uploader_path, None, 'https://www.flickr.com/photos/%s/')

return {
'id': video_id,
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/instagram.py
Expand Up @@ -410,7 +410,7 @@ def _real_extract(self, url):
if nodes:
return self.playlist_result(
self._extract_nodes(nodes, True), video_id,
format_field(username, template='Post by %s'), description)
format_field(username, None, 'Post by %s'), description)

video_url = self._og_search_video_url(webpage, secure=False)

Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/iqiyi.py
Expand Up @@ -610,7 +610,7 @@ def _real_extract(self, url):
preview_time = traverse_obj(
initial_format_data, ('boss_ts', (None, 'data'), ('previewTime', 'rtime')), expected_type=float_or_none, get_all=False)
if traverse_obj(initial_format_data, ('boss_ts', 'data', 'prv'), expected_type=int_or_none):
self.report_warning('This preview video is limited%s' % format_field(preview_time, template=' to %s seconds'))
self.report_warning('This preview video is limited%s' % format_field(preview_time, None, ' to %s seconds'))

# TODO: Extract audio-only formats
for bid in set(traverse_obj(initial_format_data, ('program', 'video', ..., 'bid'), expected_type=str_or_none, default=[])):
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/joj.py
Expand Up @@ -70,7 +70,7 @@ def _real_extract(self, url):
r'(\d+)[pP]\.', format_url, 'height', default=None)
formats.append({
'url': format_url,
'format_id': format_field(height, template='%sp'),
'format_id': format_field(height, None, '%sp'),
'height': int(height),
})
if not formats:
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/keezmovies.py
Expand Up @@ -68,7 +68,7 @@ def extract_format(format_url, height=None):
video_url, title, 32).decode('utf-8')
formats.append({
'url': format_url,
'format_id': format_field(height, template='%dp'),
'format_id': format_field(height, None, '%dp'),
'height': height,
'tbr': tbr,
})
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/lastfm.py
Expand Up @@ -15,7 +15,7 @@ def _entries(self, url, playlist_id):
for page_number in range(start_page_number, (last_page_number or start_page_number) + 1):
webpage = self._download_webpage(
url, playlist_id,
note='Downloading page %d%s' % (page_number, format_field(last_page_number, template=' of %d')),
note='Downloading page %d%s' % (page_number, format_field(last_page_number, None, ' of %d')),
query={'page': page_number})
page_entries = [
self.url_result(player_url, 'Youtube')
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/line.py
Expand Up @@ -34,7 +34,7 @@ def _parse_broadcast_item(self, item):
'timestamp': int_or_none(item.get('createdAt')),
'channel': channel.get('name'),
'channel_id': channel_id,
'channel_url': format_field(channel_id, template='https://live.line.me/channels/%s'),
'channel_url': format_field(channel_id, None, 'https://live.line.me/channels/%s'),
'duration': int_or_none(item.get('archiveDuration')),
'view_count': int_or_none(item.get('viewerCount')),
'comment_count': int_or_none(item.get('chatCount')),
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/medaltv.py
Expand Up @@ -116,7 +116,7 @@ def add_item(container, item_url, height, id_key='format_id', item_id=None):
author = try_get(
hydration_data, lambda x: list(x['profiles'].values())[0], dict) or {}
author_id = str_or_none(author.get('id'))
author_url = format_field(author_id, template='https://medal.tv/users/%s')
author_url = format_field(author_id, None, 'https://medal.tv/users/%s')

return {
'id': video_id,
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/minds.py
Expand Up @@ -118,7 +118,7 @@ def _real_extract(self, url):
'timestamp': int_or_none(entity.get('time_created')),
'uploader': strip_or_none(owner.get('name')),
'uploader_id': uploader_id,
'uploader_url': format_field(uploader_id, template='https://www.minds.com/%s'),
'uploader_url': format_field(uploader_id, None, 'https://www.minds.com/%s'),
'view_count': int_or_none(entity.get('play:count')),
'like_count': int_or_none(entity.get('thumbs:up:count')),
'dislike_count': int_or_none(entity.get('thumbs:down:count')),
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/pornhub.py
Expand Up @@ -429,7 +429,7 @@ def add_format(format_url, height=None):
default=None))
formats.append({
'url': format_url,
'format_id': format_field(height, template='%dp'),
'format_id': format_field(height, None, '%dp'),
'height': height,
})

Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/radlive.py
Expand Up @@ -80,7 +80,7 @@ def _real_extract(self, url):
'release_timestamp': release_date,
'channel': channel.get('name'),
'channel_id': channel_id,
'channel_url': format_field(channel_id, template='https://rad.live/content/channel/%s'),
'channel_url': format_field(channel_id, None, 'https://rad.live/content/channel/%s'),

}
if content_type == 'episode':
Expand Down
6 changes: 3 additions & 3 deletions yt_dlp/extractor/rokfin.py
Expand Up @@ -146,7 +146,7 @@ def _get_comments(self, video_id):
for page_n in itertools.count():
raw_comments = self._download_json(
f'{_API_BASE_URL}comment?postId={video_id[5:]}&page={page_n}&size=50',
video_id, note=f'Downloading viewer comments page {page_n + 1}{format_field(pages_total, template=" of %s")}',
video_id, note=f'Downloading viewer comments page {page_n + 1}{format_field(pages_total, None, " of %s")}',
fatal=False) or {}

for comment in raw_comments.get('content') or []:
Expand Down Expand Up @@ -318,7 +318,7 @@ def _entries(self, channel_id, channel_name, tab):
data_url = f'{_API_BASE_URL}post/search/{tab}?page={page_n}&size=50&creator={channel_id}'
metadata = self._download_json(
data_url, channel_name,
note=f'Downloading video metadata page {page_n + 1}{format_field(pages_total, template=" of %s")}')
note=f'Downloading video metadata page {page_n + 1}{format_field(pages_total, None, " of %s")}')

yield from self._get_video_data(metadata)
pages_total = int_or_none(metadata.get('totalPages')) or None
Expand Down Expand Up @@ -369,7 +369,7 @@ def _search_results(self, query):
for page_number in itertools.count(1):
search_results = self._run_search_query(
query, data={'query': query, 'page': {'size': 100, 'current': page_number}},
note=f'Downloading page {page_number}{format_field(total_pages, template=" of ~%s")}')
note=f'Downloading page {page_number}{format_field(total_pages, None, " of ~%s")}')
total_pages = traverse_obj(search_results, ('meta', 'page', 'total_pages'), expected_type=int_or_none)

for result in search_results.get('results') or []:
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/storyfire.py
Expand Up @@ -44,7 +44,7 @@ def _parse_video(self, video):
'timestamp': int_or_none(video.get('publishDate')),
'uploader': video.get('username'),
'uploader_id': uploader_id,
'uploader_url': format_field(uploader_id, template='https://storyfire.com/user/%s/video'),
'uploader_url': format_field(uploader_id, None, 'https://storyfire.com/user/%s/video'),
'episode_number': int_or_none(video.get('episodeNumber') or video.get('episode_number')),
}

Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/trovo.py
Expand Up @@ -38,7 +38,7 @@ def _extract_streamer_info(self, data):
return {
'uploader': streamer_info.get('nickName'),
'uploader_id': str_or_none(streamer_info.get('uid')),
'uploader_url': format_field(username, template='https://trovo.live/%s'),
'uploader_url': format_field(username, None, 'https://trovo.live/%s'),
}


Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/twitter.py
Expand Up @@ -470,7 +470,7 @@ def _real_extract(self, url):
'uploader': uploader,
'timestamp': unified_timestamp(status.get('created_at')),
'uploader_id': uploader_id,
'uploader_url': format_field(uploader_id, template='https://twitter.com/%s'),
'uploader_url': format_field(uploader_id, None, 'https://twitter.com/%s'),
'like_count': int_or_none(status.get('favorite_count')),
'repost_count': int_or_none(status.get('retweet_count')),
'comment_count': int_or_none(status.get('reply_count')),
Expand Down
4 changes: 2 additions & 2 deletions yt_dlp/extractor/vidio.py
Expand Up @@ -152,7 +152,7 @@ def _real_extract(self, url):
'uploader': user.get('name'),
'timestamp': parse_iso8601(video.get('created_at')),
'uploader_id': username,
'uploader_url': format_field(username, template='https://www.vidio.com/@%s'),
'uploader_url': format_field(username, None, 'https://www.vidio.com/@%s'),
'channel': channel.get('name'),
'channel_id': str_or_none(channel.get('id')),
'view_count': get_count('view_count'),
Expand Down Expand Up @@ -283,5 +283,5 @@ def _real_extract(self, url):
'uploader': user.get('name'),
'timestamp': parse_iso8601(stream_meta.get('start_time')),
'uploader_id': username,
'uploader_url': format_field(username, template='https://www.vidio.com/@%s'),
'uploader_url': format_field(username, None, 'https://www.vidio.com/@%s'),
}
2 changes: 1 addition & 1 deletion yt_dlp/extractor/vidlii.py
Expand Up @@ -100,7 +100,7 @@ def _real_extract(self, url):
uploader = self._search_regex(
r'<div[^>]+class=["\']wt_person[^>]+>\s*<a[^>]+\bhref=["\']/user/[^>]+>([^<]+)',
webpage, 'uploader', fatal=False)
uploader_url = format_field(uploader, template='https://www.vidlii.com/user/%s')
uploader_url = format_field(uploader, None, 'https://www.vidlii.com/user/%s')

upload_date = unified_strdate(self._html_search_meta(
'datePublished', webpage, default=None) or self._search_regex(
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/vine.py
Expand Up @@ -89,7 +89,7 @@ def video_url(kind):

username = data.get('username')

alt_title = format_field(username, template='Vine by %s')
alt_title = format_field(username, None, 'Vine by %s')

return {
'id': video_id,
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/younow.py
Expand Up @@ -91,7 +91,7 @@ def _extract_moment(item, fatal=True):

uploader = try_get(item, lambda x: x['owner']['name'], compat_str)
uploader_id = try_get(item, lambda x: x['owner']['userId'])
uploader_url = format_field(uploader, template='https://www.younow.com/%s')
uploader_url = format_field(uploader, None, 'https://www.younow.com/%s')

entry = {
'extractor_key': 'YouNowMoment',
Expand Down
4 changes: 2 additions & 2 deletions yt_dlp/extractor/youtube.py
Expand Up @@ -3643,7 +3643,7 @@ def feed_entry(name):
'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
'uploader_url': owner_profile_url,
'channel_id': channel_id,
'channel_url': format_field(channel_id, template='https://www.youtube.com/channel/%s'),
'channel_url': format_field(channel_id, None, 'https://www.youtube.com/channel/%s'),
'duration': duration,
'view_count': int_or_none(
get_first((video_details, microformats), (..., 'viewCount'))
Expand Down Expand Up @@ -3717,7 +3717,7 @@ def process_language(container, base_url, lang_code, sub_name, query):
if 'translated_subs' in self._configuration_arg('skip'):
continue
trans_code += f'-{lang_code}'
trans_name += format_field(lang_name, template=' from %s')
trans_name += format_field(lang_name, None, ' from %s')
# Add an "-orig" label to the original language so that it can be distinguished.
# The subs are returned without "-orig" as well for compatibility
if lang_code == f'a-{orig_trans_code}':
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/zhihu.py
Expand Up @@ -58,7 +58,7 @@ def _real_extract(self, url):
'uploader': author.get('name'),
'timestamp': int_or_none(zvideo.get('published_at')),
'uploader_id': author.get('id'),
'uploader_url': format_field(url_token, template='https://www.zhihu.com/people/%s'),
'uploader_url': format_field(url_token, None, 'https://www.zhihu.com/people/%s'),
'duration': float_or_none(video.get('duration')),
'view_count': int_or_none(zvideo.get('play_count')),
'like_count': int_or_none(zvideo.get('liked_count')),
Expand Down
16 changes: 10 additions & 6 deletions yt_dlp/options.py
Expand Up @@ -96,12 +96,16 @@ def load_configs():

opts = optparse.Values({'verbose': True, 'print_help': False})
try:
if overrideArguments:
root.append_config(overrideArguments, label='Override')
else:
root.append_config(sys.argv[1:], label='Command-line')
try:
if overrideArguments:
root.append_config(overrideArguments, label='Override')
else:
root.append_config(sys.argv[1:], label='Command-line')
loaded_all_configs = all(load_configs())
except ValueError as err:
raise root.parser.error(err)

if all(load_configs()):
if loaded_all_configs:
# If ignoreconfig is found inside the system configuration file,
# the user configuration is removed
if root.parse_known_args()[0].ignoreconfig:
Expand Down Expand Up @@ -183,7 +187,7 @@ def parse_known_args(self, args=None, values=None, strict=True):
return self.check_values(self.values, self.largs)

def error(self, msg):
    """Report a parsing error by raising instead of exiting.

    Overrides optparse.OptionParser.error, which prints to stderr and
    calls sys.exit; raising OptParseError lets the caller handle it.

    ``msg`` may be an exception object (e.g. a ValueError raised while
    loading a config file), not just a string — hence the explicit
    str() coercion before strip().
    """
    msg = f'{self.get_prog_name()}: error: {str(msg).strip()}\n'
    # Prepend the usage text when one is configured
    raise optparse.OptParseError(f'{self.get_usage()}\n{msg}' if self.usage else msg)

def _get_args(self, args):
Expand Down

0 comments on commit 0612117

Please sign in to comment.