Skip to content

Commit

Permalink
[utils,cleanup] traverse_obj: Allow [] and minor cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
pukkandan committed Feb 9, 2023
1 parent f14c233 commit f39766d
Show file tree
Hide file tree
Showing 9 changed files with 35 additions and 39 deletions.
2 changes: 1 addition & 1 deletion yt_dlp/downloader/fragment.py
Original file line number Diff line number Diff line change
Expand Up @@ -383,7 +383,7 @@ def download_and_append_fragments_multiple(self, *args, **kwargs):
max_workers = self.params.get('concurrent_fragment_downloads', 1)
if max_progress > 1:
self._prepare_multiline_status(max_progress)
is_live = any(traverse_obj(args, (..., 2, 'is_live'), default=[]))
is_live = any(traverse_obj(args, (..., 2, 'is_live')))

def thread_func(idx, ctx, fragments, info_dict, tpe):
ctx['max_progress'] = max_progress
Expand Down
4 changes: 2 additions & 2 deletions yt_dlp/extractor/abematv.py
Original file line number Diff line number Diff line change
Expand Up @@ -416,7 +416,7 @@ def _real_extract(self, url):
f'https://api.abema.io/v1/video/programs/{video_id}', video_id,
note='Checking playability',
headers=headers)
ondemand_types = traverse_obj(api_response, ('terms', ..., 'onDemandType'), default=[])
ondemand_types = traverse_obj(api_response, ('terms', ..., 'onDemandType'))
if 3 not in ondemand_types:
# cannot acquire decryption key for these streams
self.report_warning('This is a premium-only stream')
Expand Down Expand Up @@ -489,7 +489,7 @@ def _fetch_page(self, playlist_id, series_version, page):
})
yield from (
self.url_result(f'https://abema.tv/video/episode/{x}')
for x in traverse_obj(programs, ('programs', ..., 'id'), default=[]))
for x in traverse_obj(programs, ('programs', ..., 'id')))

def _entries(self, playlist_id, series_version):
return OnDemandPagedList(
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/gamejolt.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ def _get_comments(self, post_num_id, post_hash_id):
post_hash_id, note='Downloading comments list page %d' % page)
if not comments_data.get('comments'):
break
for comment in traverse_obj(comments_data, (('comments', 'childComments'), ...), expected_type=dict, default=[]):
for comment in traverse_obj(comments_data, (('comments', 'childComments'), ...), expected_type=dict):
yield {
'id': comment['id'],
'text': self._parse_content_as_text(
Expand Down
8 changes: 4 additions & 4 deletions yt_dlp/extractor/iqiyi.py
Original file line number Diff line number Diff line change
Expand Up @@ -585,7 +585,7 @@ def _real_extract(self, url):
'langCode': self._get_cookie('lang', 'en_us'),
'deviceId': self._get_cookie('QC005', '')
}, fatal=False)
ut_list = traverse_obj(vip_data, ('data', 'all_vip', ..., 'vipType'), expected_type=str_or_none, default=[])
ut_list = traverse_obj(vip_data, ('data', 'all_vip', ..., 'vipType'), expected_type=str_or_none)
else:
ut_list = ['0']

Expand Down Expand Up @@ -617,7 +617,7 @@ def _real_extract(self, url):
self.report_warning('This preview video is limited%s' % format_field(preview_time, None, ' to %s seconds'))

# TODO: Extract audio-only formats
for bid in set(traverse_obj(initial_format_data, ('program', 'video', ..., 'bid'), expected_type=str_or_none, default=[])):
for bid in set(traverse_obj(initial_format_data, ('program', 'video', ..., 'bid'), expected_type=str_or_none)):
dash_path = dash_paths.get(bid)
if not dash_path:
self.report_warning(f'Unknown format id: {bid}. It is currently not being extracted')
Expand All @@ -628,7 +628,7 @@ def _real_extract(self, url):
fatal=False), 'data', expected_type=dict)

video_format = traverse_obj(format_data, ('program', 'video', lambda _, v: str(v['bid']) == bid),
expected_type=dict, default=[], get_all=False) or {}
expected_type=dict, get_all=False) or {}
extracted_formats = []
if video_format.get('m3u8Url'):
extracted_formats.extend(self._extract_m3u8_formats(
Expand Down Expand Up @@ -669,7 +669,7 @@ def _real_extract(self, url):
})
formats.extend(extracted_formats)

for sub_format in traverse_obj(initial_format_data, ('program', 'stl', ...), expected_type=dict, default=[]):
for sub_format in traverse_obj(initial_format_data, ('program', 'stl', ...), expected_type=dict):
lang = self._LID_TAGS.get(str_or_none(sub_format.get('lid')), sub_format.get('_name'))
subtitles.setdefault(lang, []).extend([{
'ext': format_ext,
Expand Down
4 changes: 2 additions & 2 deletions yt_dlp/extractor/panopto.py
Original file line number Diff line number Diff line change
Expand Up @@ -412,7 +412,7 @@ def _real_extract(self, url):
return {
'id': video_id,
'title': delivery.get('SessionName'),
'cast': traverse_obj(delivery, ('Contributors', ..., 'DisplayName'), default=[], expected_type=lambda x: x or None),
'cast': traverse_obj(delivery, ('Contributors', ..., 'DisplayName'), expected_type=lambda x: x or None),
'timestamp': session_start_time - 11640000000 if session_start_time else None,
'duration': delivery.get('Duration'),
'thumbnail': base_url + f'/Services/FrameGrabber.svc/FrameRedirect?objectId={video_id}&mode=Delivery&random={random()}',
Expand Down Expand Up @@ -563,7 +563,7 @@ def _extract_folder_metadata(self, base_url, folder_id):
base_url, '/Services/Data.svc/GetFolderInfo', folder_id,
data={'folderID': folder_id}, fatal=False)
return {
'title': get_first(response, 'Name', default=[])
'title': get_first(response, 'Name')
}

def _real_extract(self, url):
Expand Down
2 changes: 1 addition & 1 deletion yt_dlp/extractor/patreon.py
Original file line number Diff line number Diff line change
Expand Up @@ -310,7 +310,7 @@ def _get_comments(self, post_id):
f'posts/{post_id}/comments', post_id, query=params, note='Downloading comments page %d' % page)

cursor = None
for comment in traverse_obj(response, (('data', ('included', lambda _, v: v['type'] == 'comment')), ...), default=[]):
for comment in traverse_obj(response, (('data', ('included', lambda _, v: v['type'] == 'comment')), ...)):
count += 1
comment_id = comment.get('id')
attributes = comment.get('attributes') or {}
Expand Down
4 changes: 2 additions & 2 deletions yt_dlp/extractor/tiktok.py
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,7 @@ def extract_addr(addr, add_meta={}):
user_url = self._UPLOADER_URL_FORMAT % (traverse_obj(author_info,
'sec_uid', 'id', 'uid', 'unique_id',
expected_type=str_or_none, get_all=False))
labels = traverse_obj(aweme_detail, ('hybrid_label', ..., 'text'), expected_type=str, default=[])
labels = traverse_obj(aweme_detail, ('hybrid_label', ..., 'text'), expected_type=str)

contained_music_track = traverse_obj(
music_info, ('matched_song', 'title'), ('matched_pgc_sound', 'title'), expected_type=str)
Expand Down Expand Up @@ -355,7 +355,7 @@ def _parse_aweme_video_web(self, aweme_detail, webpage_url):
'ext': 'mp4',
'width': width,
'height': height,
} for url in traverse_obj(play_url, (..., 'src'), expected_type=url_or_none, default=[]) if url]
} for url in traverse_obj(play_url, (..., 'src'), expected_type=url_or_none) if url]

download_url = url_or_none(video_info.get('downloadAddr')) or traverse_obj(video_info, ('download', 'url'), expected_type=url_or_none)
if download_url:
Expand Down
35 changes: 16 additions & 19 deletions yt_dlp/extractor/youtube.py
Original file line number Diff line number Diff line change
Expand Up @@ -745,7 +745,7 @@ def _extract_badges(self, renderer: dict):
}

badges = []
for badge in traverse_obj(renderer, ('badges', ..., 'metadataBadgeRenderer'), default=[]):
for badge in traverse_obj(renderer, ('badges', ..., 'metadataBadgeRenderer')):
badge_type = (
privacy_icon_map.get(traverse_obj(badge, ('icon', 'iconType'), expected_type=str))
or badge_style_map.get(traverse_obj(badge, 'style'))
Expand Down Expand Up @@ -785,7 +785,7 @@ def _get_text(data, *path_list, max_runs=None):
runs = item

runs = runs[:min(len(runs), max_runs or len(runs))]
text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str))
if text:
return text

Expand All @@ -805,7 +805,7 @@ def _extract_thumbnails(data, *path_list):
"""
thumbnails = []
for path in path_list or [()]:
for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...), default=[]):
for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...)):
thumbnail_url = url_or_none(thumbnail.get('url'))
if not thumbnail_url:
continue
Expand Down Expand Up @@ -2668,11 +2668,10 @@ def refetch_manifest(format_id, delay):
return

_, _, prs, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
video_details = traverse_obj(
prs, (..., 'videoDetails'), expected_type=dict, default=[])
video_details = traverse_obj(prs, (..., 'videoDetails'), expected_type=dict)
microformats = traverse_obj(
prs, (..., 'microformat', 'playerMicroformatRenderer'),
expected_type=dict, default=[])
expected_type=dict)
_, live_status, _, formats, _ = self._list_formats(video_id, microformats, video_details, prs, player_url)
is_live = live_status == 'is_live'
start_time = time.time()
Expand Down Expand Up @@ -3173,7 +3172,7 @@ def _extract_chapters_from_engagement_panel(self, data, duration):
content_list = traverse_obj(
data,
('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'),
expected_type=list, default=[])
expected_type=list)
chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription'))
chapter_title = lambda chapter: self._get_text(chapter, 'title')

Expand Down Expand Up @@ -3450,7 +3449,7 @@ def _is_agegated(player_response):
if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')):
return True

reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[])
reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')))
AGE_GATE_REASONS = (
'confirm your age', 'age-restricted', 'inappropriate', # reason
'age_verification_required', 'age_check_required', # status
Expand Down Expand Up @@ -3606,7 +3605,7 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, l
'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats
'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
])
streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[])
streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...))

for fmt in streaming_formats:
if fmt.get('targetDurationSec'):
Expand Down Expand Up @@ -3872,7 +3871,7 @@ def _list_formats(self, video_id, microformats, video_details, player_responses,
else 'was_live' if live_content
else 'not_live' if False in (is_live, live_content)
else None)
streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[])
streaming_data = traverse_obj(player_responses, (..., 'streamingData'))
*formats, subtitles = self._extract_formats_and_subtitles(streaming_data, video_id, player_url, live_status, duration)

return live_broadcast_details, live_status, streaming_data, formats, subtitles
Expand All @@ -3887,7 +3886,7 @@ def _real_extract(self, url):
webpage, master_ytcfg, player_responses, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)

playability_statuses = traverse_obj(
player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[])
player_responses, (..., 'playabilityStatus'), expected_type=dict)

trailer_video_id = get_first(
playability_statuses,
Expand All @@ -3900,11 +3899,10 @@ def _real_extract(self, url):
search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None))
if webpage else (lambda x: None))

video_details = traverse_obj(
player_responses, (..., 'videoDetails'), expected_type=dict, default=[])
video_details = traverse_obj(player_responses, (..., 'videoDetails'), expected_type=dict)
microformats = traverse_obj(
player_responses, (..., 'microformat', 'playerMicroformatRenderer'),
expected_type=dict, default=[])
expected_type=dict)

translated_title = self._get_text(microformats, (..., 'title'))
video_title = (self._preferred_lang and translated_title
Expand Down Expand Up @@ -4110,10 +4108,10 @@ def get_lang_code(track):
# Converted into dicts to remove duplicates
captions = {
get_lang_code(sub): sub
for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
for sub in traverse_obj(pctr, (..., 'captionTracks', ...))}
translation_languages = {
lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1)
for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
for lang in traverse_obj(pctr, (..., 'translationLanguages', ...))}

def process_language(container, base_url, lang_code, sub_name, query):
lang_subs = container.setdefault(lang_code, [])
Expand Down Expand Up @@ -4267,9 +4265,8 @@ def process_language(container, base_url, lang_code, sub_name, query):
list) or []):
tbrs = variadic(
traverse_obj(
tlb, 'toggleButtonRenderer',
('segmentedLikeDislikeButtonRenderer', ..., 'toggleButtonRenderer'),
default=[]))
tlb, ('toggleButtonRenderer', ...),
('segmentedLikeDislikeButtonRenderer', ..., 'toggleButtonRenderer')))
for tbr in tbrs:
for getter, regex in [(
lambda x: x['defaultText']['accessibility']['accessibilityData'],
Expand Down
13 changes: 6 additions & 7 deletions yt_dlp/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -5420,7 +5420,7 @@ def traverse_obj(
Each of the provided `paths` is tested and the first producing a valid result will be returned.
The next path will also be tested if the path branched but no results could be found.
Supported values for traversal are `Mapping`, `Sequence` and `re.Match`.
Unhelpful values (`[]`, `{}`, `None`) are treated as the absence of a value and discarded.
Unhelpful values (`{}`, `None`) are treated as the absence of a value and discarded.
The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`.
Expand Down Expand Up @@ -5558,14 +5558,13 @@ def apply_key(key, obj, is_last):
result = next((v for k, v in obj.groupdict().items() if casefold(k) == key), None)

elif isinstance(key, (int, slice)):
if not is_sequence(obj):
if traverse_string:
with contextlib.suppress(IndexError):
result = str(obj)[key]
else:
if is_sequence(obj):
branching = isinstance(key, slice)
with contextlib.suppress(IndexError):
result = obj[key]
elif traverse_string:
with contextlib.suppress(IndexError):
result = str(obj)[key]

return branching, result if branching else (result,)

Expand Down Expand Up @@ -5617,7 +5616,7 @@ def apply_path(start_obj, path, test_type):

def _traverse_obj(obj, path, allow_empty, test_type):
results, has_branched, is_dict = apply_path(obj, path, test_type)
results = LazyList(item for item in results if item not in (None, [], {}))
results = LazyList(item for item in results if item not in (None, {}))
if get_all and has_branched:
if results:
return results.exhaust()
Expand Down

1 comment on commit f39766d

@CharlesMengCA
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[youtube] Extracting URL: https://www.youtube.com/watch?v=n3Dru5y3ROc
[youtube] n3Dru5y3ROc: Downloading webpage
[youtube] n3Dru5y3ROc: Downloading android player API JSON
ERROR: 'NoneType' object is not iterable

Python 3.11.2

(This looks like a regression from this commit: with `default=[]` removed, `traverse_obj` can return `None` when a non-branching path finds nothing, yet several of the changed call sites iterate the result directly — e.g. `any(traverse_obj(args, (..., 2, 'is_live')))` and the various `for ... in traverse_obj(...)` loops — which raises exactly this `'NoneType' object is not iterable` error.)

Please sign in to comment.