
[ie] Avoid potentially confusing imports
pukkandan committed Jan 11, 2024
1 parent 9f813e7 commit 879a85a
Showing 9 changed files with 24 additions and 30 deletions.
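
Every change follows the same pattern: replace a 'from module import name' with 'import module' and qualify each use, so that generic names like timeout, datetime, date, random, or unquote can no longer be mistaken for locals or shadow one another. A minimal sketch (not from the commit) of the contrast:

    # Ambiguous: datetime could be the module or the class;
    # random could be the module or the function.
    from datetime import datetime
    from random import random
    print(datetime.now(), random())

    # Unambiguous: every use is qualified by its module.
    import datetime
    import random
    print(datetime.datetime.now(), random.random())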
4 changes: 2 additions & 2 deletions yt_dlp/extractor/dtube.py
@@ -1,5 +1,5 @@
 import json
-from socket import timeout
+import socket
 
 from .common import InfoExtractor
 from ..utils import (
@@ -55,7 +55,7 @@ def canonical_url(h):
             try:
                 self.to_screen('%s: Checking %s video format URL' % (video_id, format_id))
                 self._downloader._opener.open(video_url, timeout=5).close()
-            except timeout:
+            except socket.timeout:
                 self.to_screen(
                     '%s: %s URL is invalid, skipping' % (video_id, format_id))
                 continue
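The qualified form matters here because timeout appears twice within two lines with two different meanings. A sketch (not from the commit; the host and port are placeholders):

    import socket

    try:
        # timeout=5 in the diff is an ordinary keyword argument...
        socket.create_connection(('192.0.2.1', 80), timeout=0.5)
    except (socket.timeout, OSError):
        # ...while socket.timeout is the exception class; spelled bare
        # via 'from socket import timeout', the two reads collide.
        print('connection attempt timed out or failed')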
1 change: 0 additions & 1 deletion yt_dlp/extractor/masters.py
@@ -1,4 +1,3 @@
-from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..utils import (
     traverse_obj,
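The deleted line is a Python 2 leftover: on Python 3, which yt-dlp requires, all string literals are already unicode, so the __future__ import was a no-op. A one-line check (not part of the commit):

    print(type('literal'))  # <class 'str'> on Python 3, with or without the import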
4 changes: 2 additions & 2 deletions yt_dlp/extractor/naver.py
@@ -1,6 +1,6 @@
 import itertools
 import re
-from urllib.parse import urlparse, parse_qs
+import urllib.parse
 
 from .common import InfoExtractor
 from ..utils import (
@@ -381,7 +381,7 @@ def _extract_highlight(self, show_id, highlight_id):
 
     def _real_extract(self, url):
         show_id = self._match_id(url)
-        qs = parse_qs(urlparse(url).query)
+        qs = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
 
         if not self._yes_playlist(show_id, qs.get('shareHightlight')):
             return self._extract_highlight(show_id, qs['shareHightlight'][0])
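A sketch (not from the commit) of what the rewritten line computes; the URL is hypothetical, and shareHightlight is spelled as it is in the extractor:

    import urllib.parse

    url = 'https://tv.naver.com/example?shareHightlight=12345'  # hypothetical URL
    qs = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
    print(qs.get('shareHightlight'))  # ['12345']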
5 changes: 2 additions & 3 deletions yt_dlp/extractor/niconico.py
@@ -4,8 +4,7 @@
 import json
 import re
 import time
-
-from urllib.parse import urlparse
+import urllib.parse
 
 from .common import InfoExtractor, SearchInfoExtractor
 from ..networking import Request
@@ -946,7 +945,7 @@ def _real_extract(self, url):
             'frontend_id': traverse_obj(embedded_data, ('site', 'frontendId')) or '9',
         })
 
-        hostname = remove_start(urlparse(urlh.url).hostname, 'sp.')
+        hostname = remove_start(urllib.parse.urlparse(urlh.url).hostname, 'sp.')
         latency = try_get(self._configuration_arg('latency'), lambda x: x[0])
         if latency not in self._KNOWN_LATENCY:
             latency = 'high'
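A sketch of the hostname line with a hypothetical URL, substituting str.removeprefix for yt-dlp's remove_start helper:

    import urllib.parse

    url = 'https://sp.live.nicovideo.jp/watch/lv123456789'  # hypothetical URL
    hostname = urllib.parse.urlparse(url).hostname
    hostname = hostname.removeprefix('sp.')  # stand-in for remove_start(hostname, 'sp.')
    print(hostname)  # live.nicovideo.jp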
10 changes: 5 additions & 5 deletions yt_dlp/extractor/panopto.py
@@ -1,8 +1,8 @@
 import calendar
-import json
+import datetime
 import functools
-from datetime import datetime, timezone
-from random import random
+import json
+import random
 
 from .common import InfoExtractor
 from ..compat import (
@@ -243,7 +243,7 @@ def _mark_watched(self, base_url, video_id, delivery_info):
         invocation_id = delivery_info.get('InvocationId')
         stream_id = traverse_obj(delivery_info, ('Delivery', 'Streams', ..., 'PublicID'), get_all=False, expected_type=str)
         if invocation_id and stream_id and duration:
-            timestamp_str = f'/Date({calendar.timegm(datetime.now(timezone.utc).timetuple())}000)/'
+            timestamp_str = f'/Date({calendar.timegm(datetime.datetime.now(datetime.timezone.utc).timetuple())}000)/'
             data = {
                 'streamRequests': [
                     {
@@ -415,7 +415,7 @@ def _real_extract(self, url):
             'cast': traverse_obj(delivery, ('Contributors', ..., 'DisplayName'), expected_type=lambda x: x or None),
             'timestamp': session_start_time - 11640000000 if session_start_time else None,
             'duration': delivery.get('Duration'),
-            'thumbnail': base_url + f'/Services/FrameGrabber.svc/FrameRedirect?objectId={video_id}&mode=Delivery&random={random()}',
+            'thumbnail': base_url + f'/Services/FrameGrabber.svc/FrameRedirect?objectId={video_id}&mode=Delivery&random={random.random()}',
             'average_rating': delivery.get('AverageRating'),
             'chapters': self._extract_chapters(timestamps),
             'uploader': delivery.get('OwnerDisplayName') or None,
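This file had both classic stdlib collisions at once: datetime names a module and a class inside it, and random names a module and a function inside it. A sketch (not from the commit) of the timestamp token the first changed line builds, with every name module-qualified:

    import calendar
    import datetime
    import random

    # current UTC time as whole epoch seconds, via the same calls as the diff
    now_utc = datetime.datetime.now(datetime.timezone.utc)
    epoch_seconds = calendar.timegm(now_utc.timetuple())
    print(f'/Date({epoch_seconds}000)/')  # .NET-style date token
    print(random.random())                # module, then function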
8 changes: 4 additions & 4 deletions yt_dlp/extractor/pr0gramm.py
@@ -1,6 +1,6 @@
+import datetime
 import json
-from datetime import date
-from urllib.parse import unquote
+import urllib.parse
 
 from .common import InfoExtractor
 from ..compat import functools
@@ -99,7 +99,7 @@ def _maximum_flags(self):
         cookies = self._get_cookies(self.BASE_URL)
         if 'me' not in cookies:
             self._download_webpage(self.BASE_URL, None, 'Refreshing verification information')
-        if traverse_obj(cookies, ('me', {lambda x: x.value}, {unquote}, {json.loads}, 'verified')):
+        if traverse_obj(cookies, ('me', {lambda x: x.value}, {urllib.parse.unquote}, {json.loads}, 'verified')):
            flags |= 0b0110
 
         return flags
@@ -183,7 +183,7 @@ def _real_extract(self, url):
             'like_count': ('up', {int}),
             'dislike_count': ('down', {int}),
             'upload_timestamp': ('created', {int}),
-            'upload_date': ('created', {int}, {date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}),
+            'upload_date': ('created', {int}, {datetime.date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}),
             'thumbnail': ('thumb', {lambda x: urljoin('https://thumb.pr0gramm.com', x)})
         }),
     }
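In traverse_obj paths like those above, callables appear as bare names inside set literals, so unquote alone says nothing about its origin; urllib.parse.unquote does. A plain-Python sketch (not from the commit) of the cookie-decoding steps:

    import json
    import urllib.parse

    cookie_value = '%7B%22verified%22%3A%20true%7D'  # hypothetical 'me' cookie payload
    verified = json.loads(urllib.parse.unquote(cookie_value))['verified']
    print(verified)  # True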
14 changes: 5 additions & 9 deletions yt_dlp/extractor/radiokapital.py
@@ -1,18 +1,14 @@
-from .common import InfoExtractor
-from ..utils import (
-    clean_html,
-    traverse_obj,
-    unescapeHTML,
-)
-
 import itertools
-from urllib.parse import urlencode
+import urllib.parse
+
+from .common import InfoExtractor
+from ..utils import clean_html, traverse_obj, unescapeHTML
 
 
 class RadioKapitalBaseIE(InfoExtractor):
     def _call_api(self, resource, video_id, note='Downloading JSON metadata', qs={}):
         return self._download_json(
-            f'https://www.radiokapital.pl/wp-json/kapital/v1/{resource}?{urlencode(qs)}',
+            f'https://www.radiokapital.pl/wp-json/kapital/v1/{resource}?{urllib.parse.urlencode(qs)}',
             video_id, note=note)
 
     def _parse_episode(self, data):
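Besides qualifying urlencode, this hunk regroups the header into conventional order: standard-library imports first, local imports after a blank line. A sketch of the URL construction (the resource and query values are hypothetical):

    import urllib.parse

    resource, qs = 'episodes', {'page': 2}  # hypothetical values
    print(f'https://www.radiokapital.pl/wp-json/kapital/v1/{resource}?{urllib.parse.urlencode(qs)}')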
4 changes: 2 additions & 2 deletions yt_dlp/extractor/rokfin.py
@@ -1,8 +1,8 @@
+import datetime
 import itertools
 import json
 import re
 import urllib.parse
-from datetime import datetime
 
 from .common import InfoExtractor, SearchInfoExtractor
 from ..utils import (
@@ -157,7 +157,7 @@ def _real_extract(self, url):
             self.raise_login_required('This video is only available to premium users', True, method='cookies')
         elif scheduled:
             self.raise_no_formats(
-                f'Stream is offline; scheduled for {datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
+                f'Stream is offline; scheduled for {datetime.datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
                 video_id=video_id, expected=True)
 
         uploader = traverse_obj(metadata, ('createdBy', 'username'), ('creator', 'username'))
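The same module-versus-class fix as in panopto.py; a sketch of the message formatting with a hypothetical timestamp:

    import datetime

    scheduled = 1704067200  # hypothetical epoch timestamp
    when = datetime.datetime.fromtimestamp(scheduled).strftime('%Y-%m-%d %H:%M:%S')
    print(f'Stream is offline; scheduled for {when}')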
4 changes: 2 additions & 2 deletions yt_dlp/extractor/tenplay.py
@@ -1,7 +1,7 @@
 import base64
+import datetime
 import functools
 import itertools
-from datetime import datetime
 
 from .common import InfoExtractor
 from ..networking import HEADRequest
@@ -69,7 +69,7 @@ def _get_bearer_token(self, video_id):
         username, password = self._get_login_info()
         if username is None or password is None:
             self.raise_login_required('Your 10play account\'s details must be provided with --username and --password.')
-        _timestamp = datetime.now().strftime('%Y%m%d000000')
+        _timestamp = datetime.datetime.now().strftime('%Y%m%d000000')
         _auth_header = base64.b64encode(_timestamp.encode('ascii')).decode('ascii')
         data = self._download_json('https://10play.com.au/api/user/auth', video_id, 'Getting bearer token', headers={
             'X-Network-Ten-Auth': _auth_header,
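The rewritten token derivation from the second hunk, runnable on its own:

    import base64
    import datetime

    # a midnight-anchored timestamp, base64-encoded into the auth header
    _timestamp = datetime.datetime.now().strftime('%Y%m%d000000')
    _auth_header = base64.b64encode(_timestamp.encode('ascii')).decode('ascii')
    print(_auth_header)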
