
Blip.tv #386

Merged
merged 6 commits

4 participants

@FiloSottile
Collaborator

This includes pull requests #356, #357, and #358 by @jcarlosgarciasegovia, plus a fix to make the User-Agent change specific to the BlipTV IE only and a fix/refresh of BlipTVUserIE. Fixes #325.
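For context on that fix: rather than changing the default User-Agent for every site (as the original change apparently did), the diff below touches the shared std_headers dictionary only from inside the blip.tv extractor. A minimal sketch of the pattern, with the header value taken from the diff and everything else simplified:

    # Sketch only, not the PR's literal code: the module-level header dict
    # is modified solely inside the blip.tv extractor, so every other site
    # keeps the normal youtube-dl User-Agent.
    std_headers = {'User-Agent': 'youtube-dl'}  # default value assumed here

    class BlipTVIE(object):  # stands in for the real InfoExtractor subclass
        def _real_extract(self, url):
            info = {'url': url}  # placeholder for the real extraction logic
            # blip.tv reportedly serves the media to an iTunes client, so the
            # UA is switched just before the downloader fetches the file.
            std_headers['User-Agent'] = 'iTunes/10.6.1'
            return [info]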

@phihag merged commit a171dbf
@rbrito

@phihag, @FiloSottile

For activist reasons, we should not pretend to always be iTunes.

For technical reasons, we should allow information extractors to override the default user-agent from the std_headers dictionary, since it is entirely possible for one site (like blip.tv) to serve its content to user-agent A while another site serves its content to user-agent B, with A != B, of course. :)
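A hedged sketch of what such an override hook could look like (a hypothetical design, not code from this PR or from youtube-dl): each extractor declares its own User-Agent, and requests are built from a copy of std_headers, so the shared default is never mutated and two sites can use different values at the same time.

    import urllib2  # Python 2, matching the code in this PR

    std_headers = {'User-Agent': 'youtube-dl'}  # assumed global default

    class InfoExtractor(object):
        _USER_AGENT = None  # hypothetical per-IE override; None means "use the default"

        def _build_request(self, url):
            headers = dict(std_headers)  # copy, so the shared dict stays untouched
            if self._USER_AGENT is not None:
                headers['User-Agent'] = self._USER_AGENT
            return urllib2.Request(url, None, headers)

    class BlipTVIE(InfoExtractor):
        _USER_AGENT = 'iTunes/10.6.1'  # only blip.tv pretends to be iTunes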

Showing with 95 additions and 1 deletion.
  1. +94 −1 youtube_dl/InfoExtractors.py
  2. +1 −0  youtube_dl/__init__.py
95 youtube_dl/InfoExtractors.py
@@ -1614,6 +1614,98 @@ def _real_extract(self, url):
        self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
+class BlipTVUserIE(InfoExtractor):
+    """Information Extractor for blip.tv users."""
+
+    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
+    _PAGE_SIZE = 12
+    IE_NAME = u'blip.tv:user'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_page(self, username, pagenum):
+        """Report attempt to download user page."""
+        self._downloader.to_screen(u'[%s] user %s: Downloading video ids from page %d' %
+                                   (self.IE_NAME, username, pagenum))
+
+    def _real_extract(self, url):
+        # Extract username
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            return
+
+        username = mobj.group(1)
+
+        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
+
+        request = urllib2.Request(url)
+
+        try:
+            page = urllib2.urlopen(request).read().decode('utf-8')
+            mobj = re.search(r'data-users-id="([^"]+)"', page)
+            page_base = page_base % mobj.group(1)
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+            return
+
+
+        # Download video ids using BlipTV Ajax calls. Result size per
+        # query is limited (currently to 12 videos) so we need to query
+        # page by page until there are no video ids - it means we got
+        # all of them.
+
+        video_ids = []
+        pagenum = 1
+
+        while True:
+            self.report_download_page(username, pagenum)
+
+            request = urllib2.Request(page_base + "&page=" + str(pagenum))
+
+            try:
+                page = urllib2.urlopen(request).read().decode('utf-8')
+            except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+                return
+
+            # Extract video identifiers
+            ids_in_page = []
+
+            for mobj in re.finditer(r'href="/([^"]+)"', page):
+                if mobj.group(1) not in ids_in_page:
+                    ids_in_page.append(unescapeHTML(mobj.group(1)))
+
+            video_ids.extend(ids_in_page)
+
+            # A little optimization - if current page is not
+            # "full", ie. does not contain PAGE_SIZE video ids then
+            # we can assume that this page is the last one - there
+            # are no more ids on further pages - no need to query
+            # again.
+
+            if len(ids_in_page) < self._PAGE_SIZE:
+                break
+
+            pagenum += 1
+
+        all_ids_count = len(video_ids)
+        playliststart = self._downloader.params.get('playliststart', 1) - 1
+        playlistend = self._downloader.params.get('playlistend', -1)
+
+        if playlistend == -1:
+            video_ids = video_ids[playliststart:]
+        else:
+            video_ids = video_ids[playliststart:playlistend]
+
+        self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
+                                   (self.IE_NAME, username, all_ids_count, len(video_ids)))
+
+        for video_id in video_ids:
+            self._downloader.download([u'http://blip.tv/' + video_id])
+
+
class DepositFilesIE(InfoExtractor):
    """Information extractor for depositfiles.com"""
@@ -1912,7 +2004,7 @@ def _real_extract(self, url):
        else:
            cchar = '?'
        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
-        request = urllib2.Request(json_url)
+        request = urllib2.Request(json_url.encode('utf-8'))
        self.report_extraction(mobj.group(1))
        info = None
        try:
@@ -1970,6 +2062,7 @@ def _real_extract(self, url):
            self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
            return
+        std_headers['User-Agent'] = 'iTunes/10.6.1'
        return [info]
1  youtube_dl/__init__.py
@@ -338,6 +338,7 @@ def gen_extractors():
        YahooSearchIE(),
        DepositFilesIE(),
        FacebookIE(),
+        BlipTVUserIE(),
        BlipTVIE(),
        VimeoIE(),
        MyVideoIE(),