Skip to content
This repository

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse code

BlipTVUserIE fix

  • Loading branch information...
commit 11a141dec91a28a883203bf2c97750438b932efa 1 parent 8182827
Filippo Valsorda (FiloSottile) authored

Showing 1 changed file with 8 additions and 8 deletions. Show diff stats Hide diff stats

  1. +8 −8 youtube_dl/InfoExtractors.py
16 youtube_dl/InfoExtractors.py
@@ -1618,7 +1618,7 @@ class BlipTVUserIE(InfoExtractor):
1618 1618 """Information Extractor for blip.tv users."""
1619 1619
1620 1620 _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
1621   - _PAGE_SIZE = 10
  1621 + _PAGE_SIZE = 12
1622 1622 IE_NAME = u'blip.tv:user'
1623 1623
1624 1624 def __init__(self, downloader=None):
@@ -1638,31 +1638,31 @@ def _real_extract(self, url):
1638 1638
1639 1639 username = mobj.group(1)
1640 1640
1641   - page_base = None
  1641 + page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
1642 1642
1643 1643 request = urllib2.Request(url)
1644 1644
1645 1645 try:
1646 1646 page = urllib2.urlopen(request).read().decode('utf-8')
1647   - mobj = re.search(r'data-source-url="([^"]+)"', page)
1648   - page_base = "http://blip.tv" + unescapeHTML(mobj.group(1))
  1647 + mobj = re.search(r'data-users-id="([^"]+)"', page)
  1648 + page_base = page_base % mobj.group(1)
1649 1649 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1650 1650 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
1651 1651 return
1652 1652
1653 1653
1654   - # Download video ids using BlipTV Page API. Result size per
1655   - # query is limited (currently to 10 videos) so we need to query
  1654 + # Download video ids using BlipTV Ajax calls. Result size per
  1655 + # query is limited (currently to 12 videos) so we need to query
1656 1656 # page by page until there are no video ids - it means we got
1657 1657 # all of them.
1658 1658
1659 1659 video_ids = []
1660   - pagenum = 0
  1660 + pagenum = 1
1661 1661
1662 1662 while True:
1663 1663 self.report_download_page(username, pagenum)
1664 1664
1665   - request = urllib2.Request( page_base + "&page=" + str(pagenum+1) )
  1665 + request = urllib2.Request( page_base + "&page=" + str(pagenum) )
1666 1666
1667 1667 try:
1668 1668 page = urllib2.urlopen(request).read().decode('utf-8')

0 comments on commit 11a141d

Please sign in to comment.
Something went wrong with that request. Please try again.