Skip to content

Commit

Permalink
Adding a bit for finding the favicon. Also handling all errors on fetching the HTML of the site for finding the icon.
Browse files Browse the repository at this point in the history
  • Loading branch information
samuelclay committed Jan 31, 2011
1 parent 70bb450 commit 7424dc9
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 16 deletions.
3 changes: 2 additions & 1 deletion apps/reader/views.py
Expand Up @@ -136,7 +136,7 @@ def load_feeds(request):
UserSubscriptionFolders.objects.filter(user=user)[1:].delete()
folders = UserSubscriptionFolders.objects.get(user=user)

user_subs = UserSubscription.objects.select_related('feed').filter(user=user)
user_subs = UserSubscription.objects.select_related('feed', 'feed__feed_icon').filter(user=user)

for sub in user_subs:
feeds[sub.feed.pk] = {
Expand All @@ -152,6 +152,7 @@ def load_feeds(request):
'active': sub.active,
'favicon': sub.feed.icon.data,
'favicon_color': sub.feed.icon.color,
'favicon_finding': bool(not (sub.feed.icon.not_found or sub.feed.icon.data))
}

if not sub.feed.fetched_once:
Expand Down
17 changes: 2 additions & 15 deletions apps/rss_feeds/icon_importer.py
Expand Up @@ -31,7 +31,7 @@ def save(self):
return
image, image_file, icon_url = self.fetch_image_from_page_data()
if not image:
image, image_file, icon_url = self.fetch(force=self.force)
image, image_file, icon_url = self.fetch_image_from_path(force=self.force)

if image:
try:
Expand Down Expand Up @@ -137,7 +137,7 @@ def fetch_image_from_page_data(self):
image, image_file = self.get_image_from_url(url)
return image, image_file, url

def fetch(self, path='favicon.ico', force=False):
def fetch_image_from_path(self, path='favicon.ico', force=False):
image = None
url = None

Expand All @@ -151,19 +151,6 @@ def fetch(self, path='favicon.ico', force=False):
if not image:
url = urlparse.urljoin(self.feed.feed_link, '/favicon.ico')
image, image_file = self.get_image_from_url(url)
if not image:
request = urllib2.Request(self.feed.feed_link, headers=HEADERS)
try:
# 2048 bytes should be enough for most of websites
content = urllib2.urlopen(request).read(2048)
except(urllib2.HTTPError, urllib2.URLError):
return None, None, None
url = self._url_from_html(content)
if url:
try:
image, image_file = self.get_image_from_url(url)
except(urllib2.HTTPError, urllib2.URLError):
return None, None, None
# print 'Found: %s - %s' % (url, image)
return image, image_file, url

Expand Down

0 comments on commit 7424dc9

Please sign in to comment.