#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
'''
www.latribuna.hn
'''

from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import Tag

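# Calibre news recipe for La Tribuna (Honduras): fetches recent articles from the site's RSS feed.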
class LaTribuna(BasicNewsRecipe):
    title = 'La Tribuna - Honduras'
    __author__ = 'Darko Miletic'
    description = 'Noticias de Honduras y mundo'
    publisher = 'La Tribuna'
    category = 'news, politics, Honduras'
    oldest_article = 2
    max_articles_per_feed = 100
    use_embedded_content = False
    no_stylesheets = True
    remove_javascript = True
    encoding = 'utf-8'
    language = 'es_HN'

    lang = 'es_HN'
    direction = 'ltr'

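    # Metadata passed to the legacy html2lrf converter (comment, category, publisher).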
    html2lrf_options = [
                          '--comment', description
                        , '--category', category
                        , '--publisher', publisher
                        ]

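    # Equivalent metadata for html2epub, plus a small CSS override for paragraph spacing.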
    html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"\npretty_print=True\noverride_css=" p {text-indent: 0cm; margin-top: 0em; margin-bottom: 0.5em} "'

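    # Drop forms and embedded media from downloaded article pages.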
    remove_tags = [dict(name=['form','object','embed'])]

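    # Keep only the article title, date and body.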
    keep_only_tags = [
                        dict(name='p', attrs={'id':['BlogTitle','BlogDate']})
                       ,dict(name='div', attrs={'id':'BlogContent'})
                     ]

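    # Single RSS feed with the latest news.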
    feeds = [(u'Noticias', u'http://www.latribuna.hn/web2.0/?feed=rss')]

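    # Request the printer-friendly version of each article.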
    def print_version(self, url):
        return url + '&print=1'

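    # Set the document language/charset metadata and strip inline styles.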
    def preprocess_html(self, soup):
        soup.html['lang'] = self.lang
        soup.html['dir' ] = self.direction
        mlang = Tag(soup,'meta',[("http-equiv","Content-Language"),("content",self.lang)])
        mcharset = Tag(soup,'meta',[("http-equiv","Content-Type"),("content","text/html; charset=utf-8")])
        soup.head.insert(0,mlang)
        soup.head.insert(1,mcharset)
        for item in soup.findAll(style=True):
            del item['style']
        return soup

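    # Use the printed-edition thumbnail on the homepage as the cover image.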
    def get_cover_url(self):
        cover_url = None
        soup = self.index_to_soup('http://www.latribuna.hn/web2.0/')
        cover_item = soup.find('div',attrs={'class':'portada_impresa'})
        if cover_item:
            cover_url = cover_item.a.img['src']
        return cover_url