Skip to content
This repository

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
tree: 735747ab20
Fetching contributors…

Octocat-spinner-32-eaf2f5

Cannot retrieve contributors at this time

file 56 lines (46 sloc) 1.868 kb
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55
# -*- coding: utf-8 -*-
#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = u'2011, Silviu Cotoar\u0103'
'''
tabu.ro
'''

from calibre.web.feeds.news import BasicNewsRecipe

class TabuRo(BasicNewsRecipe):
    """Calibre recipe for Tabu (tabu.ro), a Romanian women's magazine."""

    # Recipe metadata.
    title = u'Tabu'
    __author__ = u'Silviu Cotoar\u0103'
    description = 'Cel mai curajos site de femei'
    publisher = 'Tabu'
    category = 'Ziare,Reviste,Femei'
    language = 'ro'

    # Fetch behaviour.
    oldest_article = 5
    max_articles_per_feed = 100
    no_stylesheets = True
    use_embedded_content = False
    encoding = 'utf-8'
    cover_url = 'http://www.tabu.ro/img/tabu-logo2.png'

    # Propagate the recipe metadata into the generated e-book.
    conversion_options = {
        'comments': description,
        'tags': category,
        'language': language,
        'publisher': publisher,
    }

    # Keep only the headline, the featured image and the article body.
    keep_only_tags = [
        dict(name='h2', attrs={'class': 'articol_titlu'}),
        dict(name='div', attrs={'class': 'poza_articol_featured'}),
        dict(name='div', attrs={'class': 'articol_text'}),
    ]

    # Drop social-media buttons and "related articles" widgets.
    remove_tags = [
        dict(name='div', attrs={'class': 'asemanatoare'}),
        dict(name='div', attrs={'class': 'social'}),
    ]

    # Anything after these markers (comments, footer cruft) is cut off.
    remove_tags_after = [
        dict(name='div', attrs={'class': 'social'}),
        dict(name='div', attrs={'id': 'comments'}),
        dict(name='div', attrs={'class': 'asemanatoare'}),
    ]

    feeds = [
        (u'Feeds', u'http://www.tabu.ro/feed/'),
    ]

    def preprocess_html(self, soup):
        """Normalize article images via the base-class helper."""
        return self.adeify_images(soup)
Something went wrong with that request. Please try again.