diff --git a/data/greg.conf b/data/greg.conf
index 09bb4f4..15fe945 100644
--- a/data/greg.conf
+++ b/data/greg.conf
@@ -153,9 +153,12 @@ tag_genre = Podcast
 # firstsync = all
 #
 # asks Greg to download every available podcast.
-
+#
+#
 firstsync = 1
-
+#
+###############################################################################
+#
 # The following option expects a list of words (separated by commas) which would
 # be part of the mime-type of the desired enclosures. That is, if the feed is a
 # video podcast you would have here
@@ -170,7 +173,9 @@ firstsync = 1
 # The default is to download only audio files. Thus,
 #
 mime = audio
-
+#
+###############################################################################
+#
 # The following option provides a custom download handler. It expects a shell
 # command, in which you can use the placeholders enumerated above.
 #
@@ -183,6 +188,16 @@ mime = audio
 
 # downloadhandler = greg
 #
+###############################################################################
+#
+# Some feeds are abnormal in that they don't use enclosures. The following
+# option, when set to "yes", instructs greg to ignore enclosures and simply
+# return the entry link as {link}.
+#
+ignoreenclosures = no
+#
+###############################################################################
+#
 # Finally, you don't need to download every issue of a podcast. You can specify
 # a condition such that greg will download an issue if and only if the issue
 # meets it. For example,
diff --git a/greg/greg.py b/greg/greg.py
index b651f2b..0e33ec5 100755
--- a/greg/greg.py
+++ b/greg/greg.py
@@ -266,12 +266,65 @@ def fix_linkdate(self, entry):
                 entry.linkdate = list(time.localtime())
 
-    def retrieve_mime(self):  # Checks the mime-type to download
+    def retrieve_mime(self):
+        """
+        Check the mime-type to download
+        """
         mime = self.retrieve_config('mime', 'audio')
         mimedict = {"number": mime}  # the input that parse_for_download expects
         return parse_for_download(mimedict)
 
+    def download_entry(self, entry):
+        """
+        Find entry link and download entry
+        """
+        downloadlinks = {}
+        ignoreenclosures = self.retrieve_config('ignoreenclosures', 'no')
+        if ignoreenclosures == 'no':
+            for enclosure in entry.enclosures:
+                # We will download all enclosures of the desired mime-type
+                if any([mimetype in enclosure["type"] for mimetype in self.mime]):
+                    downloadlinks[urlparse(
+                        enclosure["href"]).path.split("/")[-1]] = enclosure["href"]
+                    # preserve original name
+        else:
+            downloadlinks[urlparse(entry.link).query.split("/")[-1]] = entry.link
+        for podname in downloadlinks:
+            if podname not in self.entrylinks:
+                try:
+                    title = entry.title
+                except:
+                    title = podname
+                try:
+                    sanitizedsummary = htmltotext(entry.summary)
+                    if sanitizedsummary == "":
+                        sanitizedsummary = "No summary available"
+                except:
+                    sanitizedsummary = "No summary available"
+                try:
+                    placeholders = Placeholders(
+                        self, downloadlinks[podname], podname, title,
+                        sanitizedsummary)
+                    placeholders = check_directory(placeholders)
+                    condition = filtercond(placeholders)
+                    if condition:
+                        print("Downloading {} -- {}".format(title, podname))
+                        download_handler(self, placeholders)
+                        if self.willtag:
+                            tag(placeholders)
+                        if self.info:
+                            with open(self.info, 'a') as current:
+                                # We write to file this often to ensure that
+                                # downloaded entries count as downloaded.
+                                current.write(''.join([podname, ' ',
+                                              str(self.linkdate), '\n']))
+                    else:
+                        print("Skipping {} -- {}".format(title, podname))
+                except URLError:
+                    sys.exit(("... something went wrong."
+                              "Are you connected to the internet?"))
+
 
 class Placeholders():
     def __init__(self, feed, link, filename, title, summary):
@@ -713,8 +766,13 @@ def sync(args):
         entries_to_download = []
         for entry in feed.podcast.entries:
             feed.fix_linkdate(entry)
+<<<<<<< HEAD
             if entry.linkdate > currentdate and entrycounter < stop:
                 entries_to_download.append(entry)
+=======
+            if feed.linkdate > currentdate and entrycounter < stop:
+                feed.download_entry(entry)
+>>>>>>> 350a16d62dd9d7394515e85d5730a9a64d4f51c7
             entrycounter += 1
         # Sort entries_to_download
         entries_to_download.sort(key=operator.attrgetter("linkdate"),
@@ -788,4 +846,4 @@ def download(args):
         feed.info = []
         feed.entrylinks = []
         feed.fix_linkdate(entry)
-        download_entry(feed, entry)
+        feed.download_entry(entry)
diff --git a/pkgbuilds/stable/PKGBUILD b/pkgbuilds/stable/PKGBUILD
index ef0b4d8..25034e7 100644
--- a/pkgbuilds/stable/PKGBUILD
+++ b/pkgbuilds/stable/PKGBUILD
@@ -2,7 +2,7 @@
 
 pkgname=greg
 pkgver=0.4.0
-pkgrel=1
+pkgrel=2
 pkgdesc="A command-line podcast aggregator."
 arch=(any)
 url="https://github.com/manolomartinez/greg"
@@ -11,6 +11,7 @@ depends=('python-feedparser')
 optdepends=('python3-stagger-svn: writing metadata'
             'wget: alternative downloadhandler'
             'aria2: alternative downloadhandler')
+makedepends=('python-setuptools')
 source=("https://github.com/manolomartinez/greg/archive/${pkgver}.tar.gz")
 md5sums=('084ca6ba5448b5e8e7b6f4e41e3ce955')
 package() {