Initial search prototype.
tkem committed Feb 24, 2016
1 parent 1b19c53 commit 22aeab8
Showing 5 changed files with 189 additions and 86 deletions.
21 changes: 11 additions & 10 deletions mopidy_podcast/backend.py
@@ -55,14 +55,14 @@ def __init__(self, dbpath, config, backend):
self.__proxy = self.actor_ref.proxy()

def on_start(self):
# TODO: delete everything but configured feeds if config changed
self.__proxy.refresh()
self.__timer.start()

def on_stop(self):
self.__timer.cancel()

def refresh(self, uris=None):
# TODO: delete everything but configured feeds if config changed
if uris is None:
logger.info('Refreshing %s', Extension.dist_name)
self.__proxy.refresh(self.__feeds)
@@ -80,15 +80,16 @@ def __timeout(self):
self.__proxy.refresh()
self.__timer.start()

def __update(self, uri):
# do *not* block backend while retrieving/updating podcast
podcast = self.__backend.podcasts.get(uri).get()
def __update(self, feedurl):
# do *not* block backend for retrieving/updating podcast
podcasts = self.__backend.podcasts
podcast = podcasts.get(feedurl).get()
if podcast is None:
logger.debug('Retrieving podcast %s', uri)
# TODO: configurable timeout, move to rssfeed?
with contextlib.closing(self.__opener.open(uri)) as source:
logger.debug('Retrieving podcast %s', feedurl)
# running in the background, no timeout necessary
with contextlib.closing(self.__opener.open(feedurl)) as source:
podcast = rss.parse(source)
podcast = self.__backend.podcasts.setdefault(uri, podcast).get()
podcast = podcasts.setdefault(feedurl, podcast).get()
logger.debug('Updating podcast %s', podcast.uri)
with schema.connect(self.__dbpath) as connection:
schema.update(connection, podcast)
@@ -107,8 +108,8 @@ class PodcastBackend(pykka.ThreadingActor, backend.Backend):
def __init__(self, config, audio):
super(PodcastBackend, self).__init__()
self.__config = config
self.__dbpath = self.__init_schema(config)
self.library = PodcastLibraryProvider(self.__dbpath, config, backend=self)
self.__dbpath = dbpath = self.__init_schema(config)
self.library = PodcastLibraryProvider(dbpath, config, backend=self)
self.playback = PodcastPlaybackProvider(audio=audio, backend=self)
self.podcasts = PodcastCache(config)

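A note on the backend changes above: the actor never blocks Mopidy itself. __update() runs on the feeds actor's own thread, and periodic refreshes are driven by a timer whose callback merely enqueues a message through the actor's proxy. A minimal sketch of that pykka pattern, with illustrative names rather than the extension's real classes:

import threading

import pykka


class FeedRefresher(pykka.ThreadingActor):

    def __init__(self, interval, feeds):
        super(FeedRefresher, self).__init__()
        self.__interval = interval
        self.__feeds = feeds
        self.__proxy = self.actor_ref.proxy()
        self.__timer = None

    def on_start(self):
        # kick off the first refresh; the proxy call returns immediately
        # and the work runs on the actor's own thread
        self.__proxy.refresh()

    def on_stop(self):
        if self.__timer is not None:
            self.__timer.cancel()

    def refresh(self):
        for feedurl in self.__feeds:
            self.update(feedurl)
        # re-arm the timer; its callback only sends another actor message,
        # so no actor code ever runs on the timer thread
        self.__timer = threading.Timer(self.__interval, self.__proxy.refresh)
        self.__timer.start()

    def update(self, feedurl):
        # placeholder: retrieve and store one feed; blocking here stalls
        # only this actor, never the backend that started it
        pass
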
22 changes: 18 additions & 4 deletions mopidy_podcast/library.py
@@ -1,6 +1,7 @@
from __future__ import unicode_literals

import collections
import itertools
import logging

from mopidy import backend, models
@@ -14,7 +15,7 @@

class PodcastLibraryProvider(backend.LibraryProvider):

root_directory = models.Ref(uri='podcast:', name='Podcasts')
root_directory = models.Ref.directory(uri='podcast:', name='Podcasts')

def __init__(self, dbpath, config, backend):
super(PodcastLibraryProvider, self).__init__(backend)
@@ -26,8 +27,7 @@ def __init__(self, dbpath, config, backend):

def browse(self, uri):
if uri == self.root_directory.uri:
with schema.connect(self.__dbpath) as connection:
refs = schema.list(connection)
refs = self.__list()
else:
refs = self.__browse(uri)
return list(refs)
@@ -65,7 +65,13 @@ def refresh(self, uri=None):
self.__tracks.clear()

def search(self, query=None, uris=None, exact=False):
return None
# translate query to model
try:
query = translator.query(query, exact)
except NotImplementedError as e:
logger.info('Not searching %s: %s', Extension.dist_name, e)
else:
logger.info('%r: %r', query, list(self.__search(query)))

def __browse(self, uri):
podcast = self.__podcast(uri)
@@ -80,6 +86,10 @@ def __images(self, uri):
result[uri] = [podcast.image] if podcast.image else None
return result

def __list(self):
with schema.connect(self.__dbpath) as connection:
return itertools.starmap(translator.ref, schema.list(connection))

def __lookup(self, uri):
podcast = self.__podcast(uri)
# return result dict as with LibraryController.lookup(uris)
@@ -93,3 +103,7 @@ def __podcast(self, uri):
scheme, _, feedurl = uri.partition('+')
assert feedurl and scheme == Extension.ext_name
return self.backend.podcasts[feedurl]

def __search(self, query):
with schema.connect(self.__dbpath) as connection:
return schema.search(connection, query)
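
The new search() delegates query translation to a translator.query() helper that is not part of this diff. From the call site one can infer it turns Mopidy's query dict into the Term-based model introduced below in models.py, raising NotImplementedError for anything the schema cannot express. A speculative sketch under those assumptions; the field mapping and the Query(terms, exact) container are guesses (only query.terms and query.exact are actually implied by schema.search()):

from mopidy_podcast import models

FIELDS = {
    'any': None,  # corresponds to the 'any' key in schema.PARAMETERS
    'album': models.Podcast.title,
    'albumartist': models.Podcast.author,
    'artist': models.Episode.author,
    'comment': models.Episode.description,
    'date': models.Episode.pubdate,
    'genre': models.Podcast.category,
    'track_name': models.Episode.title,
}


def query(query_dict, exact):
    terms = []
    for name, values in query_dict.items():
        try:
            field = FIELDS[name]
        except KeyError:
            # surfaces as the 'Not searching ...' log message above
            raise NotImplementedError('Cannot search by %r' % name)
        terms.append(models.Term(field=field, values=frozenset(values)))
    return models.Query(terms=terms, exact=exact)  # Query is assumed
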
28 changes: 2 additions & 26 deletions mopidy_podcast/models.py
@@ -182,32 +182,8 @@ def rss(cls, **kwargs):
class Term(ValidatedImmutableObject):
"""Mopidy model type to represent a search term."""

PODCAST_TITLE = 'podcast.title'
"""Constant used for comparison with the :attr:`attribute` field."""

EPISODE_TITLE = 'episode.title'
"""Constant used for comparison with the :attr:`attribute` field."""

PODCAST_AUTHOR = 'podcast.author'
"""Constant used for comparison with the :attr:`attribute` field."""

EPISODE_AUTHOR = 'episode.author'
"""Constant used for comparison with the :attr:`attribute` field."""

CATEGORY = 'category'
"""Constant used for comparison with the :attr:`attribute` field."""

PUBDATE = 'pubdate'
"""Constant used for comparison with the :attr:`attribute` field."""

DESCRIPTION = 'description'
"""Constant used for comparison with the :attr:`attribute` field."""

attribute = fields.Field(type=basestring, choices=[
PODCAST_TITLE, EPISODE_TITLE, PODCAST_AUTHOR, EPISODE_AUTHOR,
CATEGORY, PUBDATE, DESCRIPTION
])
"""The search term's attribute or :class:`None`."""
field = fields.Field(type=fields.Field)
"""The search term's field or :class:`None`."""

values = fields.Collection(type=basestring, container=frozenset)
"""The search terms's set of values."""
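
The point of this model change: a search term now carries a field descriptor (e.g. models.Podcast.title) instead of a string constant, which is what allows schema.PARAMETERS below to key directly on the descriptor objects. An illustrative construction, with a made-up search value:

from mopidy_podcast import models

# previously: models.Term(attribute=Term.PODCAST_TITLE, values=...)
term = models.Term(
    field=models.Podcast.title,
    values=frozenset(['Radiolab']),
)
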
153 changes: 117 additions & 36 deletions mopidy_podcast/schema.py
@@ -6,19 +6,17 @@
import re
import sqlite3

from mopidy import models

from . import Extension
from . import Extension, models

PARAMETERS = {
None: 'any'
# models.Term.PODCAST_TITLE: 'podcast_title',
# models.Term.EPISODE_TITLE: 'episode_title',
# models.Term.PODCAST_AUTHOR: 'podcast_author',
# models.Term.EPISODE_AUTHOR: 'episode_author',
# models.Term.CATEGORY: 'category',
# models.Term.PUBDATE: 'pubdate',
# models.Term.DESCRIPTION: 'description'
None: 'any',
models.Episode.author: 'episode_author',
models.Episode.description: 'description',
models.Episode.pubdate: 'pubdate',
models.Episode.title: 'episode_title',
models.Podcast.author: 'podcast_author',
models.Podcast.category: 'category',
models.Podcast.title: 'podcast_title'
}

FTPODCAST_COLS = {
@@ -101,6 +99,101 @@
AND (:description IS NULL OR :description = e.description)
LIMIT :limit OFFSET :offset
"""
INDEXED_QUERY = """
SELECT title AS text,
NULL AS created,
category AS category,
description AS description,
language AS language,
title AS title,
uri AS uri
FROM podcast
WHERE (:any IS NULL OR :any IN (title, author, category, description))
AND (:podcast_title IS NULL OR :podcast_title = title)
AND (:episode_title IS NULL)
AND (:podcast_author IS NULL OR :podcast_author = author)
AND (:episode_author IS NULL)
AND (:category IS NULL OR :category = category)
AND (:pubdate IS NULL)
AND (:description IS NULL OR :description = description)
UNION
SELECT e.title AS text,
NULL AS created,
p.category AS category,
e.description AS description,
p.language AS language,
p.title AS title,
p.uri || '#' || e.guid AS uri
FROM episode AS e
JOIN podcast AS p ON e.podcast = p.uri
WHERE (:any IS NULL OR :any IN (e.title, e.author, e.description))
AND (:podcast_title IS NULL OR :podcast_title = p.title)
AND (:episode_title IS NULL OR :episode_title = e.title)
AND (:podcast_author IS NULL OR :podcast_author = p.author)
AND (:episode_author IS NULL OR :episode_author = e.author)
AND (:category IS NULL OR :category = p.category)
AND (:pubdate IS NULL OR e.pubdate LIKE date(:pubdate) || '%')
AND (:description IS NULL OR :description = e.description)
LIMIT :limit OFFSET :offset
"""

INDEXED_ALBUM_QUERY = """
SELECT title AS text,
NULL AS created,
category AS category,
description AS description,
language AS language,
title AS title,
uri AS uri
FROM podcast
WHERE (:any IS NULL OR :any IN (title, author, category, description))
AND (:podcast_title IS NULL OR :podcast_title = title)
AND (:episode_title IS NULL)
AND (:podcast_author IS NULL OR :podcast_author = author)
AND (:episode_author IS NULL)
AND (:category IS NULL OR :category = category)
AND (:pubdate IS NULL)
AND (:description IS NULL OR :description = description)
LIMIT :limit OFFSET :offset
"""

INDEXED_TRACK_QUERY = """
SELECT title AS text,
NULL AS created,
category AS category,
description AS description,
language AS language,
title AS title,
uri AS uri
FROM podcast
WHERE (:any IS NULL OR :any IN (title, author, category, description))
AND (:podcast_title IS NULL OR :podcast_title = title)
AND (:episode_title IS NULL)
AND (:podcast_author IS NULL OR :podcast_author = author)
AND (:episode_author IS NULL)
AND (:category IS NULL OR :category = category)
AND (:pubdate IS NULL)
AND (:description IS NULL OR :description = description)
UNION
SELECT e.title AS text,
NULL AS created,
p.category AS category,
e.description AS description,
p.language AS language,
p.title AS title,
p.uri || '#' || e.guid AS uri
FROM episode AS e
JOIN podcast AS p ON e.podcast = p.uri
WHERE (:any IS NULL OR :any IN (e.title, e.author, e.description))
AND (:podcast_title IS NULL OR :podcast_title = p.title)
AND (:episode_title IS NULL OR :episode_title = e.title)
AND (:podcast_author IS NULL OR :podcast_author = p.author)
AND (:episode_author IS NULL OR :episode_author = e.author)
AND (:category IS NULL OR :category = p.category)
AND (:pubdate IS NULL OR e.pubdate LIKE date(:pubdate) || '%')
AND (:description IS NULL OR :description = e.description)
LIMIT :limit OFFSET :offset
"""


logger = logging.getLogger(__name__)
@@ -143,20 +236,18 @@ def init(cursor, scripts=os.path.join(os.path.dirname(__file__), 'sql')):


def list(cursor):
rows = cursor.execute("""
SELECT 'podcast+' || uri AS uri, title AS name
return cursor.execute("""
SELECT uri AS uri, title AS title
FROM podcast
""")
return (models.Ref.album(**row) for row in rows)


def search(cursor, query, offset=0, limit=-1):
if query.exact:
rows = _indexed_search(cursor, query.terms, offset, limit)
rows = _indexed_search(cursor, query, offset, limit)
else:
rows = _fulltext_search(cursor, query.terms, offset, limit)
rows = None # FIXME
# return (models.Outline.rss(**row) for row in rows)
rows = _fulltext_search(cursor, query, offset, limit)
return rows


def update(cursor, podcast):
@@ -199,28 +290,19 @@ def _insert_or_replace(cursor, table, params):
return cursor.execute(sql, params.values())


def _indexed_search(cursor, terms, offset=0, limit=-1):
def _indexed_search(cursor, query, offset=0, limit=-1):
params = dict.fromkeys(PARAMETERS.values(), None)
for term in terms:
try:
key = PARAMETERS[term.attribute]
except KeyError:
raise NotImplementedError(term.attribute)
else:
params[key] = ''.join(term.values)
for term in query.terms:
params[PARAMETERS[term.field]] = ' '.join(term.values)
params.update(offset=offset, limit=limit)
return cursor.execute(INDEXED_QUERY, params)


def _fulltext_search(cursor, terms, offset=0, limit=-1):
def _fulltext_search(cursor, query, offset=0, limit=-1):
params = dict.fromkeys(PARAMETERS.values(), None)
for term in terms:
try:
key = PARAMETERS[term.attribute]
except KeyError:
raise NotImplementedError(term.attribute)
else:
params[key] = ' '.join(map(_quote, term.values))
for term in query.terms:
params[PARAMETERS[term.field]] = ' '.join(map(_quote, term.values))
params.update(offset=offset, limit=limit)
# SQLite MATCH clauses cannot be combined with AND or OR
sql = FULLTEXT_QUERY % (
' INTERSECT '.join(
@@ -234,8 +316,7 @@ def _fulltext_search(cursor, terms, offset=0, limit=-1):
if params[key] is not None
)
)
params.update(offset=offset, limit=limit)
logger.debug('sql: %r %r', sql, params)
# logger.debug('Fulltext query: %r %r', sql, params)
return cursor.execute(sql, params)


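One pattern worth spelling out in the SQL above: every query binds the complete set of named parameters on each execution, and any parameter left at None (SQL NULL) makes its (:param IS NULL OR ...) guard collapse to true, disabling that filter. A single statement thus serves every combination of search terms. A self-contained demonstration of the idiom (table and rows invented for the example):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE podcast (uri TEXT, title TEXT, author TEXT)')
conn.executemany('INSERT INTO podcast VALUES (?, ?, ?)', [
    ('feed:a', 'Radiolab', 'WNYC'),
    ('feed:b', 'Serial', 'Sarah Koenig'),
])

SQL = """
SELECT uri, title
  FROM podcast
 WHERE (:podcast_title IS NULL OR :podcast_title = title)
   AND (:podcast_author IS NULL OR :podcast_author = author)
 LIMIT :limit OFFSET :offset
"""

# as in _indexed_search(): default every parameter to None, then fill in
# only those the query actually constrains
params = dict.fromkeys(['podcast_title', 'podcast_author'], None)
params.update(podcast_title='Serial', limit=-1, offset=0)  # LIMIT -1 = no limit
print(conn.execute(SQL, params).fetchall())  # [('feed:b', 'Serial')]

The full-text variant cannot use this trick for its MATCH terms, since SQLite's FTS tables reject MATCH combined with AND or OR; that is why _fulltext_search() assembles one subquery per matched column and joins them with INTERSECT.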
