[1.2.X] Fixed #13946 -- Modified the database cache backend to use the database router to determine availability of the cache table. Thanks to tiemonster for the report.

Backport of r13473 from trunk.

git-svn-id: http://code.djangoproject.com/svn/django/branches/releases/1.2.X@13474 bcc190cf-cafb-0310-a4f2-bffc1f526a37
commit 6c2e31e9b7004d5722b5e9b2c60a5fed7c017689 (1 parent: 9383a37)
authored by @freakboy3742
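For context, this change targets multi-database deployments. The settings sketch below is illustrative only and not part of the commit; the ``cache_master``/``cache_slave`` aliases mirror the documentation added in this commit, while the SQLite file names, the cache table name, and the router module path are hypothetical::

    # settings.py -- a minimal Django 1.2-style multi-database setup
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': 'app.db',
        },
        'cache_master': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': 'cache_master.db',
        },
        'cache_slave': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': 'cache_slave.db',
        },
    }

    # The 1.2-era database cache backend is configured with a backend URI;
    # the table name after 'db://' is arbitrary.
    CACHE_BACKEND = 'db://my_cache_table'

    # A router such as the CacheRouter documented in the cache.txt diff
    # below is activated through DATABASE_ROUTERS (module path hypothetical).
    DATABASE_ROUTERS = ['path.to.CacheRouter']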
django/core/cache/backends/db.py (99 lines changed)
@@ -1,7 +1,7 @@
 "Database cache backend."
 from django.core.cache.backends.base import BaseCache
-from django.db import connection, transaction, DatabaseError
+from django.db import connections, router, transaction, DatabaseError
 import base64, time
 from datetime import datetime
 try:
@@ -9,10 +9,31 @@
 except ImportError:
     import pickle

+class Options(object):
+    """A class that will quack like a Django model _meta class.
+
+    This allows cache operations to be controlled by the router
+    """
+    def __init__(self, table):
+        self.db_table = table
+        self.app_label = 'django_cache'
+        self.module_name = 'cacheentry'
+        self.verbose_name = 'cache entry'
+        self.verbose_name_plural = 'cache entries'
+        self.object_name = 'CacheEntry'
+        self.abstract = False
+        self.managed = True
+        self.proxy = False
+
 class CacheClass(BaseCache):
     def __init__(self, table, params):
         BaseCache.__init__(self, params)
-        self._table = connection.ops.quote_name(table)
+        self._table = table
+
+        class CacheEntry(object):
+            _meta = Options(table)
+        self.cache_model_class = CacheEntry
+
         max_entries = params.get('max_entries', 300)
         try:
             self._max_entries = int(max_entries)
@@ -25,17 +46,22 @@ def __init__(self, table, params):
             self._cull_frequency = 3

     def get(self, key, default=None):
-        cursor = connection.cursor()
-        cursor.execute("SELECT cache_key, value, expires FROM %s WHERE cache_key = %%s" % self._table, [key])
+        db = router.db_for_read(self.cache_model_class)
+        table = connections[db].ops.quote_name(self._table)
+        cursor = connections[db].cursor()
+
+        cursor.execute("SELECT cache_key, value, expires FROM %s WHERE cache_key = %%s" % table, [key])
         row = cursor.fetchone()
         if row is None:
             return default
         now = datetime.now()
         if row[2] < now:
-            cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % self._table, [key])
-            transaction.commit_unless_managed()
+            db = router.db_for_write(self.cache_model_class)
+            cursor = connections[db].cursor()
+            cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
+            transaction.commit_unless_managed(using=db)
             return default
-        value = connection.ops.process_clob(row[1])
+        value = connections[db].ops.process_clob(row[1])
         return pickle.loads(base64.decodestring(value))

     def set(self, key, value, timeout=None):
@@ -47,56 +73,67 @@ def add(self, key, value, timeout=None):
     def _base_set(self, mode, key, value, timeout=None):
         if timeout is None:
             timeout = self.default_timeout
-        cursor = connection.cursor()
-        cursor.execute("SELECT COUNT(*) FROM %s" % self._table)
+        db = router.db_for_write(self.cache_model_class)
+        table = connections[db].ops.quote_name(self._table)
+        cursor = connections[db].cursor()
+
+        cursor.execute("SELECT COUNT(*) FROM %s" % table)
         num = cursor.fetchone()[0]
         now = datetime.now().replace(microsecond=0)
         exp = datetime.fromtimestamp(time.time() + timeout).replace(microsecond=0)
         if num > self._max_entries:
-            self._cull(cursor, now)
+            self._cull(db, cursor, now)
         encoded = base64.encodestring(pickle.dumps(value, 2)).strip()
-        cursor.execute("SELECT cache_key, expires FROM %s WHERE cache_key = %%s" % self._table, [key])
+        cursor.execute("SELECT cache_key, expires FROM %s WHERE cache_key = %%s" % table, [key])
         try:
             result = cursor.fetchone()
             if result and (mode == 'set' or
                     (mode == 'add' and result[1] < now)):
-                cursor.execute("UPDATE %s SET value = %%s, expires = %%s WHERE cache_key = %%s" % self._table,
-                    [encoded, connection.ops.value_to_db_datetime(exp), key])
+                cursor.execute("UPDATE %s SET value = %%s, expires = %%s WHERE cache_key = %%s" % table,
+                    [encoded, connections[db].ops.value_to_db_datetime(exp), key])
             else:
-                cursor.execute("INSERT INTO %s (cache_key, value, expires) VALUES (%%s, %%s, %%s)" % self._table,
-                    [key, encoded, connection.ops.value_to_db_datetime(exp)])
+                cursor.execute("INSERT INTO %s (cache_key, value, expires) VALUES (%%s, %%s, %%s)" % table,
+                    [key, encoded, connections[db].ops.value_to_db_datetime(exp)])
         except DatabaseError:
             # To be threadsafe, updates/inserts are allowed to fail silently
-            transaction.rollback_unless_managed()
+            transaction.rollback_unless_managed(using=db)
             return False
         else:
-            transaction.commit_unless_managed()
+            transaction.commit_unless_managed(using=db)
             return True
     def delete(self, key):
-        cursor = connection.cursor()
-        cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % self._table, [key])
-        transaction.commit_unless_managed()
+        db = router.db_for_write(self.cache_model_class)
+        table = connections[db].ops.quote_name(self._table)
+        cursor = connections[db].cursor()
+
+        cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
+        transaction.commit_unless_managed(using=db)

     def has_key(self, key):
+        db = router.db_for_read(self.cache_model_class)
+        table = connections[db].ops.quote_name(self._table)
+        cursor = connections[db].cursor()
+
         now = datetime.now().replace(microsecond=0)
-        cursor = connection.cursor()
-        cursor.execute("SELECT cache_key FROM %s WHERE cache_key = %%s and expires > %%s" % self._table,
-            [key, connection.ops.value_to_db_datetime(now)])
+        cursor.execute("SELECT cache_key FROM %s WHERE cache_key = %%s and expires > %%s" % table,
+            [key, connections[db].ops.value_to_db_datetime(now)])
         return cursor.fetchone() is not None
-    def _cull(self, cursor, now):
+    def _cull(self, db, cursor, now):
         if self._cull_frequency == 0:
             self.clear()
         else:
-            cursor.execute("DELETE FROM %s WHERE expires < %%s" % self._table,
-                [connection.ops.value_to_db_datetime(now)])
-            cursor.execute("SELECT COUNT(*) FROM %s" % self._table)
+            table = connections[db].ops.quote_name(self._table)
+            cursor.execute("DELETE FROM %s WHERE expires < %%s" % table,
+                [connections[db].ops.value_to_db_datetime(now)])
+            cursor.execute("SELECT COUNT(*) FROM %s" % table)
             num = cursor.fetchone()[0]
             if num > self._max_entries:
-                cursor.execute("SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s" % self._table, [num / self._cull_frequency])
-                cursor.execute("DELETE FROM %s WHERE cache_key < %%s" % self._table, [cursor.fetchone()[0]])
+                cursor.execute("SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s" % table, [num / self._cull_frequency])
+                cursor.execute("DELETE FROM %s WHERE cache_key < %%s" % table, [cursor.fetchone()[0]])
     def clear(self):
-        cursor = connection.cursor()
-        cursor.execute('DELETE FROM %s' % self._table)
+        db = router.db_for_write(self.cache_model_class)
+        table = connections[db].ops.quote_name(self._table)
+        cursor = connections[db].cursor()
+        cursor.execute('DELETE FROM %s' % table)
django/db/backends/creation.py (8 lines changed)
@@ -353,9 +353,11 @@ def create_test_db(self, verbosity=1, autoclobber=False):
         call_command('syncdb', verbosity=verbosity, interactive=False, database=self.connection.alias)

         if settings.CACHE_BACKEND.startswith('db://'):
-            from django.core.cache import parse_backend_uri
-            _, cache_name, _ = parse_backend_uri(settings.CACHE_BACKEND)
-            call_command('createcachetable', cache_name)
+            from django.core.cache import parse_backend_uri, cache
+            from django.db import router
+            if router.allow_syncdb(self.connection.alias, cache.cache_model_class):
+                _, cache_name, _ = parse_backend_uri(settings.CACHE_BACKEND)
+                call_command('createcachetable', cache_name, database=self.connection.alias)

         # Get a cursor (even though we don't need one yet). This has
         # the side effect of initializing the test database.
docs/topics/cache.txt (43 lines changed)
@@ -136,6 +136,49 @@ settings file. You can't use a different database backend for your cache table.
 Database caching works best if you've got a fast, well-indexed database server.

+Database caching and multiple databases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you use database caching with multiple databases, you'll also need
+to set up routing instructions for your database cache table. For the
+purposes of routing, the database cache table appears as a model named
+``CacheEntry``, in an application named ``django_cache``. This model
+won't appear in the models cache, but the model details can be used
+for routing purposes.
+
+For example, the following router would direct all cache read
+operations to ``cache_slave``, and all write operations to
+``cache_master``. The cache table will only be synchronized onto
+``cache_master``::
+
+    class CacheRouter(object):
+        """A router to control all database cache operations"""
+
+        def db_for_read(self, model, **hints):
+            "All cache read operations go to the slave"
+            if model._meta.app_label in ('django_cache',):
+                return 'cache_slave'
+            return None
+
+        def db_for_write(self, model, **hints):
+            "All cache write operations go to master"
+            if model._meta.app_label in ('django_cache',):
+                return 'cache_master'
+            return None
+
+        def allow_syncdb(self, db, model):
+            "Only synchronize the cache model on master"
+            if model._meta.app_label in ('django_cache',):
+                return db == 'cache_master'
+            return None
+
+If you don't specify routing directions for the database cache model,
+the cache backend will use the ``default`` database.
+
+Of course, if you don't use the database cache backend, you don't need
+to worry about providing routing instructions for the database cache
+model.
+
 Filesystem caching
 ------------------
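With a router like the one documented above registered in ``DATABASE_ROUTERS``, the cache API itself is unchanged; the backend consults the router internally on every operation. A minimal usage sketch, assuming the ``cache_master``/``cache_slave`` aliases from the example::

    from django.core.cache import cache

    # set(), add(), delete() and clear() are writes: the backend asks the
    # router for a write alias, so these statements hit 'cache_master'.
    cache.set('greeting', 'hello', 30)

    # get() and has_key() are reads, routed to 'cache_slave'. A value
    # written moments ago may not be visible here until replication
    # between the two databases catches up.
    value = cache.get('greeting')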