Enable persistent caches for ZODB-level tests.
jamadden committed Jun 18, 2019
1 parent fcfc77d · commit e565c69
Showing 6 changed files with 87 additions and 52 deletions.
17 changes: 14 additions & 3 deletions src/relstorage/_util.py
@@ -51,18 +51,29 @@ def f(*args, **kwargs):
         return result
     return f
 
+def _thread_spawn(func, args):
+    import threading
+    t = threading.Thread(target=func, args=args)
+    t.name = t.name + '-spawn-' + func.__name__
+    t.start()
+    return t
+
+def _gevent_pool_spawn(func, args):
+    import gevent
+    return gevent.get_hub().threadpool.spawn(func, *args)
+
 def spawn(func, args=()):
     """Execute func in a different (real) thread"""
-    import threading
-    submit = lambda func, args: threading.Thread(target=func, args=args).start()
+
+    submit = _thread_spawn
     try:
         import gevent.monkey
         import gevent
     except ImportError:
         pass
     else:
         if gevent.monkey.is_module_patched('threading'):
-            submit = lambda func, args: gevent.get_hub().threadpool.spawn(func, *args)
+            submit = _gevent_pool_spawn
     submit(func, args)
 
 def get_this_psutil_process():
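The refactoring above replaces the inline lambdas with named module-level helpers, which gives spawned threads a recognizable name and keeps gevent optional. A minimal usage sketch (the caller and its arguments are hypothetical, not part of this commit):

    from relstorage._util import spawn

    def write_snapshot(path, data):
        # Runs on a plain threading.Thread, or on the gevent hub's
        # threadpool when the threading module has been monkey-patched.
        with open(path, 'wb') as f:
            f.write(data)

    spawn(write_snapshot, args=('/tmp/cache.bin', b'payload'))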
4 changes: 2 additions & 2 deletions src/relstorage/cache/local_client.py
@@ -128,11 +128,11 @@ def _compress(self, data): # pylint:disable=method-hidden
         return data
 
     @_log_timed
-    def save(self, overwrite=False, close_async=True):
+    def save(self, **sqlite_args):
         options = self.options
         if options.cache_local_dir and self.__bucket.size:
             conn = sqlite_connect(options, self.prefix,
-                                  overwrite=overwrite, close_async=close_async)
+                                  **sqlite_args)
             with closing(conn):
                 try:
                     self.write_to_sqlite(conn)
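Collapsing save()'s explicit parameters into **sqlite_args means any keyword accepted by sqlite_connect() now passes straight through. A sketch of the effect at a hypothetical call site:

    # Both calls forward their keywords unchanged to sqlite_connect().
    client.save(overwrite=True, close_async=False)  # the two previous knobs
    client.save(max_wal_size=2 * 1024 * 1024)       # any other sqlite_connect keyword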
60 changes: 31 additions & 29 deletions src/relstorage/cache/persistence.py
@@ -99,7 +99,7 @@ def __init__(self, *args, **kwargs):
         super(Connection, self).__init__(*args, **kwargs)
 
         self.rs_db_filename = None
-        self.rs_close_async = True
+        self.rs_close_async = DEFAULT_CLOSE_ASYNC
         self._rs_has_closed = False
 
     def __repr__(self):
@@ -125,7 +125,7 @@ def close(self):
         self._rs_has_closed = True
         from relstorage._util import spawn as _spawn
         spawn = _spawn if self.rs_close_async else lambda f: f()
-        def c():
+        def optimize_and_close():
             # Recommended best practice is to OPTIMIZE the database for
             # each closed connection. OPTIMIZE needs to run in each connection
             # so it can see what tables and indexes were used. It's usually fast,
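The comment above is about sqlite's PRAGMA optimize; the pattern boils down to issuing one statement just before the real close, which optimize_and_close does (possibly on another thread). A standalone sketch using plain sqlite3, not RelStorage's Connection subclass:

    import sqlite3

    conn = sqlite3.connect('cache.sqlite3')
    try:
        # OPTIMIZE inspects which tables and indexes *this* connection
        # used, so it must run per-connection, right before closing.
        conn.execute('PRAGMA optimize')
    except sqlite3.OperationalError:
        pass  # e.g. the file was removed out from under us
    conn.close()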
@@ -137,7 +137,7 @@ def c():
                 logger.exception("Failed to optimize database; was it removed?")
 
             super(Connection, self).close()
-        spawn(c)
+        spawn(optimize_and_close)
 
 
 # PRAGMA statements don't allow ? placeholders
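That restriction is easy to demonstrate: sqlite rejects a ? placeholder inside a PRAGMA, so the values have to be interpolated into the statement text instead. A quick illustration:

    import sqlite3

    cur = sqlite3.connect(':memory:').cursor()
    try:
        cur.execute('PRAGMA page_size = ?', (4096,))
    except sqlite3.OperationalError as e:
        print(e)  # PRAGMA won't bind parameters; this is a syntax error
    cur.execute('PRAGMA page_size = %d' % 4096)  # interpolation works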
@@ -173,7 +173,31 @@ def _execute_pragmas(cur, **kwargs):
         else:
             logger.debug("Using %s = %s", k, orig_value)
 
-def _connect_to_file(fname, factory=Connection, close_async=True,
+_MB = 1024 * 1024
+DEFAULT_MAX_WAL = 10 * _MB
+DEFAULT_CLOSE_ASYNC = False
+# Benchmarking on at least one system doesn't show an improvement to
+# either reading or writing by forcing a large mmap_size.
+DEFAULT_MMAP_SIZE = None
+# 4096 is the page size in current releases of sqlite; older versions
+# used 1024. A larger page makes sense as we have biggish values.
+# Going larger doesn't make much difference in benchmarks.
+DEFAULT_PAGE_SIZE = 4096
+# Control where temporary data is:
+#
+# FILE = a deleted disk file (that sqlite never flushes so
+# theoretically just exists in the operating system's filesystem
+# cache)
+#
+# MEMORY = explicitly in memory only
+#
+# DEFAULT = compile time default. Benchmarking for large writes
+# doesn't show much difference between FILE and MEMORY, so don't
+# bother to change from the default.
+DEFAULT_TEMP_STORE = None
+
+def _connect_to_file(fname, factory=Connection,
+                     close_async=DEFAULT_CLOSE_ASYNC,
                      pragmas=None):
 
     connection = sqlite3.connect(
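For orientation, the constants above feed the pragma set executed on each new connection; roughly like the following (a sketch of the wiring, not the exact production code; None is taken to mean leaving sqlite's compiled-in default alone):

    pragmas = {
        'journal_size_limit': DEFAULT_MAX_WAL,  # cap the WAL at 10 MB
        'mmap_size': DEFAULT_MMAP_SIZE,         # None: keep compiled-in default
        'page_size': DEFAULT_PAGE_SIZE,         # 4096-byte pages
        'temp_store': DEFAULT_TEMP_STORE,       # None: keep compiled-in default
    }
    pragmas = {k: v for k, v in pragmas.items() if v is not None}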
@@ -204,9 +228,11 @@
     # the database so that we can verify that it's not corrupt.
     pragmas.setdefault('journal_mode', 'wal')
     cur = connection.cursor()
+    __traceback_info__ = cur, pragmas
     try:
         _execute_pragmas(cur, **pragmas)
     except:
+        logger.exception("Failed to execute pragmas")
         cur.close()
         if hasattr(connection, 'rs_close_async'):
             connection.rs_close_async = False
@@ -217,30 +243,6 @@
 
     return connection
 
-_MB = 1024 * 1024
-DEFAULT_MAX_WAL = 10 * _MB
-DEFAULT_CLOSE_ASYNC = True
-# Benchmarking on at least one system doesn't show an improvement to
-# either reading or writing by forcing a large mmap_size.
-DEFAULT_MMAP_SIZE = None
-# 4096 is the page size in current releases of sqlite; older versions
-# used 1024. A larger page makes sense as we have biggish values.
-# Going larger doesn't make much difference in benchmarks.
-DEFAULT_PAGE_SIZE = 4096
-# Control where temporary data is:
-#
-# FILE = a deleted disk file (that sqlite never flushes so
-# theoretically just exists in the operating system's filesystem
-# cache)
-#
-# MEMORY = explicitly in memory only
-#
-# DEFAULT = compile time default. Benchmarking for large writes
-# doesn't show much difference between FILE and MEMORY, so don't
-# bother to change from the default.
-DEFAULT_TEMP_STORE = None
-
-
 def sqlite_connect(options, prefix,
                    overwrite=False,
                    max_wal_size=DEFAULT_MAX_WAL,
@@ -255,7 +257,7 @@ def sqlite_connect(options, prefix,
     result in the connection being closed, only committed or rolled back.
     """
     parent_dir = getattr(options, 'cache_local_dir', options)
-    # Allow for memory and temporary databases:
+    # Allow for memory and temporary databases (empty string):
     if parent_dir != ':memory:' and parent_dir:
         parent_dir = _normalize_path(options)
         try:
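The guard lets sqlite's two special database names bypass the directory handling entirely; both behaviors below are standard sqlite3:

    import sqlite3

    mem = sqlite3.connect(':memory:')  # private in-memory database
    tmp = sqlite3.connect('')          # anonymous temp database, deleted on close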
5 changes: 5 additions & 0 deletions src/relstorage/cache/storage_cache.py
@@ -775,6 +775,11 @@ def __call__(self, checkpoints, row_iter):
             # Old generation, no delta.
             # Even though this is old, it could be good to have it,
             # it might be something that doesn't change much.
+            #
+            # Using `cp0` is our fallback preferred key, so
+            # this doesn't have to get copied from cp1 later.
+            #
+            # XXX: This is probably wrong. See https://github.com/zodb/relstorage/issues/249
             key = (oid, cp0)
             yield key, value

51 changes: 33 additions & 18 deletions src/relstorage/tests/reltestbase.py
@@ -28,6 +28,7 @@
 from persistent.mapping import PersistentMapping
 from zc.zlibstorage import ZlibStorage
 
+import ZODB.tests.util
 from ZODB.DB import DB
 from ZODB.FileStorage import FileStorage
 from ZODB.POSException import ReadConflictError
@@ -98,7 +99,14 @@ def make_storage(self, zap=True, **kw):
         if util.CACHE_SERVERS and util.CACHE_MODULE_NAME:
             kw['cache_servers'] = util.CACHE_SERVERS
             kw['cache_module_name'] = util.CACHE_MODULE_NAME
-        kw['cache_prefix'] = type(self).__name__ + self._testMethodName
+        if 'cache_prefix' not in kw:
+            kw['cache_prefix'] = type(self).__name__ + self._testMethodName
+        if 'cache_local_dir' not in kw:
+            # Always use a persistent cache. This helps discover errors in
+            # the persistent cache.
+            # These tests run in a temporary directory that gets cleaned up, so the CWD is
+            # appropriate.
+            kw['cache_local_dir'] = '.'
 
         assert self.driver_name
         options = Options(keep_history=self.keep_history, driver=self.driver_name, **kw)
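Because both settings are guarded with 'not in kw', individual tests can still override them, while everything else now exercises the persistent cache by default. A hypothetical call inside a test method (an empty cache_local_dir disables the persistent cache, per the check in save() above):

    # Defaults: per-test cache_prefix, persistent cache in the per-test
    # temporary directory (the CWD).
    storage = self.make_storage()

    # A single test can still opt out:
    storage = self.make_storage(cache_local_dir='')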
@@ -125,13 +133,10 @@ class RelStorageTestBase(StorageCreatingMixin,
     __to_close = ()
 
     def setUp(self):
+        # This sets up a temporary directory for each test and
+        # changes to it.
+        super(RelStorageTestBase, self).setUp()
         self.__to_close = []
-        # Note that we're deliberately NOT calling super's setup.
-        # It does stuff on disk, etc, that's not necessary for us
-        # and just slows us down by ~10%.
-        #super(RelStorageTestBase, self).setUp()
-        # Also note that subclasses might not even call us! If they're going to
-        # use _closing, they have to.
 
     def _closing(self, o):
         """
@@ -143,19 +148,26 @@ def _closing(self, o):
         self.__to_close.append(o)
         return o
 
+    def _close(self):
+        # Override from StorageTestBase.
+
+        # Try to avoid creating one through our _storage property.
+        if '_storage' in self.__dict__:
+            storage = self._storage
+        else:
+            storage = self._storage_created
+        self._storage = None
+
+        if storage is not None:
+            storage.close()
+            storage.cleanup()
+
     def tearDown(self):
         transaction.abort()
         for x in reversed(self.__to_close):
             x.close()
         self.__to_close = ()
-        # XXX: This could create one! Do subclasses override self._storage?
-        storage = self._storage
-        if storage is not None:
-            self._storage = None
-            storage.close()
-            storage.cleanup()
-        # See comments in setUp.
-        #super(RelStorageTestBase, self).tearDown()
+        super(RelStorageTestBase, self).tearDown()
 
     def get_storage(self):
         # Create a storage with default options
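The __dict__ check in _close matters because _storage is created lazily: merely reading the attribute on a test that never opened a storage would build one just in time to close it. A minimal sketch of the hazard, using a generic caching descriptor (hypothetical, not the project's actual implementation):

    class Lazy(object):
        # Computes the value on first access, then caches it in the
        # instance __dict__ under the same name.
        def __init__(self, func):
            self.func = func
        def __get__(self, inst, cls):
            if inst is None:
                return self
            value = inst.__dict__[self.func.__name__] = self.func(inst)
            return value

    class Harness(object):
        @Lazy
        def _storage(self):
            print('building an expensive storage...')
            return object()

        def close_if_open(self):
            # Peeking in __dict__ avoids triggering the lazy build.
            storage = self.__dict__.get('_storage')
            if storage is not None:
                print('closing', storage)

    Harness().close_if_open()  # prints nothing: no storage was ever built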
@@ -977,7 +989,10 @@ def checkGeventSwitchesOnOpen(self):
 
 
 class AbstractRSZodbConvertTests(StorageCreatingMixin,
-                                 FSZODBConvertTests):
+                                 FSZODBConvertTests,
+                                 # This one isn't cooperative in
+                                 # setUp(), so it needs to be last.
+                                 ZODB.tests.util.TestCase):
     keep_history = True
     filestorage_name = 'source'
     relstorage_name = 'destination'
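The ordering comment is about cooperative multiple inheritance: super() calls only reach bases that themselves call super(), so a setUp() that stops the chain cuts off every class after it in the MRO. Putting the non-cooperative ZODB.tests.util.TestCase last keeps the cooperative mixins ahead of it. A schematic illustration with hypothetical classes:

    import unittest

    class CooperativeMixin(unittest.TestCase):
        def setUp(self):
            super(CooperativeMixin, self).setUp()  # keep the chain going
            self.resource = 'ready'

    class NonCooperativeBase(unittest.TestCase):
        def setUp(self):
            # No super().setUp(): classes after this one in the MRO
            # never get their setUp run.
            pass

    class Good(CooperativeMixin, NonCooperativeBase):
        pass  # CooperativeMixin.setUp runs, then NonCooperativeBase.setUp

    class Bad(NonCooperativeBase, CooperativeMixin):
        pass  # CooperativeMixin.setUp is silently skipped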
@@ -1124,8 +1139,8 @@ def tearDown(self):
         self._dst.close()
         self._dst.cleanup()
         self._dst = None
-        super(AbstractToFileStorage, self).tearDown()
         self.clearTempDir()
+        super(AbstractToFileStorage, self).tearDown()
 
     def new_dest(self):
         return self._closing(FileStorage(self._dst_path))
@@ -1145,8 +1160,8 @@ def tearDown(self):
         self._dst.close()
         self._dst.cleanup()
         self._dst = None
-        super(AbstractFromFileStorage, self).tearDown()
         self.clearTempDir()
+        super(AbstractFromFileStorage, self).tearDown()
 
     def new_dest(self):
         return self._dst
2 changes: 2 additions & 0 deletions src/relstorage/tests/test_zodbconvert.py
@@ -44,6 +44,7 @@ class AbstractZODBConvertBase(unittest.TestCase):
     zap_supported_by_dest = False
 
     def setUp(self):
+        super(AbstractZODBConvertBase, self).setUp()
         self._to_close = []
 
     def tearDown(self):
@@ -66,6 +67,7 @@ def tearDown(self):
         # 2.5.0 and 5.3.
         gc.collect()
         gc.collect()
+        super(AbstractZODBConvertBase, self).tearDown()
 
     def _closing(self, thing):
         self._to_close.append(thing)
