Speed up tests, especially MySQL tests, by several minutes.
MySQL tests went from almost four minutes to a minute and a half on my machine.
jamadden committed Sep 30, 2019
1 parent 63d883b commit 9fe11e7
Showing 9 changed files with 56 additions and 25 deletions.
25 changes: 18 additions & 7 deletions src/relstorage/cache/local_client.py
@@ -456,6 +456,10 @@ def remove_invalid_persistent_oids(self, bad_oids):
     def zap_all(self):
         _, destroy = sqlite_files(self.options, self.prefix)
         destroy()
+        # zapping happens frequently during test runs,
+        # and during zodbconvert when the process will exist
+        # only for a short time.
+        self.flush_all(False)

     @staticmethod
     def key_weight(_):
@@ -468,14 +472,21 @@ def value_weight(value):
         # weight is the size of the state
         return value.weight

-    def flush_all(self):
+    def flush_all(self, preallocate_nodes=None):
         with self._lock:
-            self._cache = self._cache_type(
-                self.limit,
-                key_weight=self.key_weight,
-                value_weight=self.value_weight,
-                empty_value=_SingleValue(b'', 0)
-            )
+            if self._cache or self._cache is None:
+                # Only actually abandon the cache object
+                # if it has data. Otherwise let it keep the
+                # preallocated CFFI objects it may have
+                # (those are expensive to create and tests call
+                # this a LOT)
+                self._cache = self._cache_type(
+                    self.limit,
+                    key_weight=self.key_weight,
+                    value_weight=self.value_weight,
+                    empty_value=_SingleValue(b'', 0),
+                    preallocate_nodes=preallocate_nodes,
+                )
             self._peek = self._cache.peek
             self._cache_mru = self._cache.__getitem__
             self._min_allowed_writeback = OidTMap()
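For illustration, here is a minimal standalone sketch of the reuse-if-empty pattern this hunk introduces. The class and names below are invented for the demo and are not part of RelStorage; a plain dict stands in for the CFFI cache, since both are falsy when empty:

class FlushDemo(object):
    """Toy model of the new flush_all logic."""

    def __init__(self):
        self._cache = None
        self.flush_all()

    def flush_all(self):
        # Only abandon the cache object if it has data (or does not
        # exist yet); an empty cache keeps its preallocated entries.
        if self._cache or self._cache is None:
            self._cache = {}

demo = FlushDemo()
first = demo._cache
demo.flush_all()                  # empty: the same object survives
assert demo._cache is first
demo._cache['oid'] = b'state'
demo.flush_all()                  # had data: rebuilt from scratch
assert demo._cache is not first

This is also why the test change further down sets ``c._cache = 1`` before calling ``flush_all``: a truthy sentinel forces a rebuild even when the cache holds no data.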
13 changes: 11 additions & 2 deletions src/relstorage/cache/lru_cffiring.py
@@ -24,6 +24,7 @@

 from zope import interface

+from relstorage._compat import IN_TESTRUNNER
 from relstorage._compat import OID_OBJECT_MAP_TYPE as OidOMap
 from relstorage.cache.interfaces import IGenerationalLRUCache
 from relstorage.cache.interfaces import IGeneration
@@ -67,7 +68,9 @@ class CFFICache(GenerationalCacheBase):
     # especially, or large cache sizes) because when zodbshootout clears caches,
     # our implementation throws this object all away, and then allocates again.
     # Meanwhile, all the old objects have to be GC'd.
-    _preallocate_entries = True
+    # So we disable it (unless explicitly requested) in test mode, where it
+    # was accounting for about 42s out of 142s
+    _preallocate_entries = not IN_TESTRUNNER
     # If so, how many? Try to get enough to fill the cache assuming objects are
     # this size on average
     _preallocate_avg_size = 512
@@ -95,7 +98,8 @@ def create_generations(cls,

     def __init__(self, byte_limit,
                  key_weight=len, value_weight=len,
-                 empty_value=(b'', 0)):
+                 empty_value=(b'', 0),
+                 preallocate_nodes=None):
         # This holds all the ring entries, no matter which ring they are in.

         # We experimented with using OOBTree and LOBTree for the type
@@ -114,6 +118,8 @@ def __init__(self, byte_limit,

         self.data = self._dict_type()
         self.get = self.data.get
+        if preallocate_nodes is not None:
+            self._preallocate_entries = preallocate_nodes

         generations = self.create_generations(
             eden_limit=int(byte_limit * self._gen_eden_pct),
@@ -153,6 +159,9 @@ def __iter__(self):
     def __contains__(self, key):
         return key in self.data

+    def __bool__(self):
+        return bool(self.data)
+
     def __setitem__(self, key, value):
         entry = self.get(key)
         if entry is not None:
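The exact definition of ``IN_TESTRUNNER`` lives in ``relstorage._compat`` and is not shown in this commit. A plausible sketch, offered purely as an assumption about how such a flag might be computed, is a command-line and environment check along these lines:

import os
import sys

# Guess whether we are running under a test runner by inspecting
# the command line and environment. Illustrative approximation only;
# see relstorage._compat for the real flag.
IN_TESTRUNNER = (
    any('testrunner' in arg or 'pytest' in arg or 'unittest' in arg
        for arg in sys.argv)
    or 'PYTEST_CURRENT_TEST' in os.environ
)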
4 changes: 2 additions & 2 deletions src/relstorage/cache/storage_cache.py
@@ -252,7 +252,7 @@ def save(self, **save_args):
     def restore(self):
         # We must only restore into an empty cache.
         state = self.polling_state
-        assert not len(self.local_client) # pylint:disable=len-as-condition
+        assert not self.local_client
         state.restore(self.adapter, self.local_client)

     def _reset(self, message=None):
@@ -298,8 +298,8 @@ def zap_all(self):
         other instances, and globally); in addition, remove any
         persistent cache files on disk.
         """
-        self.clear(load_persistent=False)
         self.local_client.zap_all()
+        self.clear(load_persistent=False)

     def _check_tid_after_load(self, oid_int, actual_tid_int,
                               expect_tid_int=None):
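The simpler ``assert not self.local_client`` works because Python truth-testing falls back to ``__bool__`` (added to ``CFFICache`` above) or ``__len__``. A tiny demonstration of the protocol, with names invented for the example (``__nonzero__`` covers Python 2, which RelStorage still supported at the time):

class TinyCache(object):
    def __init__(self):
        self.data = {}

    def __bool__(self):          # Python 3 truth hook
        return bool(self.data)

    __nonzero__ = __bool__       # Python 2 spelling

cache = TinyCache()
assert not cache                 # empty: falsy, no explicit len() needed
cache.data['k'] = b'v'
assert cache                     # non-empty: truthy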
4 changes: 3 additions & 1 deletion src/relstorage/cache/tests/test_local_client.py
@@ -75,7 +75,9 @@ def test_bucket_sizes_without_compression(self):
         # and probation each can hold one item, while protected can hold 4,
         # so our max size will be 60
         c.limit = 51
-        c.flush_all()
+
+        c._cache = 1
+        c.flush_all(preallocate_nodes=True)

         list_lrukeys = partial(list_lrukeys_, c._cache)
         list_lrufreq = partial(list_lrufreq_, c._cache)
5 changes: 3 additions & 2 deletions src/relstorage/cache/tests/test_lru_cffiring.py
@@ -99,11 +99,12 @@ def _getClass(self):
         from . import Cache
         return Cache

-    def _makeOne(self, limit, kind=None):
+    def _makeOne(self, limit, kind=None, preallocate_nodes=True):
         kind = kind or self._getClass()
         return kind(limit,
                     key_weight=self.key_weight,
-                    value_weight=self.value_weight)
+                    value_weight=self.value_weight,
+                    preallocate_nodes=preallocate_nodes)

     def _getIface(self):
         return interfaces.ILRUCache
11 changes: 2 additions & 9 deletions src/relstorage/tests/__init__.py
@@ -138,6 +138,7 @@ class StorageCreatingMixin(ABC):

     keep_history = None # Override
     driver_name = None # Override.
+    zap_slow = False # Override

     @abc.abstractmethod
     def make_adapter(self, options):
@@ -202,15 +203,7 @@ def make_storage(self, zap=True, **kw):
         adapter = self.make_adapter(options)
         storage = RelStorage(adapter, options=options)
         if zap:
-            # XXX: Some ZODB tests, possibly check4ExtStorageThread
-            # and check7StorageThreads don't close storages when done
-            # with them? This leads to connections remaining open with
-            # locks on PyPy, so on PostgreSQL we can't TRUNCATE tables
-            # and have to go the slow route.
-            #
-            # As of 2019-06-20 with PyPy 7.1.1, I'm no longer able to replicate
-            # a problem like that locally, so we go back to the fast way.
-            storage.zap_all()
+            storage.zap_all(slow=self.zap_slow)
         return self._wrap_storage(storage)

 class MockConnection(object):
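With the comment block gone, the zap strategy is data-driven: an adapter mixin only has to override the new class attribute. A minimal hypothetical override, assuming the ``StorageCreatingMixin`` shown above:

from relstorage.tests import StorageCreatingMixin

class MyAdapterTestMixin(StorageCreatingMixin):
    # make_storage() will now call storage.zap_all(slow=True)
    # for every storage this hypothetical mixin creates.
    zap_slow = True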
2 changes: 1 addition & 1 deletion src/relstorage/tests/fakecache.py
@@ -48,7 +48,7 @@ def incr(self, key):
         data[key] = value
         return value

-    def flush_all(self):
+    def flush_all(self, **_kwargs):
         data.clear()

     def disconnect_all(self):
8 changes: 8 additions & 0 deletions src/relstorage/tests/testmysql.py
@@ -35,6 +35,14 @@

 class MySQLAdapterMixin(object):

+    # The MySQL schema adapter uses DROP TABLE
+    # and then CREATE TABLE to zap, unless ``zap_all(slow=True)``
+    # is used. That is *much* faster than ``DELETE FROM`` on large
+    # databases (since we can't use TRUNCATE). But for small test
+    # databases, it adds lots of extra overhead to re-create those
+    # tables all the time, and ``DELETE FROM`` is the way to go.
+    zap_slow = True
+
     def __get_db_name(self):
         if self.keep_history:
             db = self.base_dbname
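A hedged sketch of the two strategies the comment contrasts. The real statements are generated by the MySQL schema adapter; the helper below, its parameters, and the table handling are hypothetical illustrations only:

def zap_all_tables(cursor, tables, recreate_schema, slow=False):
    # Hypothetical illustration, not the adapter's actual code.
    if slow:
        # Generic row deletion: cheap for the small databases used
        # in tests, painfully slow on large ones.
        for table in tables:
            cursor.execute('DELETE FROM ' + table)
    else:
        # MySQL-specific fast path for big databases: dropping and
        # re-creating a table is a fixed cost, while DELETE FROM
        # must touch every row (TRUNCATE is not usable here).
        for table in tables:
            cursor.execute('DROP TABLE ' + table)
        recreate_schema(cursor)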
9 changes: 8 additions & 1 deletion src/relstorage/tests/util.py
@@ -240,16 +240,23 @@ def test_suite(self):
         return suite

     def _default_make_check_class(self, bases, name, klass_dict=None):
+        base_klass_dict = self._make_base_klass_dict()
+        if klass_dict:
+            base_klass_dict.update(klass_dict)
+
         klass = type(
             name,
             (self.use_adapter,) + bases,
-            klass_dict or {}
+            base_klass_dict
         )

         return klass

     _default_make_test_class = _default_make_check_class

+    def _make_base_klass_dict(self):
+        return {}
+
     def __make_test_class(self, base, extra_bases, maker_base_name, maker_default, klass_dict=None):
         name = self.__name__ + base.__name__
         maker = getattr(
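A standalone sketch of the merge order the new code establishes: defaults from ``_make_base_klass_dict()`` come first, and explicit ``klass_dict`` entries win. The function and class names here are invented for the demo:

def make_check_class(bases, name, klass_dict=None, base_klass_dict=None):
    # Mirror of _default_make_check_class: start from the suite's
    # defaults, then let an explicit klass_dict override them.
    merged = dict(base_klass_dict or {})
    merged.update(klass_dict or {})
    return type(name, bases, merged)

klass = make_check_class(
    (object,), 'CheckMySQLHF',
    klass_dict={'zap_slow': True},
    base_klass_dict={'zap_slow': False, 'keep_history': True},
)
assert klass.zap_slow is True       # explicit entry wins
assert klass.keep_history is True   # default survives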
