
Commit dbe1a0d

Revert "Add transaction awareness for cache keys" because _p_serial
isn't an adequate marker for unique key generation

This reverts commit 9d8aea6.
andbag committed Apr 10, 2018
1 parent 9d8aea6 commit dbe1a0d
Showing 8 changed files with 12 additions and 123 deletions.
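
For context (not part of the commit itself): the revert swaps the per-index cache-key scheme back from a tuple that embedded getCounterKey() to a plain string built from getCounter(). A minimal, self-contained Python sketch of the two variants as they appear in the hunks below; FakeIndex is an illustrative stand-in, not one of the real Products.PluginIndexes classes:

# Illustrative stand-in; the real indexes are UnIndex, DateRangeIndex, etc.
class FakeIndex(object):
    id = 'myindex'

    def getCounter(self):
        # number of index mutations since creation/clear
        return 3

    def getCounterKey(self):
        # reverted variant: counter plus the counter object's _p_serial
        # (zero serial shown here, as for a never-committed counter)
        return (self.getCounter(), b'\x00\x00\x00\x00\x00\x00\x00\x00')


index = FakeIndex()

# Reverted scheme (commit 9d8aea6): a tuple embedding the "counter key".
iid_reverted = (index.__class__.__name__, index.id, index.getCounterKey())

# Restored scheme: a plain string derived from the change counter alone.
iid_restored = '_%s_%s_%s' % (index.__class__.__name__,
                              index.id, index.getCounter())

print(iid_reverted)   # ('FakeIndex', 'myindex', (3, b'\x00...\x00'))
print(iid_restored)   # '_FakeIndex_myindex_3'
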
5 changes: 2 additions & 3 deletions src/Products/PluginIndexes/DateRangeIndex/DateRangeIndex.py
@@ -248,9 +248,8 @@ def getRequestCacheKey(self, record, resultset=None):
         tid = str(term)

         # unique index identifier
-        iid = (self.__class__.__name__,
-               self.id, self.getCounterKey())
-
+        iid = '_%s_%s_%s' % (self.__class__.__name__,
+                             self.id, self.getCounter())
         # record identifier
         if resultset is None:
             rid = '_%s' % (tid, )
13 changes: 0 additions & 13 deletions src/Products/PluginIndexes/PathIndex/PathIndex.py
@@ -24,10 +24,8 @@
 from BTrees.OOBTree import OOBTree
 from BTrees.Length import Length
 from Persistence import Persistent
-from ZODB.utils import newTid
 from zope.interface import implementer

-
 from Products.PluginIndexes.interfaces import (
     IPathIndex,
     IQueryIndex,
@@ -202,17 +200,6 @@ def getCounter(self):
         """Return a counter which is increased on index changes"""
         return self._counter is not None and self._counter() or 0

-    def getCounterKey(self):
-        """Returns an unique key indicating an uniqe state of the index"""
-        if self._counter is not None:
-            key = (self.getCounter(), self._counter._p_serial)
-        else:
-            # generate new serial for backward compatibility
-            # if counter is not set
-            key = (self.getCounter(), newTid(None))
-
-        return key
-
     def numObjects(self):
         """ See IPluggableIndex.
         """
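
For context (not part of the commit): the getCounter() that stays behind relies on a lazily created BTrees.Length.Length counter. A rough standalone sketch of that idiom, under the assumption that the counter is bumped on every index mutation; CounterSketch and _bump are illustrative names, not the actual PathIndex code:

from BTrees.Length import Length


class CounterSketch(object):
    """Rough sketch of the counter idiom these indexes keep using."""

    _counter = None

    def _bump(self):
        # lazily create the conflict-friendly Length counter, then increment
        if self._counter is None:
            self._counter = Length()
        self._counter.change(1)

    def getCounter(self):
        """Return a counter which is increased on index changes"""
        return self._counter is not None and self._counter() or 0


sketch = CounterSketch()
assert sketch.getCounter() == 0   # no counter object yet -> 0
sketch._bump()
sketch._bump()
assert sketch.getCounter() == 2   # Length() reports the accumulated count
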
11 changes: 0 additions & 11 deletions src/Products/PluginIndexes/TopicIndex/TopicIndex.py
@@ -21,7 +21,6 @@
 from BTrees.Length import Length
 from OFS.SimpleItem import SimpleItem
 from Persistence import Persistent
-from ZODB.utils import newTid
 from zope.interface import implementer

 from Products.PluginIndexes.interfaces import (
@@ -104,16 +103,6 @@ def getCounter(self):
         """Return a counter which is increased on index changes"""
         return self._counter is not None and self._counter() or 0

-    def getCounterKey(self):
-        """Returns an unique key indicating an uniqe state of the index"""
-        if self._counter is not None:
-            key = (self.getCounter(), self._counter._p_serial)
-        else:
-            # counter is not set, generate new serial
-            key = (self.getCounter(), newTid(None))
-
-        return key
-
     def numObjects(self):
         """Return the number of indexed objects."""
         return "n/a"
3 changes: 0 additions & 3 deletions src/Products/PluginIndexes/interfaces.py
@@ -287,6 +287,3 @@ class IIndexCounter(Interface):

     def getCounter():
         """Return a counter which is increased on index changes"""
-
-    def getCounterKey():
-        """Returns an unique key indicating an uniqe state of the index"""
51 changes: 0 additions & 51 deletions src/Products/PluginIndexes/tests/test_unindex.py
@@ -16,9 +16,6 @@
 from BTrees.IIBTree import difference
 from OFS.SimpleItem import SimpleItem
 from Testing.makerequest import makerequest
-from Acquisition import aq_base
-import ZODB
-import transaction


 class TestUnIndex(unittest.TestCase):
@@ -167,51 +164,3 @@ class Dummy(object):
         # clear changes the index
         index.clear()
         self.assertEqual(index.getCounter(), 3)
-
-    def test_getCounterKey(self):
-        index = self._makeOne('counter')
-
-        class Dummy(object):
-            def __init__(self, obj_id):
-                self.id = obj_id
-                self.counter = 'counter_{0}'.format(obj_id)
-
-        # check counter key of initialized empty index
-        # counter key is a tuple of the counts of index operations and
-        # transaction id (tid) of the counter variable
-
-        key0 = index.getCounterKey()
-        self.assertEqual(key0, (0, b'\x00\x00\x00\x00\x00\x00\x00\x00'))
-
-        connection = ZODB.connection(None)
-        connection.add(aq_base(index))
-
-        # first object to index
-        obj = Dummy(1)
-        index.index_object(obj.id, obj)
-
-        # indexing of object changes counter but not the tid
-        key1 = index.getCounterKey()
-        self.assertEqual(key1, (1, b'\x00\x00\x00\x00\x00\x00\x00\x00'))
-
-        transaction.commit()
-
-        # commit changes the tid but not the counter
-        key2 = index.getCounterKey()
-        self.assertEqual(key2[0], key1[0])
-        self.assertFalse(key2[1] == key1[1])
-
-        # second object to index
-        obj = Dummy(2)
-        index.index_object(obj.id, obj)
-
-        # indexing of object changes counter but not the tid
-        key3 = index.getCounterKey()
-        self.assertFalse(key3[0] == key2[0])
-        self.assertEqual(key3[1], key2[1])
-
-        transaction.abort()
-
-        # abort resets counter key to previos state
-        key4 = index.getCounterKey()
-        self.assertEqual(key4, key2)
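
For context (not part of the commit): the deleted test leaned on the fact that an added persistent object keeps the zero serial until the first commit, and that only a commit moves _p_serial. A hedged, standalone sketch of just that ZODB behaviour; Counter here is an illustrative persistent class, not the index counter itself:

import transaction
import ZODB
from persistent import Persistent
from ZODB.utils import z64   # b'\x00' * 8, the "no transaction yet" serial


class Counter(Persistent):
    value = 0


connection = ZODB.connection(None)   # in-memory database, as in the deleted test
obj = Counter()
connection.add(obj)

# before the first commit the object's serial is still the zero tid
assert obj._p_serial == z64

transaction.commit()

# after the commit the serial is the tid of the committing transaction
assert obj._p_serial != z64
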
17 changes: 2 additions & 15 deletions src/Products/PluginIndexes/unindex.py
@@ -31,7 +31,6 @@
 from BTrees.OOBTree import OOBTree
 from OFS.SimpleItem import SimpleItem
 from ZODB.POSException import ConflictError
-from ZODB.utils import newTid
 from zope.interface import implementer

 from Products.PluginIndexes.cache import RequestCache
@@ -300,17 +299,6 @@ def getCounter(self):
         """Return a counter which is increased on index changes"""
         return self._counter is not None and self._counter() or 0

-    def getCounterKey(self):
-        """Returns an unique key indicating an uniqe state of the index"""
-        if self._counter is not None:
-            key = (self.getCounter(), self._counter._p_serial)
-        else:
-            # generate new serial for backward compatibility
-            # if counter is not set
-            key = (self.getCounter(), newTid(None))
-
-        return key
-
     def numObjects(self):
         """Return the number of indexed objects."""
         return len(self._unindex)
@@ -398,9 +386,8 @@ def getRequestCacheKey(self, record, resultset=None):
         rid = frozenset(params)

         # unique index identifier
-        iid = (self.__class__.__name__,
-               self.id, self.getCounterKey())
-
+        iid = '_%s_%s_%s' % (self.__class__.__name__,
+                             self.id, self.getCounter())
         return (iid, rid)

     def _apply_index(self, request, resultset=None):
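
For context (not part of the commit): with the restored scheme the counter value is baked into the iid string, so any index mutation changes the request-cache key and stale entries simply stop matching. A tiny worked example with hypothetical values:

class_name, index_id = 'UnIndex', 'reviewed'   # hypothetical index

iid_before = '_%s_%s_%s' % (class_name, index_id, 3)  # getCounter() == 3
iid_after = '_%s_%s_%s' % (class_name, index_id, 4)   # after one more index change

assert iid_before == '_UnIndex_reviewed_3'
assert iid_after == '_UnIndex_reviewed_4'
assert iid_before != iid_after   # counter bump -> different key -> cache miss
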
4 changes: 2 additions & 2 deletions src/Products/ZCatalog/cache.py
@@ -63,7 +63,7 @@ def skip(name, value):
             if name in catalog.indexes:
                 index = catalog.getIndex(name)
                 if IIndexCounter.providedBy(index):
-                    ck = index.getCounterKey()
+                    counter = index.getCounter()
                 else:
                     # cache key invalidation cannot be supported if
                     # any index of query cannot be tested for changes
@@ -86,7 +86,7 @@ def skip(name, value):
             else:
                 value = self._convert_datum(index, value)

-            keys.append((name, value, ck))
+            keys.append((name, value, counter))

         key = frozenset(keys)
         cache_key = (self.cid, key)
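
For context (not part of the commit): at the catalog level the per-index counter now travels as the third element of each (name, value, counter) entry, and the full cache key is the catalog id plus a frozenset of those entries. A small sketch mirroring the shape checked in the tests below, with illustrative values:

cid = ('catalog',)   # catalog identifier, as in the tests below
counter = 2          # stand-in for index.getCounter() of the queried index

keys = [('big', (True,), counter)]     # (index name, query value, counter)
cache_key = (cid, frozenset(keys))

assert cache_key == (('catalog',),
                     frozenset([('big', (True,), 2)]))
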
31 changes: 6 additions & 25 deletions src/Products/ZCatalog/tests/test_cache.py
@@ -118,35 +118,19 @@ def _get_cache_key(self, query=None):
     def test_make_key(self):
         query = {'big': True}
         expect = (('catalog',),
-                  frozenset([('big', (True,),
-                              (self.length,
-                               b'\x00\x00\x00\x00\x00\x00\x00\x00'))])
-                  )
-
+                  frozenset([('big', (True,), self.length)]))
         self.assertEquals(self._get_cache_key(query), expect)

         query = {'start': '2013-07-01'}
         expect = (('catalog',),
-                  frozenset([('start', ('2013-07-01',),
-                              (self.length,
-                               b'\x00\x00\x00\x00\x00\x00\x00\x00'))])
-
-                  )
-
+                  frozenset([('start', ('2013-07-01',), self.length)]))
         self.assertEquals(self._get_cache_key(query), expect)

         query = {'path': '/1', 'date': '2013-07-05', 'numbers': [1, 3]}
         expect = (('catalog',),
-                  frozenset([('date', ('2013-07-05',),
-                              (self.length,
-                               b'\x00\x00\x00\x00\x00\x00\x00\x00')),
-                             ('numbers', (1, 3),
-                              (self.length,
-                               b'\x00\x00\x00\x00\x00\x00\x00\x00')),
-                             ('path', ('/1',),
-                              (self.length,
-                               b'\x00\x00\x00\x00\x00\x00\x00\x00'))]))
-
+                  frozenset([('date', ('2013-07-05',), self.length),
+                             ('numbers', (1, 3), self.length),
+                             ('path', ('/1',), self.length)]))
         self.assertEquals(self._get_cache_key(query), expect)

         queries = [{'big': True, 'b_start': 0},
@@ -156,10 +140,7 @@ def test_make_key(self):
                    {'big': True, 'sort_on': 'big', 'sort_order': 'descending'},
                    ]
         expect = (('catalog',),
-                  frozenset([('big', (True,),
-                              (self.length,
-                               b'\x00\x00\x00\x00\x00\x00\x00\x00'))]))
-
+                  frozenset([('big', (True,), self.length)]))
         for query in queries:
             self.assertEquals(self._get_cache_key(query), expect)

