Commit 326e9f3

Okay, the test failures are now down to 19/1379. The left-over errors are
largely due to:

* Different pickle sizes in Python 3
  (even for protocol 1, where new padding is added, as far as I can tell).

* Weak references do not seem to work correctly.

* When all tests are run, committing transactions fails. Probably some
  tests are not cleaning up enough after themselves.

The biggest remaining issue at this point is the fork of Python 3.3's
pickle module needed to properly load Python 2 binary strings, and adding
``noload()`` back to the unpickler.
Stephan Richter committed Feb 15, 2013
1 parent f4bfe17 commit 326e9f3
Showing 22 changed files with 182 additions and 169 deletions.
9 changes: 8 additions & 1 deletion src/ZODB/BaseStorage.py
@@ -39,6 +39,13 @@
# Py3
import pickle

# Py3: Python 3's `hasattr()` only swallows AttributeError.
def py2_hasattr(obj, name):
try:
getattr(obj, name)
except:
return False
return True

log = logging.getLogger("ZODB.BaseStorage")

@@ -373,7 +380,7 @@ def copy(source, dest, verbose=0):
# using store(). However, if we use store, then
# copyTransactionsFrom() may fail with VersionLockError or
# ConflictError.
restoring = hasattr(dest, 'restore')
restoring = py2_hasattr(dest, 'restore')
fiter = source.iterator()
for transaction in fiter:
tid = transaction.tid
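
Why the shim: Python 2's ``hasattr()`` swallowed *every* exception raised
during the attribute lookup, while Python 3's lets anything other than
AttributeError propagate, so probing ``dest`` for a ``restore`` method could
now raise instead of answering False. A minimal standalone sketch of the
difference (the ``Flaky`` class is invented for illustration)::

    class Flaky(object):
        @property
        def restore(self):
            # The lookup itself can fail with something that is not an
            # AttributeError, e.g. a storage probing state not yet set up.
            raise KeyError('not ready')

    obj = Flaky()
    # Python 2: hasattr(obj, 'restore') -> False; any exception is swallowed.
    # Python 3: hasattr(obj, 'restore') raises KeyError, since only
    # AttributeError is swallowed; py2_hasattr(obj, 'restore') -> False on both.

Note that the shim's bare ``except:`` also traps KeyboardInterrupt; that is
what reproduces Python 2's ``hasattr()`` exactly.
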
2 changes: 1 addition & 1 deletion src/ZODB/FileStorage/FileStorage.py
@@ -996,7 +996,7 @@ def _txn_undo_write(self, tpos):
raise UndoError("non-undoable transaction")

if failures:
raise MultipleUndoErrors(failures.items())
raise MultipleUndoErrors(list(failures.items()))

return tindex

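
The ``list()`` wrapper matters because ``dict.items()`` returns a lazy view on
Python 3 rather than a list, and an argument stored on a long-lived exception
should be a real sequence. A quick illustration with hypothetical failure
data::

    failures = {b'\x00' * 8: 'non-undoable transaction'}

    items = failures.items()   # Python 2: list of tuples; Python 3: dict view
    # A view cannot be indexed and tracks later changes to the dict, so
    # materialize it first:
    args = list(items)         # a plain list of tuples on both versions
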
2 changes: 1 addition & 1 deletion src/ZODB/FileStorage/fspack.py
@@ -563,7 +563,7 @@ def writePackedDataRecord(self, h, data, new_tpos):
# Update the header to reflect current information, then write
# it to the output file.
if data is None:
data = ""
data = b''
h.prev = 0
h.back = 0
h.plen = len(data)
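
The record data is written to the pack output file, which is opened in binary
mode, so the empty sentinel must be ``b''``: on Python 3, writing ``str`` to a
binary file raises TypeError, and ``h.plen`` must count bytes. A tiny
demonstration with an assumed scratch file::

    with open('scratch.bin', 'wb') as f:
        f.write(b'')    # fine on Python 2 and 3; len(b'') == 0 either way
        # f.write('')   # Python 3: TypeError: a bytes-like object is required
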
4 changes: 2 additions & 2 deletions src/ZODB/blob.py
@@ -187,7 +187,7 @@ def destroyed(ref, readers=self.readers):
self._create_uncommitted_file()
result = BlobFile(self._p_blob_uncommitted, mode, self)
if self._p_blob_committed:
utils.cp(open(self._p_blob_committed), result)
utils.cp(open(self._p_blob_committed, 'rb'), result)
if mode == 'r+':
result.seek(0)
else:
@@ -863,7 +863,7 @@ def undo(self, serial_id, transaction):
data, serial_before, serial_after = load_result
orig_fn = self.fshelper.getBlobFilename(oid, serial_before)
new_fn = self.fshelper.getBlobFilename(oid, undo_serial)
orig = open(orig_fn, "r")
orig = open(orig_fn, "rb")
new = open(new_fn, "wb")
utils.cp(orig, new)
orig.close()
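
Both hunks open blob files in binary mode before handing them to
``utils.cp()``. On Python 3 a text-mode handle yields ``str`` chunks, which
cannot be written to a ``'wb'`` destination, and decoding arbitrary blob bytes
can fail outright. A rough stand-in for the copy loop (``ZODB.utils.cp`` is
assumed to work chunk-wise like this)::

    def cp(src, dst, bufsize=8192):
        """Copy one open file to another in fixed-size chunks."""
        while True:
            chunk = src.read(bufsize)
            if not chunk:
                break
            dst.write(chunk)   # needs bytes chunks when dst is opened 'wb'
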
9 changes: 7 additions & 2 deletions src/ZODB/scripts/analyze.py
@@ -5,7 +5,12 @@
import pickle
import sys
from ZODB.FileStorage import FileStorage
from cStringIO import StringIO

try:
from cStringIO import StringIO as BytesIO
except ImportError:
# Py3
from io import BytesIO

class FakeError(Exception):
def __init__(self, module, name):
@@ -96,7 +101,7 @@ def analyze_trans(report, txn):

def get_type(record):
try:
unpickled = FakeUnpickler(StringIO(record.data)).load()
unpickled = FakeUnpickler(BytesIO(record.data)).load()
except FakeError as err:
return "%s.%s" % (err.module, err.name)
except:
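
The import dance is the usual straddling idiom: prefer the C-implemented
``cStringIO`` on Python 2, fall back to ``io.BytesIO`` on Python 3. Either way
the unpickler gets a binary file-like object, matching ``record.data`` being
bytes. A self-contained check of the idiom::

    try:
        from cStringIO import StringIO as BytesIO   # Python 2
    except ImportError:
        from io import BytesIO                      # Python 3

    import pickle

    buf = BytesIO(pickle.dumps({'oid': 42}))
    print(pickle.Unpickler(buf).load())             # {'oid': 42} on both
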
4 changes: 2 additions & 2 deletions src/ZODB/scripts/tests/test_repozo.py
@@ -890,8 +890,8 @@ def assertRestored(self, correctpath='Data.fs', when=None):
self._callRepozoMain(argv)

# check restored file content is equal to file that was backed up
f = file(correctpath, 'rb')
g = file(restoredfile, 'rb')
f = open(correctpath, 'rb')
g = open(restoredfile, 'rb')
fguts = f.read()
gguts = g.read()
f.close()
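
``file()`` doubled as file type and constructor on Python 2 but is gone in
Python 3; ``open()`` is the spelling that works everywhere. For completeness
(the temporary path is invented for the example)::

    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        _ = tmp.write(b'backup bytes')
        path = tmp.name

    f = open(path, 'rb')   # portable; file(path, 'rb') is a NameError on Py3
    guts = f.read()
    f.close()
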
2 changes: 1 addition & 1 deletion src/ZODB/serialize.py
@@ -264,7 +264,7 @@ def persistent_id(self, obj):
...
InvalidObjectReference:
('Attempt to store an object from a foreign database connection',
<ZODB.serialize.DummyJar instance at ...>, P(bob))
<ZODB.serialize.DummyJar ...>, P(bob))
Constructor arguments used by __new__(), as returned by
__getnewargs__(), can affect memory allocation, but may also
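
The expected doctest output is loosened because instance reprs changed:
Python 2 shows old-style instances as ``<module.Class instance at 0x...>``,
Python 3 shows ``<module.Class object at 0x...>``. With doctest's ELLIPSIS
option, ``<ZODB.serialize.DummyJar ...>`` matches both. Schematically::

    class DummyJar:   # old-style on Python 2, an ordinary class on Python 3
        pass

    # Python 2: repr(DummyJar()) -> '<__main__.DummyJar instance at 0x...>'
    # Python 3: repr(DummyJar()) -> '<__main__.DummyJar object at 0x...>'
    # An ELLIPSIS doctest expecting '<__main__.DummyJar ...>' accepts both.
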
10 changes: 5 additions & 5 deletions src/ZODB/tests/Corruption.py
@@ -45,10 +45,10 @@ def checkTruncatedIndex(self):

# truncation the index file
self.failUnless(os.path.exists('Data.fs.index'))
f = open('Data.fs.index', 'r+')
f = open('Data.fs.index', 'rb+')
f.seek(0, 2)
size = f.tell()
f.seek(size / 2)
f.seek(size // 2)
f.truncate()
f.close()

@@ -62,10 +62,10 @@ def checkCorruptedIndex(self):
# truncation the index file
self.failUnless(os.path.exists('Data.fs.index'))
size = os.stat('Data.fs.index')[stat.ST_SIZE]
f = open('Data.fs.index', 'r+')
f = open('Data.fs.index', 'rb+')
while f.tell() < size:
f.seek(random.randrange(1, size / 10), 1)
f.write('\000')
f.seek(random.randrange(1, size // 10), 1)
f.write(b'\000')
f.close()

self._storage = ZODB.FileStorage.FileStorage('Data.fs')
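
Two independent fixes meet in this test. On Python 3, ``/`` between two ints
yields a float, but ``file.seek()`` and ``random.randrange()`` want integers,
hence floor division ``//``; and since the index file is now opened in binary
mode, the corrupting byte must be the bytes literal ``b'\000'``. The division
change in isolation::

    size = 7
    size / 2    # Python 2: 3 (an int)   Python 3: 3.5 (a float)
    size // 2   # Python 2 and 3: 3; always an int, safe for file.seek()
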
4 changes: 2 additions & 2 deletions src/ZODB/tests/IExternalGC.test
@@ -5,14 +5,14 @@ A storage that provides IExternalGC supports external garbage
collectors by providing a deleteObject method that transactionally
deletes an object.

A create_storage function is provided that creates a storage.

>>> storage = create_storage()
>>> import ZODB.blob, transaction
>>> db = ZODB.DB(storage)
>>> conn = db.open()
>>> conn.root()[0] = conn.root().__class__()
>>> conn.root()[1] = ZODB.blob.Blob('some data')
>>> conn.root()[1] = ZODB.blob.Blob(b'some data')
>>> transaction.commit()
>>> oid0 = conn.root()[0]._p_oid
>>> oid1 = conn.root()[1]._p_oid
8 changes: 4 additions & 4 deletions src/ZODB/tests/IteratorStorage.py
@@ -240,9 +240,9 @@ def compare(self, storage1, storage2):
# meaning they were the same length.
# Additionally, check that we're backwards compatible to the
# IndexError we used to raise before.
self.assertRaises(StopIteration, itxn1.next)
self.assertRaises(StopIteration, itxn2.next)
self.assertRaises(StopIteration, next, itxn1)
self.assertRaises(StopIteration, next, itxn2)
# Make sure ther are no more records left in txn1 and txn2, meaning
# they were the same length
self.assertRaises(StopIteration, iter1.next)
self.assertRaises(StopIteration, iter2.next)
self.assertRaises(StopIteration, next, iter1)
self.assertRaises(StopIteration, next, iter2)
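
Python 3 renamed the iterator method ``next()`` to ``__next__()``; the
portable spelling is the ``next()`` builtin, available since Python 2.6.
Handing ``assertRaises`` the callable plus its argument lets the assertion
make the call itself. The pattern in minimal form::

    import unittest

    class ExhaustionTest(unittest.TestCase):
        def test_exhausted_iterator(self):
            it = iter([])
            # it.next() is Python 2 only; next(it) works on 2.6+ and 3.x.
            self.assertRaises(StopIteration, next, it)
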
3 changes: 2 additions & 1 deletion src/ZODB/tests/PackableStorage.py
@@ -783,4 +783,5 @@ def setup(test):

return doctest.DocFileSuite(
'IExternalGC.test',
setUp=setup, tearDown=zope.testing.setupstack.tearDown)
setUp=setup, tearDown=zope.testing.setupstack.tearDown,
checker=ZODB.tests.util.checker)
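
The doctest suite gains an output ``checker`` because reprs differ across
Python versions, most visibly the ``b'...'`` prefix on bytes. The diff does
not show ``ZODB.tests.util.checker`` itself; a checker of that general kind
can be built with ``zope.testing.renormalizing`` (a sketch under that
assumption, not the actual definition)::

    import re
    from zope.testing import renormalizing

    # Strip the b-prefix from bytes reprs so one expected output can match
    # both Python 2 and Python 3.
    checker = renormalizing.RENormalizing([
        (re.compile(r"b('.*?')"), r"\1"),
    ])
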
4 changes: 2 additions & 2 deletions src/ZODB/tests/ReadOnlyStorage.py
@@ -50,7 +50,7 @@ def checkWriteMethods(self):
self.assertRaises(ReadOnlyError, self._storage.tpc_begin, t)

self.assertRaises(ReadOnlyError, self._storage.store,
'\000' * 8, None, '', '', t)
b'\000' * 8, None, b'', '', t)

self.assertRaises(ReadOnlyError, self._storage.undo,
'\000' * 8, t)
b'\000' * 8, t)
2 changes: 1 addition & 1 deletion src/ZODB/tests/StorageTestBase.py
@@ -226,7 +226,7 @@ def _undo(self, tid, expected_oids=None, note=None):
vote_result = self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
if expected_oids is not None:
oids = undo_result and undo_result[1] or []
oids = list(undo_result[1]) if undo_result else []
oids.extend(oid for (oid, _) in vote_result or ())
self.assertEqual(len(oids), len(expected_oids), repr(oids))
for oid in expected_oids:
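
The old ``x and y or z`` idiom predates Python's conditional expression and
silently falls through to ``z`` whenever ``y`` is falsy; the rewrite is
explicit and also copies ``undo_result[1]`` into a real list, which matters
because the next line calls ``oids.extend(...)``. Side by side, with a
hypothetical result tuple::

    undo_result = (b'\x00' * 8, (b'oid-1', b'oid-2'))   # assumed (serial, oids)

    oids = undo_result and undo_result[1] or []         # old: may be a tuple
    oids = list(undo_result[1]) if undo_result else []  # new: a mutable list
    oids.extend([b'oid-3'])                             # tuples lack extend()
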
33 changes: 16 additions & 17 deletions src/ZODB/tests/TransactionalUndoStorage.py
@@ -44,12 +44,11 @@ def snooze():
time.sleep(0.1)

def listeq(L1, L2):
"""Return True if L1.sort() == L2.sort()"""
c1 = L1[:]
c2 = L2[:]
c1.sort()
c2.sort()
return c1 == c2
"""Return True if L1.sort() == L2.sort()
Also support iterators.
"""
return sorted(L1) == sorted(L2)

class TransactionalUndoStorage:

@@ -59,7 +58,7 @@ def _transaction_begin(self):
def _transaction_store(self, oid, rev, data, vers, trans):
r = self._storage.store(oid, rev, data, vers, trans)
if r:
if type(r) == str:
if isinstance(r, bytes):
self.__serials[oid] = r
else:
for oid, serial in r:
@@ -432,7 +431,7 @@ def checkTransactionalUndoAfterPack(self):
# record by packing.

# Add a few object revisions
oid = '\0'*8
oid = b'\0'*8
revid0 = self._dostore(oid, data=MinPO(50))
revid1 = self._dostore(oid, revid=revid0, data=MinPO(51))
snooze()
@@ -492,14 +491,14 @@ def checkTransactionalUndoAfterPackWithObjectUnlinkFromRoot(self):

log = self._storage.undoLog()
eq(len(log), 4)
for entry in zip(log, ('o1 -> o3', 'o1 -> o2 -> o3',
'o1 -> o2', 'initial database creation')):
for entry in zip(log, (b'o1 -> o3', b'o1 -> o2 -> o3',
b'o1 -> o2', b'initial database creation')):
eq(entry[0]['description'], entry[1])

self._storage.pack(packtime, referencesf)

log = self._storage.undoLog()
for entry in zip(log, ('o1 -> o3', 'o1 -> o2 -> o3')):
for entry in zip(log, (b'o1 -> o3', b'o1 -> o2 -> o3')):
eq(entry[0]['description'], entry[1])

tid = log[0]['id']
@@ -511,7 +510,7 @@ def checkTransactionalUndoAfterPackWithObjectUnlinkFromRoot(self):
conn.sync()

log = self._storage.undoLog()
for entry in zip(log, ('undo', 'o1 -> o3', 'o1 -> o2 -> o3')):
for entry in zip(log, (b'undo', b'o1 -> o3', b'o1 -> o2 -> o3')):
eq(entry[0]['description'], entry[1])

eq(o1.obj, o2)
@@ -703,13 +702,13 @@ def undo(i):
L2.sort()
eq(L1, L2)

self.assertRaises(StopIteration, transactions.next)
self.assertRaises(StopIteration, next, transactions)

def checkUndoLogMetadata(self):
# test that the metadata is correct in the undo log
t = transaction.get()
t.note('t1')
t.setExtendedInfo('k2','this is transaction metadata')
t.setExtendedInfo('k2', 'this is transaction metadata')
t.setUser('u3',path='p3')
db = DB(self._storage)
conn = db.open()
@@ -721,9 +720,9 @@ def checkUndoLogMetadata(self):
l = self._storage.undoLog()
self.assertEqual(len(l),2)
d = l[0]
self.assertEqual(d['description'],'t1')
self.assertEqual(d['k2'],'this is transaction metadata')
self.assertEqual(d['user_name'],'p3 u3')
self.assertEqual(d['description'], b't1')
self.assertEqual(d['k2'], 'this is transaction metadata')
self.assertEqual(d['user_name'], b'p3 u3')

# A common test body for index tests on undoInfo and undoLog. Before
# ZODB 3.4, they always returned a wrong number of results (one too
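
The ``listeq()`` rewrite leans on ``sorted()``, which accepts any iterable and
returns a fresh list, so the helper now also works when an argument is an
iterator (a storage iterator, for instance). The remaining hunks repeat
patterns seen above: ``isinstance(r, bytes)`` instead of ``type(r) == str``,
byte strings for oids and transaction metadata, and the ``next()`` builtin.
The helper in isolation::

    def listeq(L1, L2):
        """True if both iterables hold the same items, ignoring order."""
        return sorted(L1) == sorted(L2)

    print(listeq([3, 1, 2], iter((1, 2, 3))))   # True; sorted() drains iterators
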
4 changes: 2 additions & 2 deletions src/ZODB/tests/blob_connection.txt
@@ -9,7 +9,7 @@ with some data:
>>> import transaction
>>> blob = Blob()
>>> data = blob.open("w")
>>> data.write("I'm a happy Blob.")
>>> _ = data.write(b"I'm a happy Blob.")
>>> data.close()

We also need a database with a blob supporting storage. (We're going to use
@@ -52,7 +52,7 @@ MVCC also works.
>>> transaction3 = transaction.TransactionManager()
>>> connection3 = database.open(transaction_manager=transaction3)
>>> f = connection.root()['myblob'].open('w')
>>> f.write('I am an ecstatic Blob.')
>>> _ = f.write(b'I am an ecstatic Blob.')
>>> f.close()
>>> transaction.commit()
>>> connection3.root()['myblob'].open('r').read()
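
The ``_ = data.write(...)`` assignments exist because on Python 3 ``write()``
returns the number of bytes written, which a bare doctest call would echo as
output, while Python 2 file writes return None and print nothing. Assigning to
``_`` gives identical (empty) output on both. Outside a doctest::

    f = open('scratch.bin', 'wb')        # assumed scratch file
    n = f.write(b"I'm a happy Blob.")    # Python 3: n == 17; Python 2: None
    f.close()
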
24 changes: 6 additions & 18 deletions src/ZODB/tests/blob_importexport.txt
@@ -1,17 +1,3 @@
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################

Import/export support for blob data
===================================

@@ -34,12 +20,14 @@ Put a couple blob objects in our database1 and on the filesystem:

>>> import time, os
>>> nothing = transaction.begin()
>>> data1 = 'x'*100000
>>> data1 = b'x'*100000
>>> blob1 = ZODB.blob.Blob()
>>> blob1.open('w').write(data1)
>>> data2 = 'y'*100000
>>> with blob1.open('w') as file:
... _ = file.write(data1)
>>> data2 = b'y'*100000
>>> blob2 = ZODB.blob.Blob()
>>> blob2.open('w').write(data2)
>>> with blob2.open('w') as file:
... _ = file.write(data2)
>>> d = PersistentMapping({'blob1':blob1, 'blob2':blob2})
>>> root1['blobdata'] = d
>>> transaction.commit()
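
Besides the bytes literals, the blob writes now sit in ``with`` blocks. The
old ``blob.open('w').write(data)`` left closing (and flushing) the file to
garbage collection, which is not guaranteed to happen before
``transaction.commit()`` on every Python implementation; the context manager
closes deterministically. The pattern on its own (assumes ``ZODB.blob`` is
importable)::

    import ZODB.blob

    blob1 = ZODB.blob.Blob()
    with blob1.open('w') as f:        # flushed and closed at block exit
        _ = f.write(b'x' * 100000)    # `_ =` again hides the Py3 return value
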
17 changes: 11 additions & 6 deletions src/ZODB/tests/blob_packing.txt
@@ -34,32 +34,37 @@ Put some revisions of a blob object in our database and on the filesystem:
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> blob = Blob()
>>> blob.open('w').write('this is blob data 0')
>>> with blob.open('w') as file:
... _ = file.write(b'this is blob data 0')
>>> root['blob'] = blob
>>> transaction.commit()
>>> tids.append(blob._p_serial)

>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 1')
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 1')
>>> transaction.commit()
>>> tids.append(blob._p_serial)

>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 2')
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 2')
>>> transaction.commit()
>>> tids.append(blob._p_serial)

>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 3')
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 3')
>>> transaction.commit()
>>> tids.append(blob._p_serial)

>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 4')
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 4')
>>> transaction.commit()
>>> tids.append(blob._p_serial)

@@ -74,7 +79,7 @@ Do a pack to the slightly before the first revision was written:
>>> blob_storage.pack(packtime, referencesf)
>>> [ os.path.exists(x) for x in fns ]
[True, True, True, True, True]

Do a pack to the slightly before the second revision was written:

>>> packtime = times[1]
