Skip to content

Commit

Permalink
enhance SDMF/MDMF tests: zero-length files, size=None
Browse files Browse the repository at this point in the history
  • Loading branch information
warner committed Jul 28, 2015
1 parent a7e1dac commit 46d7085
Showing 1 changed file with 75 additions and 39 deletions.
114 changes: 75 additions & 39 deletions src/allmydata/test/test_mutable.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
from allmydata.monitor import Monitor
from allmydata.test.common import ShouldFailMixin
from allmydata.test.no_network import GridTestMixin
from foolscap.api import eventually, fireEventually
from foolscap.api import eventually, fireEventually, flushEventualQueue
from foolscap.logging import log
from allmydata.storage_client import StorageFarmBroker
from allmydata.storage.common import storage_index_to_dir
Expand Down Expand Up @@ -916,11 +916,13 @@ def _created(node):
d.addCallback(_created)
return d

def publish_mdmf(self):
def publish_mdmf(self, data=None):
# like publish_one, except that the result is guaranteed to be
# an MDMF file.
# self.CONTENTS should have more than one segment.
self.CONTENTS = "This is an MDMF file" * 100000
if data is None:
data = "This is an MDMF file" * 100000
self.CONTENTS = data
self.uploadable = MutableData(self.CONTENTS)
self._storage = FakeStorage()
self._nodemaker = make_nodemaker(self._storage)
Expand All @@ -933,10 +935,12 @@ def _created(node):
return d


def publish_sdmf(self):
def publish_sdmf(self, data=None):
# like publish_one, except that the result is guaranteed to be
# an SDMF file
self.CONTENTS = "This is an SDMF file" * 1000
if data is None:
data = "This is an SDMF file" * 1000
self.CONTENTS = data
self.uploadable = MutableData(self.CONTENTS)
self._storage = FakeStorage()
self._nodemaker = make_nodemaker(self._storage)
Expand All @@ -948,20 +952,6 @@ def _created(node):
d.addCallback(_created)
return d

def publish_empty_sdmf(self):
    # Like publish_sdmf, but publishes a zero-length file.
    # NOTE(review): unlike publish_sdmf, this passes keysize=None to
    # make_nodemaker — presumably to select the default RSA key size;
    # confirm against make_nodemaker's signature.
    self.CONTENTS = ""
    self.uploadable = MutableData(self.CONTENTS)
    self._storage = FakeStorage()
    self._nodemaker = make_nodemaker(self._storage, keysize=None)
    self._storage_broker = self._nodemaker.storage_broker
    d = self._nodemaker.create_mutable_file(self.uploadable,
                                            version=SDMF_VERSION)
    def _created(node):
        # keep both the original node and a second node built from its
        # cap, so tests can compare the two views of the same file
        self._fn = node
        self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
    d.addCallback(_created)
    return d


def publish_multiple(self, version=0):
self.CONTENTS = ["Contents 0",
Expand Down Expand Up @@ -1903,6 +1893,19 @@ def test_verify_mdmf_bad_encprivkey_uncheckable(self):
"test_verify_mdmf_bad_encprivkey_uncheckable")
return d

def test_verify_sdmf_empty(self):
    """Verify-check a zero-length SDMF file and expect a healthy report."""
    d = self.publish_sdmf("")
    d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True))
    # the label must match this test's name so failures point at the
    # right test (was "test_verify_sdmf", flagged in review)
    d.addCallback(self.check_good, "test_verify_sdmf_empty")
    # let any pending eventual-send turns run before the test ends
    d.addCallback(flushEventualQueue)
    return d

def test_verify_mdmf_empty(self):
    """Verify-check a zero-length MDMF file and expect a healthy report."""
    d = self.publish_mdmf("")
    d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True))
    # the label must match this test's name so failures point at the
    # right test (was "test_verify_mdmf", flagged in review)
    d.addCallback(self.check_good, "test_verify_mdmf_empty")
    # let any pending eventual-send turns run before the test ends
    d.addCallback(flushEventualQueue)
    return d

class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):

Expand Down Expand Up @@ -2155,7 +2158,7 @@ def test_repair_empty(self):
# In the buggy version, the check that precedes the retrieve+publish
# cycle uses MODE_READ, instead of MODE_REPAIR, and fails to get the
# privkey that repair needs.
d = self.publish_empty_sdmf()
d = self.publish_sdmf("")
def _delete_one_share(ign):
shares = self._storage._peers
for peerid in shares:
Expand Down Expand Up @@ -3071,8 +3074,10 @@ def setUp(self):
self.small_data = "test data" * 10 # 90 B; SDMF


def do_upload_mdmf(self):
d = self.nm.create_mutable_file(MutableData(self.data),
def do_upload_mdmf(self, data=None):
if data is None:
data = self.data
d = self.nm.create_mutable_file(MutableData(data),
version=MDMF_VERSION)
def _then(n):
assert isinstance(n, MutableFileNode)
Expand Down Expand Up @@ -3365,8 +3370,8 @@ def _test_partial_read(self, node, expected, modes, step):
d = node.get_best_readable_version()
for (name, offset, length) in modes:
d.addCallback(self._do_partial_read, name, expected, offset, length)
# then read the whole thing, but only a few bytes at a time, and see
# that the results are what we expect.
def _read_data(version):
c = consumer.MemoryConsumer()
d2 = defer.succeed(None)
Expand All @@ -3381,36 +3386,71 @@ def _read_data(version):
def _do_partial_read(self, version, name, expected, offset, length):
c = consumer.MemoryConsumer()
d = version.read(c, offset, length)
expected_range = expected[offset:offset+length]
if length is None:
expected_range = expected[offset:]
else:
expected_range = expected[offset:offset+length]
d.addCallback(lambda ignored: "".join(c.chunks))
def _check(results):
if results != expected_range:
print "read([%d]+%s) got %d bytes, not %d" % \
(offset, length, len(results), len(expected_range))
print "got: %s ... %s" % (results[:20], results[-20:])
print "exp: %s ... %s" % (expected_range[:20], expected_range[-20:])
self.fail("results[%s] != expected_range" % name)
return version # daisy-chained to next call
d.addCallback(_check)
return d

def test_partial_read_mdmf(self):
def test_partial_read_mdmf_0(self):
    """Exercise partial reads against a zero-length MDMF file."""
    empty = ""
    read_modes = [
        ("all1", 0, 0),
        ("all2", 0, None),
    ]
    d = self.do_upload_mdmf(data=empty)
    d.addCallback(self._test_partial_read, empty, read_modes, 1)
    return d

def test_partial_read_mdmf_large(self):
    """Partial reads against a multi-segment MDMF file: reads that start
    on, end just past, or have zero length at a segment boundary, plus
    whole-file reads via explicit length and via length=None.
    """
    segment_boundary = mathutil.next_multiple(128 * 1024, 3)
    # (stale diff-removed entries "complete_file"/"complete_file_past_end"
    # dropped; they duplicated complete_file1 and read past EOF)
    modes = [("start_on_segment_boundary", segment_boundary, 50),
             ("ending_one_byte_after_segment_boundary", segment_boundary-50, 51),
             ("zero_length_at_start", 0, 0),
             ("zero_length_in_middle", 50, 0),
             ("zero_length_at_segment_boundary", segment_boundary, 0),
             ("complete_file1", 0, len(self.data)),
             ("complete_file2", 0, None),
             ]
    d = self.do_upload_mdmf()
    d.addCallback(self._test_partial_read, self.data, modes, 10000)
    return d

def test_partial_read_sdmf_0(self):
    """Exercise partial reads against a zero-length SDMF file."""
    empty = ""
    read_modes = [
        ("all1", 0, 0),
        ("all2", 0, None),
    ]
    d = self.do_upload_sdmf(data=empty)
    d.addCallback(self._test_partial_read, empty, read_modes, 1)
    return d

def test_partial_read_sdmf_2(self):
    """Partial reads against a two-byte SDMF file: each byte alone, then
    the whole file."""
    contents = "hi"
    read_modes = [
        ("one_byte", 0, 1),
        ("last_byte", 1, 1),
        ("complete_file", 0, 2),
    ]
    d = self.do_upload_sdmf(data=contents)
    d.addCallback(self._test_partial_read, contents, read_modes, 1)
    return d

def test_partial_read_sdmf_90(self):
modes = [("start_at_middle", 50, 40),
("zero_length_at_start", 0, 0),
("zero_length_in_middle", 50, 0),
("complete_file", 0, 90),
("zero_length_at_end", 90, 0),
("complete_file1", 0, None),
("complete_file2", 0, 90),
]
d = self.do_upload_sdmf()
d.addCallback(self._test_partial_read, self.small_data, modes, 10)
Expand All @@ -3427,25 +3467,21 @@ def test_partial_read_sdmf_100(self):
d.addCallback(self._test_partial_read, data, modes, 10)
return d

# NOTE(review): this is the pre-move copy of test_partial_read_sdmf_2
# shown as removed by the diff; the commit relocates it earlier in the
# file. Logic is identical to the relocated version.
def test_partial_read_sdmf_2(self):
    # two-byte SDMF file: read each byte alone, then the whole file
    data = "hi"
    modes = [("one_byte", 0, 1),
             ("last_byte", 1, 1),
             ("complete_file", 0, 2),
             ]
    d = self.do_upload_sdmf(data=data)
    d.addCallback(self._test_partial_read, data, modes, 1)
    return d


def _test_read_and_download(self, node, expected):
d = node.get_best_readable_version()
def _read_data(version):
c = consumer.MemoryConsumer()
c2 = consumer.MemoryConsumer()
d2 = defer.succeed(None)
d2.addCallback(lambda ignored: version.read(c))
d2.addCallback(lambda ignored:
self.failUnlessEqual(expected, "".join(c.chunks)))

d2.addCallback(lambda ignored: version.read(c2, offset=0,
size=len(expected)))
d2.addCallback(lambda ignored:
self.failUnlessEqual(expected, "".join(c2.chunks)))
return d2
d.addCallback(_read_data)
d.addCallback(lambda ignored: node.download_best_version())
Expand Down

0 comments on commit 46d7085

Please sign in to comment.