Eliminate sleep in the lockutils test case (across processes)
Fork many processes and try to lock the same set of files
using flock without blocking, and make sure we wait for all
the processes to finish.

Fixes LP #1068316

Change-Id: I09964b2c5af63f31b5ddee1f18eaf646f8d8ba58
Davanum Srinivas committed Jan 7, 2013
1 parent aa8f8a3 commit 6024316
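As background for the diff below: the commit's approach hinges on flock semantics, where an exclusive lock requested with LOCK_NB fails immediately (EWOULDBLOCK) if another process already holds the lock, instead of sleeping. Here is a minimal standalone sketch of that behavior; it is not part of the commit, uses Python 3 syntax (the commit itself targets Python 2), and the temp-file naming is purely illustrative.

import fcntl
import os
import sys
import tempfile

# Scratch file to lock (illustrative; any path both processes can open would do).
fd, path = tempfile.mkstemp(prefix='flock-demo-')
os.close(fd)

with open(path, 'w') as handle:
    # Parent takes the exclusive lock first.
    fcntl.flock(handle, fcntl.LOCK_EX)

    pid = os.fork()
    if pid == 0:
        # Child: a fresh open() creates a new file description, so this
        # flock call contends with the parent's lock. LOCK_NB makes it
        # raise immediately (EWOULDBLOCK) instead of blocking.
        with open(path, 'w') as child_handle:
            try:
                fcntl.flock(child_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except OSError:
                os._exit(0)   # expected: the lock was held
            os._exit(1)       # unexpected: the lock was acquired

    _, status = os.waitpid(pid, 0)

os.unlink(path)
sys.exit(os.WEXITSTATUS(status))

Because flock locks belong to the open file description, the child must open() the file afresh; the handle it inherits across fork() already owns the parent's lock and would succeed trivially.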
Showing 1 changed file: tests/unit/test_lockutils.py (48 additions, 32 deletions)
@@ -15,6 +15,7 @@
 # under the License.
 
 import errno
+import fcntl
 import os
 import select
 import shutil
@@ -129,39 +130,54 @@ def inner_lock():
             if os.path.exists(tempdir):
                 shutil.rmtree(tempdir)
 
-    @testutils.skip_test("Regularly fails, see bug #1095957")
     def test_synchronized_externally(self):
         """We can lock across multiple processes"""
         tempdir = tempfile.mkdtemp()
         self.config(lock_path=tempdir)
-        rpipe1, wpipe1 = os.pipe()
-        rpipe2, wpipe2 = os.pipe()
-
-        @lockutils.synchronized('testlock1', 'test-', external=True)
-        def f(rpipe, wpipe):
-            try:
-                os.write(wpipe, "foo")
-            except OSError, e:
-                self.assertEquals(e.errno, errno.EPIPE)
-                return
-
-            rfds, _wfds, _efds = select.select([rpipe], [], [], 1)
-            self.assertEquals(len(rfds), 0, "The other process, which was "
-                                            "supposed to be locked, "
-                                            "wrote on its end of the "
-                                            "pipe")
-            os.close(rpipe)
-
-        pid = os.fork()
-        if pid > 0:
-            os.close(wpipe1)
-            os.close(rpipe2)
-
-            f(rpipe1, wpipe2)
-        else:
-            os.close(rpipe1)
-            os.close(wpipe2)
-
-            time.sleep(0.1)
-            f(rpipe2, wpipe1)
-            os._exit(0)
+
+        @lockutils.synchronized('external', 'test-', external=True)
+        def lock_files(tempdir):
+            if not os.path.exists(tempdir):
+                os.makedirs(tempdir)
+
+            # Open some files we can use for locking
+            handles = []
+            for n in range(50):
+                path = os.path.join(tempdir, ('file-%s' % n))
+                handles.append(open(path, 'w'))
+
+            # Loop over all the handles and try locking the file
+            # without blocking, keep a count of how many files we
+            # were able to lock and then unlock. If the lock fails
+            # we get an IOError and bail out with bad exit code
+            count = 0
+            for handle in handles:
+                try:
+                    fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
+                    count += 1
+                    fcntl.flock(handle, fcntl.LOCK_UN)
+                except IOError:
+                    os._exit(2)
+                finally:
+                    handle.close()
+
+            # Check if we were able to open all files
+            self.assertEqual(50, count)
+
+        try:
+            children = []
+            for n in range(50):
+                pid = os.fork()
+                if pid:
+                    children.append(pid)
+                else:
+                    lock_files(tempdir)
+                    os._exit(0)
+
+            for i, child in enumerate(children):
+                (pid, status) = os.waitpid(child, 0)
+                if pid:
+                    self.assertEqual(0, status)
+        finally:
+            if os.path.exists(tempdir):
+                shutil.rmtree(tempdir, ignore_errors=True)
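A side note on the reaping loop at the end of the new test: os.waitpid() returns a raw status word rather than the exit code itself, so assertEqual(0, status) passes for a clean exit and still catches a child that called os._exit(2), whose raw status is 512 (2 << 8). A small sketch of the more explicit decoding, using only standard-library calls (the helper name is hypothetical):

import os

def child_exit_codes(children):
    """Reap each forked child pid and return its decoded exit code."""
    codes = []
    for child in children:
        _, status = os.waitpid(child, 0)
        if os.WIFEXITED(status):
            # os._exit(2) in the child yields 2 here, but 512 in `status`.
            codes.append(os.WEXITSTATUS(status))
        else:
            # Terminated by a signal; report it as a negative number.
            codes.append(-os.WTERMSIG(status))
    return codes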
