Older versions of multiprocessing closed fd 0
Problem
=======

Per https://docs.python.org/2/library/multiprocessing.html#all-platforms:

""" multiprocessing originally unconditionally called:

        os.close(sys.stdin.fileno())

    in the multiprocessing.Process._bootstrap() method
"""

Solution
========

We simulate this in a test case by using the multiprocessing
module and closing stdin before calling pexpect.spawn.

Replace the use of __stdin__.fileno() with __stdout__.fileno(); for
tcgetattr(3), any of stdin/stdout/stderr is fine. Then, allow that
ValueError("I/O operation on closed file") may be raised if stdout
is closed, and add another test case that closes stdout in the
same way.
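
As a quick illustration of the "any of stdin/out/err is fine" claim
(a sketch, not part of the commit): when the process is attached to a
terminal, every standard stream refers to the same controlling
terminal, so each reports identical VINTR and VEOF control characters:

    import sys
    import termios
    from termios import VEOF, VINTR

    for name, stream in (('stdin', sys.__stdin__),
                         ('stdout', sys.__stdout__),
                         ('stderr', sys.__stderr__)):
        if stream is not None and not stream.closed and stream.isatty():
            cc = termios.tcgetattr(stream.fileno())[6]
            print(name, ord(cc[VINTR]), ord(cc[VEOF]))
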
jquast committed Jul 2, 2014
1 parent b880e81 commit 146ee70
Showing 2 changed files with 44 additions and 6 deletions.
7 changes: 4 additions & 3 deletions pexpect/__init__.py
@@ -490,12 +490,13 @@ def __init__(self, command, args=[], timeout=30, maxread=2000,
         # inherit EOF and INTR definitions from controlling process.
         try:
             from termios import VEOF, VINTR
-            fd = sys.__stdin__.fileno()
+            fd = sys.__stdout__.fileno()
             self._INTR = ord(termios.tcgetattr(fd)[6][VINTR])
             self._EOF = ord(termios.tcgetattr(fd)[6][VEOF])
-        except (ImportError, OSError, IOError, termios.error):
+        except (ImportError, OSError, IOError, ValueError, termios.error):
             # unless the controlling process is also not a terminal,
-            # such as cron(1). Fall-back to using CEOF and CINTR.
+            # such as cron(1). Or stdout fd is closed - Fall-back
+            # to using CEOF and CINTR.
             try:
                 from termios import CEOF, CINTR
                 (self._INTR, self._EOF) = (CINTR, CEOF)
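For reference (not part of the diff): the fallback branch imports CEOF
and CINTR from termios; on typical Unix platforms these are the default
control characters Ctrl-D (0x04) and Ctrl-C (0x03). A minimal check,
assuming the platform's termios module exposes these constants:

    import termios

    # Default terminal control characters on typical Unix platforms:
    # CINTR is Ctrl-C (0x03), CEOF is Ctrl-D (0x04).
    print(hex(termios.CINTR), hex(termios.CEOF))
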
43 changes: 40 additions & 3 deletions tests/test_misc.py
@@ -353,10 +353,14 @@ def test_exception_tb(self):
         else:
             assert False, "Should have raised an exception."
 
-    def test_multiprocessing(self):
-        " ensure multiprocessing may be used with pexpect. "
+    def test_multiprocessing_closed_stdin(self):
+        " multiprocessing may be used with pexpect when stdin is closed. "
+        # in older versions of multiprocessing, stdin was closed;
+        # closing an already-closed fd is a non-op; we emulate this
+        # older version of multiprocessing by closing stdin.
         def target(queue):
             def _work():
+                sys.__stdin__.close()
                 child = pexpect.spawn('cat', echo=False)
                 child.sendline('\n'.join(['alpha', 'beta']))
                 alpha, beta = child.readline(), child.readline()
@@ -379,7 +383,40 @@ def _work():
         proc = multiprocessing.Process(target=target, args=[o_queue,])
         proc.start()
         proc.join()
-        assert o_queue.get() == (b'alpha', b'beta')
+        return_value = o_queue.get()
+        assert return_value == (b'alpha', b'beta'), return_value
+
+    def test_multiprocessing_closed_stdout(self):
+        " multiprocessing may be used with pexpect when stdout closed. "
+        # similar to above test, but just ensure ValueError is caught
+        # when stdout is closed in __init__.
+        def target(queue):
+            def _work():
+                sys.__stdout__.close()
+                child = pexpect.spawn('cat', echo=False)
+                child.sendline('\n'.join(['alpha', 'beta']))
+                alpha, beta = child.readline(), child.readline()
+                child.sendeof()
+                child.expect(pexpect.EOF)
+                assert alpha.rstrip() == b'alpha'
+                assert beta.rstrip() == b'beta'
+                assert not child.isalive(), ('child is alive', child.isalive())
+                assert child.exitstatus == 0, ('exit status', child.exitstatus)
+                # return output of sub-process
+                return (alpha.rstrip(), beta.rstrip(),)
+            try:
+                val = _work()
+            except Exception:
+                queue.put(''.join(format_exception(*sys.exc_info())))
+            else:
+                queue.put(val)
+
+        o_queue = multiprocessing.Queue()
+        proc = multiprocessing.Process(target=target, args=[o_queue,])
+        proc.start()
+        proc.join()
+        return_value = o_queue.get()
+        assert return_value == (b'alpha', b'beta'), return_value
 
 if __name__ == '__main__':
     unittest.main()
