71 changes: 34 additions & 37 deletions Lib/test/test_multiprocessing_main_handling.py
@@ -26,6 +26,31 @@

verbose = support.verbose

# The common suite for test scripts
test_source_suite = """\
import sys
from multiprocessing import Pool, set_start_method
start_method = sys.argv[1]
set_start_method(start_method)
p = Pool(5)
result = p.map_async({func}, {values})
timeout = 10
elapsed = 0
while timeout < 160: # this gives ~5m to finish
result.wait(timeout)
if result.ready():
break
elapsed += timeout
timeout *= 2
print('Warning -- unable to join workers after %d seconds' % elapsed,
file=sys.stderr)
else:
# waited long enough
raise RuntimeError("Timed out waiting for results")
results = sorted(result.get())
print(start_method, "->", results)
"""

test_source = """\
# multiprocessing includes all sorts of shenanigans to make __main__
# attributes accessible in the subprocess in a pickle compatible way.
@@ -34,10 +59,6 @@
# the docs to make sure it *does* work from an executed __main__,
# regardless of the invocation mechanism

import sys
import time
from multiprocessing import Pool, set_start_method

# We use this __main__ defined function in the map call below in order to
# check that multiprocessing is correctly running the unguarded
# code in child processes and then making it available as __main__
@@ -51,19 +72,7 @@ def f(x):
from . import sibling

if __name__ == '__main__':
start_method = sys.argv[1]
set_start_method(start_method)
p = Pool(5)
results = []
p.map_async(f, [1, 2, 3], callback=results.extend)
deadline = time.time() + 10 # up to 10 s to report the results
while not results:
time.sleep(0.05)
if time.time() > deadline:
raise RuntimeError("Timed out waiting for results")
results.sort()
print(start_method, "->", results)
"""
""" + test_source_suite.format(func='f', values=[1, 2, 3])

test_source_main_skipped_in_children = """\
# __main__.py files have an implied "if __name__ == '__main__'" so
@@ -74,24 +83,8 @@ def f(x):

if __name__ != "__main__":
raise RuntimeError("Should only be called as __main__!")

import sys
import time
from multiprocessing import Pool, set_start_method

start_method = sys.argv[1]
set_start_method(start_method)
p = Pool(5)
results = []
p.map_async(int, [1, 4, 9], callback=results.extend)
deadline = time.time() + 10 # up to 10 s to report the results
while not results:
time.sleep(0.05)
if time.time() > deadline:
raise RuntimeError("Timed out waiting for results")
results.sort()
print(start_method, "->", results)
"""
else:
""" + test_source_suite.format(func='int', values=[1, 4, 9])

# These helpers were copied from test_cmd_line_script & tweaked a bit...

@@ -143,8 +136,12 @@ def _check_output(self, script_name, exit_code, out, err):
if verbose > 1:
print("Output from test script %r:" % script_name)
print(repr(out))
self.assertEqual(exit_code, 0)
self.assertEqual(err.decode('utf-8'), '')
# Any output on stderr is strictly informational (at this point).
# An unsuccessful test script has an exit code which is already
# verified by assert_python_ok().
if verbose and err:
print(file=sys.stderr)
print(err.decode('utf-8'), file=sys.stderr, end=' ... ', flush=True)
expected_results = "%s -> [1, 4, 9]" % self.start_method
self.assertEqual(out.decode('utf-8').strip(), expected_results)
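
The removed assertions are covered elsewhere: assert_python_ok() from test.support.script_helper, which the new comment refers to, already fails the test when the interpreter exits with a non-zero code, so _check_output only needs to compare stdout and echo stderr in verbose runs. A minimal usage sketch, assuming only that documented behaviour of the helper (the script content here is illustrative):

from test.support.script_helper import assert_python_ok

rc, out, err = assert_python_ok('-c', 'print("spawn -> [1, 4, 9]")')
assert rc == 0                      # guaranteed by assert_python_ok itself
print(out.decode('utf-8').strip())  # spawn -> [1, 4, 9]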
