Skip to content

Commit

Permalink
Made thread joins interruptible by specifying a timeout (fixes #25)
Browse files Browse the repository at this point in the history
  • Loading branch information
agronholm committed Jan 18, 2016
1 parent b60cc06 commit 515263a
Show file tree
Hide file tree
Showing 4 changed files with 19 additions and 6 deletions.
6 changes: 6 additions & 0 deletions CHANGES
@@ -1,3 +1,9 @@
3.0.4
=====

- Fixed inability to forcibly terminate the process if there are pending workers


3.0.3
=====

Expand Down
6 changes: 3 additions & 3 deletions concurrent/futures/process.py
Expand Up @@ -77,7 +77,7 @@ def _python_exit():
for t, q in items:
q.put(None)
for t, q in items:
t.join()
t.join(sys.maxint)

# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
Expand Down Expand Up @@ -232,7 +232,7 @@ def shutdown_one_process():
# some multiprocessing.Queue methods may deadlock on Mac OS
# X.
for p in processes:
p.join()
p.join(sys.maxint)
call_queue.close()
return
del executor
Expand Down Expand Up @@ -347,7 +347,7 @@ def shutdown(self, wait=True):
# Wake up queue management thread
self._result_queue.put(None)
if wait:
self._queue_management_thread.join()
self._queue_management_thread.join(sys.maxint)
# To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._queue_management_thread = None
Expand Down
4 changes: 2 additions & 2 deletions concurrent/futures/thread.py
Expand Up @@ -36,7 +36,7 @@ def _python_exit():
for t, q in items:
q.put(None)
for t, q in items:
t.join()
t.join(sys.maxint)

atexit.register(_python_exit)

Expand Down Expand Up @@ -130,5 +130,5 @@ def shutdown(self, wait=True):
self._work_queue.put(None)
if wait:
for t in self._threads:
t.join()
t.join(sys.maxint)
shutdown.__doc__ = _base.Executor.shutdown.__doc__
9 changes: 8 additions & 1 deletion setup.py
@@ -1,4 +1,11 @@
#!/usr/bin/env python
from warnings import warn
import sys

if sys.version_info[0] > 2:
warn('This backport is meant only for Python 2.\n'
'Python 3 users do not need it, as the concurrent.futures '
'package is available in the standard library.')

extras = {}
try:
Expand All @@ -8,7 +15,7 @@
from distutils.core import setup

setup(name='futures',
version='3.0.3',
version='3.0.4',
description='Backport of the concurrent.futures package from Python 3.2',
author='Brian Quinlan',
author_email='brian@sweetapp.com',
Expand Down

2 comments on commit 515263a

@slishak
Copy link

@slishak slishak commented on 515263a Feb 9, 2016

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is causing errors for me on Python 2.7.10 x64 on Windows 8:

Traceback (most recent call last):
  File "C:\Program Files (x86)\Red Bull Technology\RBT VDG Python (x64)\lib\threading.py", line 810, in __bootstrap_inner
    self.run()
  File "C:\Program Files (x86)\Red Bull Technology\RBT VDG Python (x64)\lib\threading.py", line 763, in run
    self.__target(*self.__args, **self.__kwargs)
  File "C:\Program Files (x86)\Red Bull Technology\RBT VDG Python (x64)\lib\site-packages\concurrent\futures\process.py", line 235, in _queue_management_worker
    p.join(sys.maxint)
  File "C:\Program Files (x86)\Red Bull Technology\RBT VDG Python (x64)\lib\multiprocessing\process.py", line 145, in join
    res = self._popen.wait(timeout)
  File "C:\Program Files (x86)\Red Bull Technology\RBT VDG Python (x64)\lib\multiprocessing\forking.py", line 297, in wait
    res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
OverflowError: Python int too large to convert to C long

Seems to be because sys.maxint is multiplied by 1000 in forking.py just before the call to WaitForSingleObject.

@agronholm
Copy link
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fixed in 3.0.5.

Please sign in to comment.