Skip to content


Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

upstream ioloop changes from tornado

in sync as of tornadoweb/tornado@38908bf (04/02/2012)

Mostly doc/pep8-related.  Only functional change seems to be
changing no-event poll-timeout to 1 hr from 200 ms.
  • Loading branch information...
commit bcb939561233ac67a7e85f47fcc013aea920fe75 1 parent a58bf77
@minrk authored
Showing with 53 additions and 8 deletions.
  1. +26 −7 zmq/eventloop/
  2. +27 −1 zmq/eventloop/
33 zmq/eventloop/
@@ -25,7 +25,7 @@
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
-from __future__ import with_statement
+from __future__ import absolute_import, division, with_statement
import datetime
import errno
@@ -46,7 +46,6 @@
thread_get_ident = thread.get_ident
from zmq.eventloop import stack_context
-from zmq.utils.strtypes import b
import signal
@@ -176,7 +175,20 @@ def close(self, all_fds=False):
"""Closes the IOLoop, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
- IOLoop will be closed (not just the ones created by the IOLoop itself.
+ IOLoop will be closed (not just the ones created by the IOLoop itself).
+ Many applications will only use a single IOLoop that runs for the
+ entire lifetime of the process. In that case closing the IOLoop
+ is not necessary since everything will be cleaned up when the
+ process exits. `IOLoop.close` is provided mainly for scenarios
+ such as unit tests, which create and destroy a large number of
+ IOLoops.
+ An IOLoop must be completely stopped before it can be closed. This
+ means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
+ be allowed to return before attempting to call `IOLoop.close()`.
+ Therefore the call to `close` will usually appear just after
+ the call to `start` rather than near the call to `stop`.
if all_fds:
@@ -256,8 +268,7 @@ def start(self):
self._thread_ident = thread_get_ident()
self._running = True
while True:
- # Never use an infinite timeout here - it can stall epoll
- poll_timeout = 0.2
+ poll_timeout = 3600.0
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
@@ -356,6 +367,9 @@ def stop(self):
ioloop.start() will return after async_method has run its callback,
whether that callback was invoked before or after ioloop.start.
+ Note that even after `stop` has been called, the IOLoop is not
+ completely stopped until `IOLoop.start` has also returned.
self._running = False
self._stopped = True
@@ -373,6 +387,10 @@ def add_timeout(self, deadline, callback):
``deadline`` may be a number denoting a unix timestamp (as returned
by ``time.time()``) or a ``datetime.timedelta`` object for a deadline
relative to the current time.
+ Note that it is not safe to call `add_timeout` from other threads.
+ Instead, you must use `add_callback` to transfer control to the
+ IOLoop's thread, and then call `add_timeout` from there.
timeout = _Timeout(deadline, stack_context.wrap(callback))
heapq.heappush(self._timeouts, timeout)
@@ -448,7 +466,7 @@ def __init__(self, deadline, callback):
def timedelta_to_seconds(td):
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
- return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / float(10**6)
+ return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
@@ -491,7 +509,8 @@ def stop(self):
self._timeout = None
def _run(self):
- if not self._running: return
+ if not self._running:
+ return
except Exception:
28 zmq/eventloop/
@@ -44,9 +44,28 @@ def die_on_error():
# in the ioloop.
http_client.fetch(url, callback)
+Most applications shouldn't have to work with `StackContext` directly.
+Here are a few rules of thumb for when it's necessary:
+* If you're writing an asynchronous library that doesn't rely on a
+ stack_context-aware library like `tornado.ioloop` or `tornado.iostream`
+ (for example, if you're writing a thread pool), use
+ `stack_context.wrap()` before any asynchronous operations to capture the
+ stack context from where the operation was started.
+* If you're writing an asynchronous library that has some shared
+ resources (such as a connection pool), create those shared resources
+ within a ``with stack_context.NullContext():`` block. This will prevent
+ ``StackContexts`` from leaking from one request to another.
+* If you want to write something like an exception handler that will
+ persist across asynchronous calls, create a new `StackContext` (or
+ `ExceptionStackContext`), and make your asynchronous calls in a ``with``
+ block that references your `StackContext`.
-from __future__ import with_statement
+from __future__ import absolute_import, division, with_statement
import contextlib
import functools
@@ -63,6 +82,7 @@ def __init__(self):
self.contexts = ()
_state = _State()
class StackContext(object):
'''Establishes the given context as a StackContext that will be transferred.
@@ -100,6 +120,7 @@ def __exit__(self, type, value, traceback):
_state.contexts = self.old_contexts
class ExceptionStackContext(object):
'''Specialization of StackContext for exception handling.
@@ -128,6 +149,7 @@ def __exit__(self, type, value, traceback):
_state.contexts = self.old_contexts
class NullContext(object):
'''Resets the StackContext.
@@ -142,9 +164,11 @@ def __enter__(self):
def __exit__(self, type, value, traceback):
_state.contexts = self.old_contexts
class _StackContextWrapper(functools.partial):
def wrap(fn):
'''Returns a callable object that will restore the current StackContext
when executed.
@@ -157,6 +181,7 @@ def wrap(fn):
return fn
# functools.wraps doesn't appear to work on functools.partial objects
def wrapped(callback, contexts, *args, **kwargs):
if contexts is _state.contexts or not contexts:
callback(*args, **kwargs)
@@ -191,6 +216,7 @@ def wrapped(callback, contexts, *args, **kwargs):
return _StackContextWrapper(fn)
def _nested(*managers):
"""Support multiple context managers in a single with-statement.
Please sign in to comment.
Something went wrong with that request. Please try again.