Browse files

Merge branch 'master' into futures

Conflicts:
	tornado/autoreload.py
	tornado/ioloop.py
	tornado/netutil.py
	tornado/simple_httpclient.py
	tornado/test/ioloop_test.py
	tornado/test/runtests.py
	tox.ini
  • Loading branch information...
2 parents 58316a4 + 2ded266 commit aaf8c3687e00550e66179be6c3bb834d2246d1c2 @bdarnell bdarnell committed Sep 29, 2012
Showing with 1,693 additions and 1,070 deletions.
  1. +1 −1 .travis.yml
  2. +5 −1 demos/blog/README
  3. +3 −3 demos/blog/blog.py
  4. +0 −1 maint/requirements.txt
  5. +1 −3 maint/test/appengine/common/cgi_runtests.py
  6. +0 −2 maint/vm/ubuntu10.04/setup.sh
  7. +0 −3 maint/vm/ubuntu10.04/tox.ini
  8. +0 −2 maint/vm/ubuntu12.04/setup.sh
  9. +0 −2 maint/vm/ubuntu12.04/tox.ini
  10. +13 −13 tornado/auth.py
  11. +8 −4 tornado/autoreload.py
  12. +8 −7 tornado/curl_httpclient.py
  13. +0 −238 tornado/database.py
  14. +7 −2 tornado/httpclient.py
  15. +3 −8 tornado/httpserver.py
  16. +6 −6 tornado/httputil.py
  17. +91 −17 tornado/ioloop.py
  18. +249 −139 tornado/iostream.py
  19. +6 −6 tornado/locale.py
  20. +142 −0 tornado/log.py
  21. +3 −4 tornado/netutil.py
  22. +3 −88 tornado/options.py
  23. +3 −0 tornado/platform/common.py
  24. +5 −1 tornado/platform/interface.py
  25. +3 −0 tornado/platform/posix.py
  26. +2 −2 tornado/platform/twisted.py
  27. +130 −6 tornado/process.py
  28. +7 −6 tornado/simple_httpclient.py
  29. +8 −29 tornado/stack_context.py
  30. +3 −8 tornado/template.py
  31. +2 −2 tornado/test/auth_test.py
  32. +5 −3 tornado/test/gen_test.py
  33. +3 −5 tornado/test/httpclient_test.py
  34. +17 −11 tornado/test/httpserver_test.py
  35. +12 −7 tornado/test/httputil_test.py
  36. +0 −9 tornado/test/import_test.py
  37. +33 −7 tornado/test/ioloop_test.py
  38. +46 −18 tornado/test/iostream_test.py
  39. +118 −0 tornado/test/log_test.py
  40. +1 −100 tornado/test/options_test.py
  41. +122 −82 tornado/test/process_test.py
  42. +4 −0 tornado/test/runtests.py
  43. +24 −17 tornado/test/simple_httpclient_test.py
  44. +53 −9 tornado/test/stack_context_test.py
  45. +6 −5 tornado/test/template_test.py
  46. +2 −2 tornado/test/testing_test.py
  47. +7 −5 tornado/test/twisted_test.py
  48. +4 −0 tornado/test/util.py
  49. +190 −87 tornado/test/web_test.py
  50. +14 −7 tornado/test/wsgi_test.py
  51. +95 −13 tornado/testing.py
  52. +81 −41 tornado/web.py
  53. +23 −6 tornado/websocket.py
  54. +13 −15 tornado/wsgi.py
  55. +1 −9 tox.ini
  56. +0 −5 website/sphinx/database.rst
  57. +0 −1 website/sphinx/integration.rst
  58. +40 −1 website/sphinx/iostream.rst
  59. +5 −0 website/sphinx/log.rst
  60. +1 −0 website/sphinx/releases.rst
  61. +55 −0 website/sphinx/releases/next.rst
  62. +5 −0 website/sphinx/testing.rst
  63. +1 −1 website/sphinx/utilities.rst
View
2 .travis.yml
@@ -21,7 +21,7 @@ matrix:
install:
- if [[ $TRAVIS_PYTHON_VERSION == '2.5' ]]; then pip install --use-mirrors simplejson unittest2; fi
- if [[ $TRAVIS_PYTHON_VERSION == '2.6' ]]; then pip install --use-mirrors unittest2; fi
- - if [[ $FULL == 'true' ]]; then sudo apt-get install librtmp-dev; pip install --use-mirrors MySQL-python pycurl; fi
+ - if [[ $FULL == 'true' ]]; then sudo apt-get install librtmp-dev; pip install --use-mirrors pycurl; fi
- if [[ $FULL == 'true' && $TRAVIS_PYTHON_VERSION == '2.5' ]]; then pip install --use-mirrors twisted==11.0.0 'zope.interface<4.0'; fi
- if [[ $FULL == 'true' && $TRAVIS_PYTHON_VERSION == '2.6' ]]; then pip install --use-mirrors twisted==11.0.0; fi
- if [[ $FULL == 'true' && $TRAVIS_PYTHON_VERSION == '2.7' ]]; then pip install --use-mirrors twisted==12.0.0; fi
View
6 demos/blog/README
@@ -16,6 +16,10 @@ need to set up MySQL and the database schema for the demo to run.
can run "apt-get install mysql". Under OS X you can download the
MySQL PKG file from http://dev.mysql.com/downloads/mysql/
+3. Install Python prerequisites
+
+ Install the packages MySQL-python and torndb (e.g. using pip or easy_install)
+
3. Connect to MySQL and create a database and user for the blog.
Connect to MySQL as a user that can create databases and users:
@@ -51,7 +55,7 @@ need to set up MySQL and the database schema for the demo to run.
authentication.
Currently the first user to connect will automatically be given the
- ability to create and edit posts.
+ ability to create and edit posts.
Once you've created one blog post, subsequent users will not be
prompted to sign in.
View
6 demos/blog/blog.py
@@ -17,8 +17,8 @@
import markdown
import os.path
import re
+import torndb
import tornado.auth
-import tornado.database
import tornado.httpserver
import tornado.ioloop
import tornado.options
@@ -58,7 +58,7 @@ def __init__(self):
tornado.web.Application.__init__(self, handlers, **settings)
# Have one global connection to the blog DB across all handlers
- self.db = tornado.database.Connection(
+ self.db = torndb.Connection(
host=options.mysql_host, database=options.mysql_database,
user=options.mysql_user, password=options.mysql_password)
@@ -152,7 +152,7 @@ def get(self):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
-
+
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Google auth failed")
View
1 maint/requirements.txt
@@ -1,7 +1,6 @@
# Frozen pip requirements for tools used in the development of tornado
# Tornado's optional dependencies
-MySQL-python==1.2.3
Twisted==12.1.0
pycurl==7.19.0
View
4 maint/test/appengine/common/cgi_runtests.py
@@ -33,7 +33,6 @@ def import_everything():
# import tornado.auth
# import tornado.autoreload
# import tornado.curl_httpclient # depends on pycurl
- # import tornado.database # depends on MySQLdb
import tornado.escape
# import tornado.httpclient
# import tornado.httpserver
@@ -53,7 +52,7 @@ def import_everything():
import tornado.web
# import tornado.websocket
import tornado.wsgi
-
+
def all():
return unittest.defaultTestLoader.loadTestsFromNames(TEST_MODULES)
@@ -72,4 +71,3 @@ def main():
if __name__ == '__main__':
main()
-
View
2 maint/vm/ubuntu10.04/setup.sh
@@ -12,7 +12,6 @@ apt-get update
APT_PACKAGES="
python-pip
python-dev
-libmysqlclient-dev
libcurl4-openssl-dev
python-software-properties
"
@@ -40,7 +39,6 @@ apt-get -y install $DEADSNAKES_PACKAGES
PIP_PACKAGES="
virtualenv
tox
-MySQL-python
pycurl
twisted
"
View
3 maint/vm/ubuntu10.04/tox.ini
@@ -15,7 +15,6 @@ deps =
[testenv:py25-full]
basepython = python2.5
deps =
- MySQL-python
pycurl
simplejson
twisted==11.0.0
@@ -27,14 +26,12 @@ deps = unittest2
[testenv:py26-full]
deps =
- MySQL-python
pycurl
twisted==11.0.0
unittest2
[testenv:py27-full]
basepython = python2.7
deps =
- MySQL-python
pycurl
twisted==11.0.0
View
2 maint/vm/ubuntu12.04/setup.sh
@@ -12,7 +12,6 @@ apt-get update
APT_PACKAGES="
python-pip
python-dev
-libmysqlclient-dev
libcurl4-openssl-dev
python-software-properties
"
@@ -38,7 +37,6 @@ apt-get -y install $DEADSNAKES_PACKAGES
PIP_PACKAGES="
virtualenv
tox
-MySQL-python
pycurl
twisted
"
View
2 maint/vm/ubuntu12.04/tox.ini
@@ -15,7 +15,6 @@ deps =
[testenv:py25-full]
basepython = python2.5
deps =
- MySQL-python
pycurl
simplejson
twisted==11.0.0
@@ -29,6 +28,5 @@ deps = unittest2
[testenv:py27-full]
basepython = python2.7
deps =
- MySQL-python
pycurl
twisted==11.0.0
View
26 tornado/auth.py
@@ -50,7 +50,6 @@ def _on_auth(self, user):
import binascii
import hashlib
import hmac
-import logging
import time
import urllib
import urlparse
@@ -59,6 +58,7 @@ def _on_auth(self, user):
from tornado import httpclient
from tornado import escape
from tornado.httputil import url_concat
+from tornado.log import gen_log
from tornado.util import bytes_type, b
@@ -150,7 +150,7 @@ def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
def _on_authentication_verified(self, callback, response):
if response.error or b("is_valid:true") not in response.body:
- logging.warning("Invalid OpenID response: %s", response.error or
+ gen_log.warning("Invalid OpenID response: %s", response.error or
response.body)
callback(None)
return
@@ -263,14 +263,14 @@ def get_authenticated_user(self, callback, http_client=None):
oauth_verifier = self.get_argument("oauth_verifier", None)
request_cookie = self.get_cookie("_oauth_request_token")
if not request_cookie:
- logging.warning("Missing OAuth request token cookie")
+ gen_log.warning("Missing OAuth request token cookie")
callback(None)
return
self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
if cookie_key != request_key:
- logging.info((cookie_key, request_key, request_cookie))
- logging.warning("Request token does not match cookie")
+ gen_log.info((cookie_key, request_key, request_cookie))
+ gen_log.warning("Request token does not match cookie")
callback(None)
return
token = dict(key=cookie_key, secret=cookie_secret)
@@ -348,7 +348,7 @@ def _oauth_access_token_url(self, request_token):
def _on_access_token(self, callback, response):
if response.error:
- logging.warning("Could not fetch access token")
+ gen_log.warning("Could not fetch access token")
callback(None)
return
@@ -547,7 +547,7 @@ def _on_post(self, new_entry):
def _on_twitter_request(self, callback, response):
if response.error:
- logging.warning("Error response %s fetching %s", response.error,
+ gen_log.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
@@ -669,7 +669,7 @@ def _on_post(self, new_entry):
def _on_friendfeed_request(self, callback, response):
if response.error:
- logging.warning("Error response %s fetching %s", response.error,
+ gen_log.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
@@ -930,17 +930,17 @@ def _on_get_user_info(self, callback, session, users):
def _parse_response(self, callback, response):
if response.error:
- logging.warning("HTTP error from Facebook: %s", response.error)
+ gen_log.warning("HTTP error from Facebook: %s", response.error)
callback(None)
return
try:
json = escape.json_decode(response.body)
except Exception:
- logging.warning("Invalid JSON from Facebook: %r", response.body)
+ gen_log.warning("Invalid JSON from Facebook: %r", response.body)
callback(None)
return
if isinstance(json, dict) and json.get("error_code"):
- logging.warning("Facebook error: %d: %r", json["error_code"],
+ gen_log.warning("Facebook error: %d: %r", json["error_code"],
json.get("error_msg"))
callback(None)
return
@@ -1007,7 +1007,7 @@ def _on_login(self, user):
def _on_access_token(self, redirect_uri, client_id, client_secret,
callback, fields, response):
if response.error:
- logging.warning('Facebook auth error: %s' % str(response))
+ gen_log.warning('Facebook auth error: %s' % str(response))
callback(None)
return
@@ -1090,7 +1090,7 @@ def _on_post(self, new_entry):
def _on_facebook_request(self, callback, response):
if response.error:
- logging.warning("Error response %s fetching %s", response.error,
+ gen_log.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
View
12 tornado/autoreload.py
@@ -76,6 +76,7 @@
import subprocess
from tornado import ioloop
+from tornado.log import gen_log
from tornado import process
try:
@@ -178,7 +179,7 @@ def _check_file(modify_times, path):
modify_times[path] = modified
return
if modify_times[path] != modified:
- logging.info("%s modified; restarting server", path)
+ gen_log.info("%s modified; restarting server", path)
_reload()
@@ -273,9 +274,11 @@ def main():
# module) will see the right things.
exec f.read() in globals(), globals()
except SystemExit, e:
- logging.info("Script exited with status %s", e.code)
+ logging.basicConfig()
+ gen_log.info("Script exited with status %s", e.code)
except Exception, e:
- logging.warning("Script exited with uncaught exception", exc_info=True)
+ logging.basicConfig()
+ gen_log.warning("Script exited with uncaught exception", exc_info=True)
# If an exception occurred at import time, the file with the error
# never made it into sys.modules and so we won't know to watch it.
# Just to make sure we've covered everything, walk the stack trace
@@ -288,7 +291,8 @@ def main():
# from the exception object.
watch(e.filename)
else:
- logging.info("Script exited normally")
+ logging.basicConfig()
+ gen_log.info("Script exited normally")
# restore sys.argv so subsequent executions will include autoreload
sys.argv = original_argv
View
15 tornado/curl_httpclient.py
@@ -27,6 +27,7 @@
from tornado import httputil
from tornado import ioloop
+from tornado.log import gen_log
from tornado import stack_context
from tornado.escape import utf8
@@ -51,7 +52,7 @@ def initialize(self, io_loop=None, max_clients=10):
# socket_action is found in pycurl since 7.18.2 (it's been
# in libcurl longer than that but wasn't accessible to
# python).
- logging.warning("socket_action method missing from pycurl; "
+ gen_log.warning("socket_action method missing from pycurl; "
"falling back to socket_all. Upgrading "
"libcurl and pycurl will improve performance")
self._socket_action = \
@@ -263,7 +264,7 @@ def __init__(self, errno, message):
def _curl_create():
curl = pycurl.Curl()
- if logging.getLogger().isEnabledFor(logging.DEBUG):
+ if gen_log.isEnabledFor(logging.DEBUG):
curl.setopt(pycurl.VERBOSE, 1)
curl.setopt(pycurl.DEBUGFUNCTION, _curl_debug)
return curl
@@ -386,11 +387,11 @@ def ioctl(cmd):
userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
curl.setopt(pycurl.USERPWD, utf8(userpwd))
- logging.debug("%s %s (username: %r)", request.method, request.url,
+ gen_log.debug("%s %s (username: %r)", request.method, request.url,
request.auth_username)
else:
curl.unsetopt(pycurl.USERPWD)
- logging.debug("%s %s", request.method, request.url)
+ gen_log.debug("%s %s", request.method, request.url)
if request.client_cert is not None:
curl.setopt(pycurl.SSLCERT, request.client_cert)
@@ -426,12 +427,12 @@ def _curl_header_callback(headers, header_line):
def _curl_debug(debug_type, debug_msg):
debug_types = ('I', '<', '>', '<', '>')
if debug_type == 0:
- logging.debug('%s', debug_msg.strip())
+ gen_log.debug('%s', debug_msg.strip())
elif debug_type in (1, 2):
for line in debug_msg.splitlines():
- logging.debug('%s %s', debug_types[debug_type], line)
+ gen_log.debug('%s %s', debug_types[debug_type], line)
elif debug_type == 4:
- logging.debug('%s %r', debug_types[debug_type], debug_msg)
+ gen_log.debug('%s %r', debug_types[debug_type], debug_msg)
if __name__ == "__main__":
AsyncHTTPClient.configure(CurlAsyncHTTPClient)
View
238 tornado/database.py
@@ -1,238 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A lightweight wrapper around MySQLdb."""
-
-from __future__ import absolute_import, division, with_statement
-
-import copy
-import itertools
-import logging
-import time
-
-try:
- import MySQLdb.constants
- import MySQLdb.converters
- import MySQLdb.cursors
-except ImportError:
- # If MySQLdb isn't available this module won't actually be useable,
- # but we want it to at least be importable (mainly for readthedocs.org,
- # which has limitations on third-party modules)
- MySQLdb = None
-
-
-class Connection(object):
- """A lightweight wrapper around MySQLdb DB-API connections.
-
- The main value we provide is wrapping rows in a dict/object so that
- columns can be accessed by name. Typical usage::
-
- db = database.Connection("localhost", "mydatabase")
- for article in db.query("SELECT * FROM articles"):
- print article.title
-
- Cursors are hidden by the implementation, but other than that, the methods
- are very similar to the DB-API.
-
- We explicitly set the timezone to UTC and the character encoding to
- UTF-8 on all connections to avoid time zone and encoding errors.
- """
- def __init__(self, host, database, user=None, password=None,
- max_idle_time=7 * 3600):
- self.host = host
- self.database = database
- self.max_idle_time = max_idle_time
-
- args = dict(conv=CONVERSIONS, use_unicode=True, charset="utf8",
- db=database, init_command='SET time_zone = "+0:00"',
- sql_mode="TRADITIONAL")
- if user is not None:
- args["user"] = user
- if password is not None:
- args["passwd"] = password
-
- # We accept a path to a MySQL socket file or a host(:port) string
- if "/" in host:
- args["unix_socket"] = host
- else:
- self.socket = None
- pair = host.split(":")
- if len(pair) == 2:
- args["host"] = pair[0]
- args["port"] = int(pair[1])
- else:
- args["host"] = host
- args["port"] = 3306
-
- self._db = None
- self._db_args = args
- self._last_use_time = time.time()
- try:
- self.reconnect()
- except Exception:
- logging.error("Cannot connect to MySQL on %s", self.host,
- exc_info=True)
-
- def __del__(self):
- self.close()
-
- def close(self):
- """Closes this database connection."""
- if getattr(self, "_db", None) is not None:
- self._db.close()
- self._db = None
-
- def reconnect(self):
- """Closes the existing database connection and re-opens it."""
- self.close()
- self._db = MySQLdb.connect(**self._db_args)
- self._db.autocommit(True)
-
- def iter(self, query, *parameters):
- """Returns an iterator for the given query and parameters."""
- self._ensure_connected()
- cursor = MySQLdb.cursors.SSCursor(self._db)
- try:
- self._execute(cursor, query, parameters)
- column_names = [d[0] for d in cursor.description]
- for row in cursor:
- yield Row(zip(column_names, row))
- finally:
- cursor.close()
-
- def query(self, query, *parameters):
- """Returns a row list for the given query and parameters."""
- cursor = self._cursor()
- try:
- self._execute(cursor, query, parameters)
- column_names = [d[0] for d in cursor.description]
- return [Row(itertools.izip(column_names, row)) for row in cursor]
- finally:
- cursor.close()
-
- def get(self, query, *parameters):
- """Returns the first row returned for the given query."""
- rows = self.query(query, *parameters)
- if not rows:
- return None
- elif len(rows) > 1:
- raise Exception("Multiple rows returned for Database.get() query")
- else:
- return rows[0]
-
- # rowcount is a more reasonable default return value than lastrowid,
- # but for historical compatibility execute() must return lastrowid.
- def execute(self, query, *parameters):
- """Executes the given query, returning the lastrowid from the query."""
- return self.execute_lastrowid(query, *parameters)
-
- def execute_lastrowid(self, query, *parameters):
- """Executes the given query, returning the lastrowid from the query."""
- cursor = self._cursor()
- try:
- self._execute(cursor, query, parameters)
- return cursor.lastrowid
- finally:
- cursor.close()
-
- def execute_rowcount(self, query, *parameters):
- """Executes the given query, returning the rowcount from the query."""
- cursor = self._cursor()
- try:
- self._execute(cursor, query, parameters)
- return cursor.rowcount
- finally:
- cursor.close()
-
- def executemany(self, query, parameters):
- """Executes the given query against all the given param sequences.
-
- We return the lastrowid from the query.
- """
- return self.executemany_lastrowid(query, parameters)
-
- def executemany_lastrowid(self, query, parameters):
- """Executes the given query against all the given param sequences.
-
- We return the lastrowid from the query.
- """
- cursor = self._cursor()
- try:
- cursor.executemany(query, parameters)
- return cursor.lastrowid
- finally:
- cursor.close()
-
- def executemany_rowcount(self, query, parameters):
- """Executes the given query against all the given param sequences.
-
- We return the rowcount from the query.
- """
- cursor = self._cursor()
- try:
- cursor.executemany(query, parameters)
- return cursor.rowcount
- finally:
- cursor.close()
-
- def _ensure_connected(self):
- # Mysql by default closes client connections that are idle for
- # 8 hours, but the client library does not report this fact until
- # you try to perform a query and it fails. Protect against this
- # case by preemptively closing and reopening the connection
- # if it has been idle for too long (7 hours by default).
- if (self._db is None or
- (time.time() - self._last_use_time > self.max_idle_time)):
- self.reconnect()
- self._last_use_time = time.time()
-
- def _cursor(self):
- self._ensure_connected()
- return self._db.cursor()
-
- def _execute(self, cursor, query, parameters):
- try:
- return cursor.execute(query, parameters)
- except OperationalError:
- logging.error("Error connecting to MySQL on %s", self.host)
- self.close()
- raise
-
-
-class Row(dict):
- """A dict that allows for object-like property access syntax."""
- def __getattr__(self, name):
- try:
- return self[name]
- except KeyError:
- raise AttributeError(name)
-
-if MySQLdb is not None:
- # Fix the access conversions to properly recognize unicode/binary
- FIELD_TYPE = MySQLdb.constants.FIELD_TYPE
- FLAG = MySQLdb.constants.FLAG
- CONVERSIONS = copy.copy(MySQLdb.converters.conversions)
-
- field_types = [FIELD_TYPE.BLOB, FIELD_TYPE.STRING, FIELD_TYPE.VAR_STRING]
- if 'VARCHAR' in vars(FIELD_TYPE):
- field_types.append(FIELD_TYPE.VARCHAR)
-
- for field_type in field_types:
- CONVERSIONS[field_type] = [(FLAG.BINARY, str)] + CONVERSIONS[field_type]
-
- # Alias some common MySQL exceptions
- IntegrityError = MySQLdb.IntegrityError
- OperationalError = MySQLdb.OperationalError
View
9 tornado/httpclient.py
@@ -331,11 +331,15 @@ class HTTPResponse(object):
* code: numeric HTTP status code, e.g. 200 or 404
+ * reason: human-readable reason phrase describing the status code
+ (with curl_httpclient, this is a default value rather than the
+ server's actual response)
+
* headers: httputil.HTTPHeaders object
* buffer: cStringIO object for response body
- * body: respose body as string (created on demand from self.buffer)
+ * body: response body as string (created on demand from self.buffer)
* error: Exception object, if any
@@ -349,9 +353,10 @@ class HTTPResponse(object):
"""
def __init__(self, request, code, headers=None, buffer=None,
effective_url=None, error=None, request_time=None,
- time_info=None):
+ time_info=None, reason=None):
self.request = request
self.code = code
+ self.reason = reason or httplib.responses.get(code, "Unknown")
if headers is not None:
self.headers = headers
else:
View
11 tornado/httpserver.py
@@ -27,13 +27,13 @@ class except to start a server at the beginning of the process
from __future__ import absolute_import, division, with_statement
import Cookie
-import logging
import socket
import time
from tornado.escape import native_str, parse_qs_bytes
from tornado import httputil
from tornado import iostream
+from tornado.log import gen_log
from tornado.netutil import TCPServer
from tornado import stack_context
from tornado.util import b, bytes_type
@@ -267,7 +267,7 @@ def _on_headers(self, data):
self.request_callback(self._request)
except _BadRequestException, e:
- logging.info("Malformed HTTP request from %s: %s",
+ gen_log.info("Malformed HTTP request from %s: %s",
self.address[0], e)
self.close()
return
@@ -388,12 +388,7 @@ def __init__(self, method, uri, version="HTTP/1.0", headers=None,
self._finish_time = None
self.path, sep, self.query = uri.partition('?')
- arguments = parse_qs_bytes(self.query)
- self.arguments = {}
- for name, values in arguments.iteritems():
- values = [v for v in values if v]
- if values:
- self.arguments[name] = values
+ self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)
def supports_http_1_1(self):
"""Returns True if this request supports HTTP/1.1 semantics"""
View
12 tornado/httputil.py
@@ -18,11 +18,11 @@
from __future__ import absolute_import, division, with_statement
-import logging
import urllib
import re
from tornado.escape import native_str, parse_qs_bytes, utf8
+from tornado.log import gen_log
from tornado.util import b, ObjectDict
@@ -228,7 +228,7 @@ def parse_body_arguments(content_type, body, arguments, files):
parse_multipart_form_data(utf8(v), body, arguments, files)
break
else:
- logging.warning("Invalid multipart/form-data")
+ gen_log.warning("Invalid multipart/form-data")
def parse_multipart_form_data(boundary, data, arguments, files):
@@ -247,25 +247,25 @@ def parse_multipart_form_data(boundary, data, arguments, files):
boundary = boundary[1:-1]
final_boundary_index = data.rfind(b("--") + boundary + b("--"))
if final_boundary_index == -1:
- logging.warning("Invalid multipart/form-data: no final boundary")
+ gen_log.warning("Invalid multipart/form-data: no final boundary")
return
parts = data[:final_boundary_index].split(b("--") + boundary + b("\r\n"))
for part in parts:
if not part:
continue
eoh = part.find(b("\r\n\r\n"))
if eoh == -1:
- logging.warning("multipart/form-data missing headers")
+ gen_log.warning("multipart/form-data missing headers")
continue
headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
disp_header = headers.get("Content-Disposition", "")
disposition, disp_params = _parse_header(disp_header)
if disposition != "form-data" or not part.endswith(b("\r\n")):
- logging.warning("Invalid multipart/form-data")
+ gen_log.warning("Invalid multipart/form-data")
continue
value = part[eoh + 4:-2]
if not disp_params.get("name"):
- logging.warning("multipart/form-data value missing name")
+ gen_log.warning("multipart/form-data value missing name")
continue
name = disp_params["name"]
if disp_params.get("filename"):
View
108 tornado/ioloop.py
@@ -32,15 +32,16 @@
import errno
import functools
import heapq
-import os
import logging
+import os
import select
import thread
import threading
import time
import traceback
from tornado.concurrent import DummyFuture
+from tornado.log import app_log, gen_log
from tornado import stack_context
try:
@@ -214,7 +215,7 @@ def close(self, all_fds=False):
try:
os.close(fd)
except Exception:
- logging.debug("error closing fd %s", fd, exc_info=True)
+ gen_log.debug("error closing fd %s", fd, exc_info=True)
self._waker.close()
self._impl.close()
@@ -234,7 +235,7 @@ def remove_handler(self, fd):
try:
self._impl.unregister(fd)
except (OSError, IOError):
- logging.debug("Error deleting fd from IOLoop", exc_info=True)
+ gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the ioloop is blocked for more than s seconds.
@@ -248,8 +249,8 @@ def set_blocking_signal_threshold(self, seconds, action):
too long.
"""
if not hasattr(signal, "setitimer"):
- logging.error("set_blocking_signal_threshold requires a signal module "
- "with the setitimer method")
+ gen_log.error("set_blocking_signal_threshold requires a signal module "
+ "with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
@@ -267,23 +268,64 @@ def log_stack(self, signal, frame):
For use with set_blocking_signal_threshold.
"""
- logging.warning('IOLoop blocked for %f seconds in\n%s',
- self._blocking_signal_threshold,
- ''.join(traceback.format_stack(frame)))
+ gen_log.warning('IOLoop blocked for %f seconds in\n%s',
+ self._blocking_signal_threshold,
+ ''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the I/O handlers calls stop(), which
will make the loop stop after the current event iteration completes.
"""
+ if not logging.getLogger().handlers:
+ # The IOLoop catches and logs exceptions, so it's
+ # important that log output be visible. However, python's
+ # default behavior for non-root loggers (prior to python
+ # 3.2) is to print an unhelpful "no handlers could be
+ # found" message rather than the actual log entry, so we
+ # must explicitly configure logging if we've made it this
+ # far without anything.
+ logging.basicConfig()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
+
+ # signal.set_wakeup_fd closes a race condition in event loops:
+ # a signal may arrive at the beginning of select/poll/etc
+ # before it goes into its interruptible sleep, so the signal
+ # will be consumed without waking the select. The solution is
+ # for the (C, synchronous) signal handler to write to a pipe,
+ # which will then be seen by select.
+ #
+ # In python's signal handling semantics, this only matters on the
+ # main thread (fortunately, set_wakeup_fd only works on the main
+ # thread and will raise a ValueError otherwise).
+ #
+ # If someone has already set a wakeup fd, we don't want to
+ # disturb it. This is an issue for twisted, which does its
+ # SIGCHILD processing in response to its own wakeup fd being
+ # written to. As long as the wakeup fd is registered on the IOLoop,
+ # the loop will still wake up and everything should work.
+ old_wakeup_fd = None
+ if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
+ # requires python 2.6+, unix. set_wakeup_fd exists but crashes
+ # the python process on windows.
+ try:
+ old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
+ if old_wakeup_fd != -1:
+ # Already set, restore previous value. This is a little racy,
+ # but there's no clean get_wakeup_fd and in real use the
+ # IOLoop is just started once at the beginning.
+ signal.set_wakeup_fd(old_wakeup_fd)
+ old_wakeup_fd = None
+ except ValueError: # non-main thread
+ pass
+
while True:
poll_timeout = 3600.0
@@ -355,16 +397,18 @@ def start(self):
# Happens when the client closes the connection
pass
else:
- logging.error("Exception in I/O handler for fd %s",
+ app_log.error("Exception in I/O handler for fd %s",
fd, exc_info=True)
except Exception:
- logging.error("Exception in I/O handler for fd %s",
+ app_log.error("Exception in I/O handler for fd %s",
fd, exc_info=True)
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
+ if old_wakeup_fd is not None:
+ signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
"""Stop the loop after the current event loop iteration is complete.
@@ -424,11 +468,15 @@ def remove_timeout(self, timeout):
def add_callback(self, callback):
"""Calls the given callback on the next I/O loop iteration.
- It is safe to call this method from any thread at any time.
- Note that this is the *only* method in IOLoop that makes this
- guarantee; all other interaction with the IOLoop must be done
- from that IOLoop's thread. add_callback() may be used to transfer
+ It is safe to call this method from any thread at any time,
+ except from a signal handler. Note that this is the *only*
+ method in IOLoop that makes this thread-safety guarantee; all
+ other interaction with the IOLoop must be done from that
+ IOLoop's thread. add_callback() may be used to transfer
control from other threads to the IOLoop's thread.
+
+ To add a callback from a signal handler, see
+ `add_callback_from_signal`.
"""
with self._callback_lock:
list_empty = not self._callbacks
@@ -442,6 +490,32 @@ def add_callback(self, callback):
# avoid it when we can.
self._waker.wake()
+ def add_callback_from_signal(self, callback):
+ """Calls the given callback on the next I/O loop iteration.
+
+ Safe for use from a Python signal handler; should not be used
+ otherwise.
+
+ Callbacks added with this method will be run without any
+ stack_context, to avoid picking up the context of the function
+ that was interrupted by the signal.
+ """
+ with stack_context.NullContext():
+ if thread.get_ident() != self._thread_ident:
+ # if the signal is handled on another thread, we can add
+ # it normally (modulo the NullContext)
+ self.add_callback(callback)
+ else:
+ # If we're on the IOLoop's thread, we cannot use
+ # the regular add_callback because it may deadlock on
+ # _callback_lock. Blindly insert into self._callbacks.
+ # This is safe because the GIL makes list.append atomic.
+ # One subtlety is that if the signal interrupted the
+ # _callback_lock block in IOLoop.start, we may modify
+ # either the old or new version of self._callbacks,
+ # but either way will work.
+ self._callbacks.append(stack_context.wrap(callback))
+
if futures is not None:
_FUTURE_TYPES = (futures.Future, DummyFuture)
else:
@@ -471,7 +545,7 @@ def handle_callback_exception(self, callback):
The exception itself is not passed explicitly, but is available
in sys.exc_info.
"""
- logging.error("Exception in callback %r", callback, exc_info=True)
+ app_log.error("Exception in callback %r", callback, exc_info=True)
class _Timeout(object):
@@ -540,7 +614,7 @@ def _run(self):
try:
self.callback()
except Exception:
- logging.error("Error in periodic callback", exc_info=True)
+ app_log.error("Error in periodic callback", exc_info=True)
self._schedule_next()
def _schedule_next(self):
@@ -707,5 +781,5 @@ def poll(self, timeout):
# All other systems
import sys
if "linux" in sys.platform:
- logging.warning("epoll module not found; using select()")
+ gen_log.warning("epoll module not found; using select()")
_poll = _Select
View
388 tornado/iostream.py
@@ -14,19 +14,27 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""A utility class to write to and read from a non-blocking socket."""
+"""Utility classes to write to and read from non-blocking files and sockets.
+
+Contents:
+
+* `BaseIOStream`: Generic interface for reading and writing.
+* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
+* `SSLIOStream`: SSL-aware version of IOStream.
+* `PipeIOStream`: Pipe-based IOStream implementation.
+"""
from __future__ import absolute_import, division, with_statement
import collections
import errno
-import logging
import os
import socket
import sys
import re
from tornado import ioloop
+from tornado.log import gen_log, app_log
from tornado import stack_context
from tornado.util import b, bytes_type
@@ -35,56 +43,27 @@
except ImportError:
ssl = None
+try:
+ from tornado.platform.posix import _set_nonblocking
+except ImportError:
+ _set_nonblocking = None
+
-class IOStream(object):
- r"""A utility class to write to and read from a non-blocking socket.
+class BaseIOStream(object):
+ """A utility class to write to and read from a non-blocking file or socket.
We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
All of the methods take callbacks (since writing and reading are
non-blocking and asynchronous).
- The socket parameter may either be connected or unconnected. For
- server operations the socket is the result of calling socket.accept().
- For client operations the socket is created with socket.socket(),
- and may either be connected before passing it to the IOStream or
- connected with IOStream.connect.
-
When a stream is closed due to an error, the IOStream's `error`
attribute contains the exception object.
- A very simple (and broken) HTTP client using this class::
-
- from tornado import ioloop
- from tornado import iostream
- import socket
-
- def send_request():
- stream.write("GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
- stream.read_until("\r\n\r\n", on_headers)
-
- def on_headers(data):
- headers = {}
- for line in data.split("\r\n"):
- parts = line.split(":")
- if len(parts) == 2:
- headers[parts[0].strip()] = parts[1].strip()
- stream.read_bytes(int(headers["Content-Length"]), on_body)
-
- def on_body(data):
- print data
- stream.close()
- ioloop.IOLoop.instance().stop()
-
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
- stream = iostream.IOStream(s)
- stream.connect(("friendfeed.com", 80), send_request)
- ioloop.IOLoop.instance().start()
-
+ Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
+ `read_from_fd`, and optionally `get_fd_error`.
"""
- def __init__(self, socket, io_loop=None, max_buffer_size=104857600,
+ def __init__(self, io_loop=None, max_buffer_size=104857600,
read_chunk_size=4096):
- self.socket = socket
- self.socket.setblocking(False)
self.io_loop = io_loop or ioloop.IOLoop.instance()
self.max_buffer_size = max_buffer_size
self.read_chunk_size = read_chunk_size
@@ -105,40 +84,45 @@ def __init__(self, socket, io_loop=None, max_buffer_size=104857600,
self._connecting = False
self._state = None
self._pending_callbacks = 0
+ self._closed = False
- def connect(self, address, callback=None):
- """Connects the socket to a remote address without blocking.
+ def fileno(self):
+ """Returns the file descriptor for this stream."""
+ raise NotImplementedError()
- May only be called if the socket passed to the constructor was
- not previously connected. The address parameter is in the
- same format as for socket.connect, i.e. a (host, port) tuple.
- If callback is specified, it will be called when the
- connection is completed.
+ def close_fd(self):
+ """Closes the file underlying this stream.
- Note that it is safe to call IOStream.write while the
- connection is pending, in which case the data will be written
- as soon as the connection is ready. Calling IOStream read
- methods before the socket is connected works on some platforms
- but is non-portable.
+ ``close_fd`` is called by `BaseIOStream` and should not be called
+ elsewhere; other users should call `close` instead.
"""
- self._connecting = True
- try:
- self.socket.connect(address)
- except socket.error, e:
- # In non-blocking mode we expect connect() to raise an
- # exception with EINPROGRESS or EWOULDBLOCK.
- #
- # On freebsd, other errors such as ECONNREFUSED may be
- # returned immediately when attempting to connect to
- # localhost, so handle them the same way as an error
- # reported later in _handle_connect.
- if e.args[0] not in (errno.EINPROGRESS, errno.EWOULDBLOCK):
- logging.warning("Connect error on fd %d: %s",
- self.socket.fileno(), e)
- self.close()
- return
- self._connect_callback = stack_context.wrap(callback)
- self._add_io_state(self.io_loop.WRITE)
+ raise NotImplementedError()
+
+ def write_to_fd(self, data):
+ """Attempts to write ``data`` to the underlying file.
+
+ Returns the number of bytes written.
+ """
+ raise NotImplementedError()
+
+ def read_from_fd(self):
+ """Attempts to read from the underlying file.
+
+ Returns ``None`` if there was nothing to read (the socket returned
+ EWOULDBLOCK or equivalent), otherwise returns the data. When possible,
+ should return no more than ``self.read_chunk_size`` bytes at a time.
+ """
+ raise NotImplementedError()
+
+ def get_fd_error(self):
+ """Returns information about any error on the underlying file.
+
+ This method is called after the IOLoop has signaled an error on the
+ file descriptor, and should return an Exception (such as `socket.error`
+ with additional information) or None if no such information is
+ available.
+ """
+ return None
def read_until_regex(self, regex, callback):
"""Call callback when we read the given regex pattern."""
@@ -219,7 +203,7 @@ def set_close_callback(self, callback):
def close(self):
"""Close this stream."""
- if self.socket is not None:
+ if not self.closed():
if any(sys.exc_info()):
self.error = sys.exc_info()[1]
if self._read_until_close:
@@ -229,14 +213,14 @@ def close(self):
self._run_callback(callback,
self._consume(self._read_buffer_size))
if self._state is not None:
- self.io_loop.remove_handler(self.socket.fileno())
+ self.io_loop.remove_handler(self.fileno())
self._state = None
- self.socket.close()
- self.socket = None
+ self.close_fd()
+ self._closed = True
self._maybe_run_close_callback()
def _maybe_run_close_callback(self):
- if (self.socket is None and self._close_callback and
+ if (self.closed() and self._close_callback and
self._pending_callbacks == 0):
# if there are pending callbacks, don't run the close callback
# until they're done (see _maybe_add_error_handler)
@@ -254,27 +238,25 @@ def writing(self):
def closed(self):
"""Returns true if the stream has been closed."""
- return self.socket is None
+ return self._closed
def _handle_events(self, fd, events):
- if not self.socket:
- logging.warning("Got events for closed stream %d", fd)
+ if self.closed():
+ gen_log.warning("Got events for closed stream %d", fd)
return
try:
if events & self.io_loop.READ:
self._handle_read()
- if not self.socket:
+ if self.closed():
return
if events & self.io_loop.WRITE:
if self._connecting:
self._handle_connect()
self._handle_write()
- if not self.socket:
+ if self.closed():
return
if events & self.io_loop.ERROR:
- errno = self.socket.getsockopt(socket.SOL_SOCKET,
- socket.SO_ERROR)
- self.error = socket.error(errno, os.strerror(errno))
+ self.error = self.get_fd_error()
# We may have queued up a user callback in _handle_read or
# _handle_write, so don't close the IOStream until those
# callbacks have had a chance to run.
@@ -291,9 +273,9 @@ def _handle_events(self, fd, events):
assert self._state is not None, \
"shouldn't happen: _handle_events without self._state"
self._state = state
- self.io_loop.update_handler(self.socket.fileno(), self._state)
+ self.io_loop.update_handler(self.fileno(), self._state)
except Exception:
- logging.error("Uncaught exception, closing connection.",
+ gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
self.close()
raise
@@ -304,7 +286,7 @@ def wrapper():
try:
callback(*args)
except Exception:
- logging.error("Uncaught exception, closing connection.",
+ app_log.error("Uncaught exception, closing connection.",
exc_info=True)
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
@@ -357,7 +339,7 @@ def _handle_read(self):
finally:
self._pending_callbacks -= 1
except Exception:
- logging.warning("error on read", exc_info=True)
+ gen_log.warning("error on read", exc_info=True)
self.close()
return
if self._read_from_buffer():
@@ -393,24 +375,6 @@ def _try_inline_read(self):
return
self._maybe_add_error_listener()
- def _read_from_socket(self):
- """Attempts to read from the socket.
-
- Returns the data read or None if there is nothing to read.
- May be overridden in subclasses.
- """
- try:
- chunk = self.socket.recv(self.read_chunk_size)
- except socket.error, e:
- if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
- return None
- else:
- raise
- if not chunk:
- self.close()
- return None
- return chunk
-
def _read_to_buffer(self):
"""Reads from the socket and appends the result to the read buffer.
@@ -419,19 +383,19 @@ def _read_to_buffer(self):
error closes the socket and raises an exception.
"""
try:
- chunk = self._read_from_socket()
- except socket.error, e:
+ chunk = self.read_from_fd()
+ except (socket.error, IOError, OSError), e:
# ssl.SSLError is a subclass of socket.error
- logging.warning("Read error on %d: %s",
- self.socket.fileno(), e)
+ gen_log.warning("Read error on %d: %s",
+ self.fileno(), e)
self.close()
raise
if chunk is None:
return 0
self._read_buffer.append(chunk)
self._read_buffer_size += len(chunk)
if self._read_buffer_size >= self.max_buffer_size:
- logging.error("Reached maximum read buffer size")
+ gen_log.error("Reached maximum read buffer size")
self.close()
raise IOError("Reached maximum read buffer size")
return len(chunk)
@@ -496,24 +460,6 @@ def _read_from_buffer(self):
_double_prefix(self._read_buffer)
return False
- def _handle_connect(self):
- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- if err != 0:
- self.error = socket.error(err, os.strerror(err))
- # IOLoop implementations may vary: some of them return
- # an error state before the socket becomes writable, so
- # in that case a connection failure would be handled by the
- # error path in _handle_events instead of here.
- logging.warning("Connect error on fd %d: %s",
- self.socket.fileno(), errno.errorcode[err])
- self.close()
- return
- if self._connect_callback is not None:
- callback = self._connect_callback
- self._connect_callback = None
- self._run_callback(callback)
- self._connecting = False
-
def _handle_write(self):
while self._write_buffer:
try:
@@ -524,7 +470,7 @@ def _handle_write(self):
# process. Therefore we must not call socket.send
# with more than 128KB at a time.
_merge_prefix(self._write_buffer, 128 * 1024)
- num_bytes = self.socket.send(self._write_buffer[0])
+ num_bytes = self.write_to_fd(self._write_buffer[0])
if num_bytes == 0:
# With OpenSSL, if we couldn't write the entire buffer,
# the very same string object must be used on the
@@ -544,8 +490,8 @@ def _handle_write(self):
self._write_buffer_frozen = True
break
else:
- logging.warning("Write error on %d: %s",
- self.socket.fileno(), e)
+ gen_log.warning("Write error on %d: %s",
+ self.fileno(), e)
self.close()
return
if not self._write_buffer and self._write_callback:
@@ -561,12 +507,12 @@ def _consume(self, loc):
return self._read_buffer.popleft()
def _check_closed(self):
- if not self.socket:
+ if self.closed():
raise IOError("Stream is closed")
def _maybe_add_error_listener(self):
if self._state is None and self._pending_callbacks == 0:
- if self.socket is None:
+ if self.closed():
self._maybe_run_close_callback()
else:
self._add_io_state(ioloop.IOLoop.READ)
@@ -592,17 +538,143 @@ def _add_io_state(self, state):
(since the write callback is optional so we can have a
fast-path write with no `_run_callback`)
"""
- if self.socket is None:
+ if self.closed():
# connection has been closed, so there can be no future events
return
if self._state is None:
self._state = ioloop.IOLoop.ERROR | state
with stack_context.NullContext():
self.io_loop.add_handler(
- self.socket.fileno(), self._handle_events, self._state)
+ self.fileno(), self._handle_events, self._state)
elif not self._state & state:
self._state = self._state | state
- self.io_loop.update_handler(self.socket.fileno(), self._state)
+ self.io_loop.update_handler(self.fileno(), self._state)
+
+
+class IOStream(BaseIOStream):
+ r"""Socket-based IOStream implementation.
+
+ This class supports the read and write methods from `BaseIOStream`
+ plus a `connect` method.
+
+ The socket parameter may either be connected or unconnected. For
+ server operations the socket is the result of calling socket.accept().
+ For client operations the socket is created with socket.socket(),
+ and may either be connected before passing it to the IOStream or
+ connected with IOStream.connect.
+
+ A very simple (and broken) HTTP client using this class::
+
+ from tornado import ioloop
+ from tornado import iostream
+ import socket
+
+ def send_request():
+ stream.write("GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
+ stream.read_until("\r\n\r\n", on_headers)
+
+ def on_headers(data):
+ headers = {}
+ for line in data.split("\r\n"):
+ parts = line.split(":")
+ if len(parts) == 2:
+ headers[parts[0].strip()] = parts[1].strip()
+ stream.read_bytes(int(headers["Content-Length"]), on_body)
+
+ def on_body(data):
+ print data
+ stream.close()
+ ioloop.IOLoop.instance().stop()
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
+ stream = iostream.IOStream(s)
+ stream.connect(("friendfeed.com", 80), send_request)
+ ioloop.IOLoop.instance().start()
+ """
+ def __init__(self, socket, *args, **kwargs):
+ self.socket = socket
+ self.socket.setblocking(False)
+ super(IOStream, self).__init__(*args, **kwargs)
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ def close_fd(self):
+ self.socket.close()
+ self.socket = None
+
+ def get_fd_error(self):
+ errno = self.socket.getsockopt(socket.SOL_SOCKET,
+ socket.SO_ERROR)
+ return socket.error(errno, os.strerror(errno))
+
+ def read_from_fd(self):
+ try:
+ chunk = self.socket.recv(self.read_chunk_size)
+ except socket.error, e:
+ if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
+ return None
+ else:
+ raise
+ if not chunk:
+ self.close()
+ return None
+ return chunk
+
+ def write_to_fd(self, data):
+ return self.socket.send(data)
+
+ def connect(self, address, callback=None):
+ """Connects the socket to a remote address without blocking.
+
+ May only be called if the socket passed to the constructor was
+ not previously connected. The address parameter is in the
+ same format as for socket.connect, i.e. a (host, port) tuple.
+ If callback is specified, it will be called when the
+ connection is completed.
+
+ Note that it is safe to call IOStream.write while the
+ connection is pending, in which case the data will be written
+ as soon as the connection is ready. Calling IOStream read
+ methods before the socket is connected works on some platforms
+ but is non-portable.
+ """
+ self._connecting = True
+ try:
+ self.socket.connect(address)
+ except socket.error, e:
+ # In non-blocking mode we expect connect() to raise an
+ # exception with EINPROGRESS or EWOULDBLOCK.
+ #
+ # On freebsd, other errors such as ECONNREFUSED may be
+ # returned immediately when attempting to connect to
+ # localhost, so handle them the same way as an error
+ # reported later in _handle_connect.
+ if e.args[0] not in (errno.EINPROGRESS, errno.EWOULDBLOCK):
+ gen_log.warning("Connect error on fd %d: %s",
+ self.socket.fileno(), e)
+ self.close()
+ return
+ self._connect_callback = stack_context.wrap(callback)
+ self._add_io_state(self.io_loop.WRITE)
+
+ def _handle_connect(self):
+ err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if err != 0:
+ self.error = socket.error(err, os.strerror(err))
+ # IOLoop implementations may vary: some of them return
+ # an error state before the socket becomes writable, so
+ # in that case a connection failure would be handled by the
+ # error path in _handle_events instead of here.
+ gen_log.warning("Connect error on fd %d: %s",
+ self.socket.fileno(), errno.errorcode[err])
+ self.close()
+ return
+ if self._connect_callback is not None:
+ callback = self._connect_callback
+ self._connect_callback = None
+ self._run_callback(callback)
+ self._connecting = False
class SSLIOStream(IOStream):
@@ -656,7 +728,7 @@ def _do_ssl_handshake(self):
peer = self.socket.getpeername()
except:
peer = '(not connected)'
- logging.warning("SSL Error on %d %s: %s",
+ gen_log.warning("SSL Error on %d %s: %s",
self.socket.fileno(), peer, err)
return self.close()
raise
@@ -700,7 +772,7 @@ def _handle_connect(self):
**self._ssl_options)
super(SSLIOStream, self)._handle_connect()
- def _read_from_socket(self):
+ def read_from_fd(self):
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
@@ -730,6 +802,44 @@ def _read_from_socket(self):
return None
return chunk
+class PipeIOStream(BaseIOStream):
+ """Pipe-based IOStream implementation.
+
+ The constructor takes an integer file descriptor (such as one returned
+ by `os.pipe`) rather than an open file object.
+ """
+ def __init__(self, fd, *args, **kwargs):
+ self.fd = fd
+ _set_nonblocking(fd)
+ super(PipeIOStream, self).__init__(*args, **kwargs)
+
+ def fileno(self):
+ return self.fd
+
+ def close_fd(self):
+ os.close(self.fd)
+
+ def write_to_fd(self, data):
+ return os.write(self.fd, data)
+
+ def read_from_fd(self):
+ try:
+ chunk = os.read(self.fd, self.read_chunk_size)
+ except (IOError, OSError), e:
+ if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
+ return None
+ elif e.args[0] == errno.EBADF:
+ # If the writing half of a pipe is closed, select will
+ # report it as readable but reads will fail with EBADF.
+ self.close()
+ return None
+ else:
+ raise
+ if not chunk:
+ self.close()
+ return None
+ return chunk
+
def _double_prefix(deque):
"""Grow by doubling, but don't split the second chunk just because the
View
12 tornado/locale.py
@@ -43,11 +43,11 @@
import csv
import datetime
-import logging
import os
import re
from tornado import escape
+from tornado.log import gen_log
_default_locale = "en_US"
_translations = {}
@@ -118,7 +118,7 @@ def load_translations(directory):
continue
locale, extension = path.split(".")
if not re.match("[a-z]+(_[A-Z]+)?$", locale):
- logging.error("Unrecognized locale %r (path: %s)", locale,
+ gen_log.error("Unrecognized locale %r (path: %s)", locale,
os.path.join(directory, path))
continue
full_path = os.path.join(directory, path)
@@ -142,13 +142,13 @@ def load_translations(directory):
else:
plural = "unknown"
if plural not in ("plural", "singular", "unknown"):
- logging.error("Unrecognized plural indicator %r in %s line %d",
+ gen_log.error("Unrecognized plural indicator %r in %s line %d",
plural, path, i + 1)
continue
_translations[locale].setdefault(plural, {})[english] = translation
f.close()
_supported_locales = frozenset(_translations.keys() + [_default_locale])
- logging.debug("Supported locales: %s", sorted(_supported_locales))
+ gen_log.debug("Supported locales: %s", sorted(_supported_locales))
def load_gettext_translations(directory, domain):
@@ -184,11 +184,11 @@ def load_gettext_translations(directory, domain):
_translations[lang] = gettext.translation(domain, directory,
languages=[lang])
except Exception, e:
- logging.error("Cannot load translation for '%s': %s", lang, str(e))
+ gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
continue
_supported_locales = frozenset(_translations.keys() + [_default_locale])
_use_gettext = True
- logging.debug("Supported locales: %s", sorted(_supported_locales))
+ gen_log.debug("Supported locales: %s", sorted(_supported_locales))
def get_supported_locales():
View
142 tornado/log.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Logging support for Tornado.
+
+Tornado uses three logger streams:
+
+* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and
+ potentially other servers in the future)
+* ``tornado.application``: Logging of errors from application code (i.e.
+ uncaught exceptions from callbacks)
+* ``tornado.general``: General-purpose logging, including any errors
+ or warnings from Tornado itself.
+
+These streams may be configured independently using the standard library's
+`logging` module. For example, you may wish to send ``tornado.access`` logs
+to a separate file for analysis.
+"""
+from __future__ import absolute_import, division, with_statement
+
+import logging
+import sys
+import time
+
+from tornado.escape import _unicode
+
+try:
+ import curses
+except ImportError:
+ curses = None
+
+# Logger objects for internal tornado use
+access_log = logging.getLogger("tornado.access")
+app_log = logging.getLogger("tornado.application")
+gen_log = logging.getLogger("tornado.general")
+
+def _stderr_supports_color():
+ color = False
+ if curses and sys.stderr.isatty():
+ try:
+ curses.setupterm()
+ if curses.tigetnum("colors") > 0:
+ color = True
+ except Exception:
+ pass
+ return color
+
+
+class LogFormatter(logging.Formatter):
+ """Log formatter used in Tornado.
+
+ Key features of this formatter are:
+
+ * Color support when logging to a terminal that supports it.
+ * Timestamps on every log line.
+ * Robust against str/bytes encoding problems.
+
+ This formatter is enabled automatically by
+ `tornado.options.parse_command_line` (unless ``--logging=none`` is
+ used).
+ """
+ def __init__(self, color=True, *args, **kwargs):
+ logging.Formatter.__init__(self, *args, **kwargs)
+ self._color = color and _stderr_supports_color()
+ if self._color:
+ # The curses module has some str/bytes confusion in
+ # python3. Until version 3.2.3, most methods return
+ # bytes, but only accept strings. In addition, we want to
+ # output these strings with the logging module, which
+ # works with unicode strings. The explicit calls to
+ # unicode() below are harmless in python2 but will do the
+ # right conversion in python 3.
+ fg_color = (curses.tigetstr("setaf") or
+ curses.tigetstr("setf") or "")
+ if (3, 0) < sys.version_info < (3, 2, 3):
+ fg_color = unicode(fg_color, "ascii")
+ self._colors = {
+ logging.DEBUG: unicode(curses.tparm(fg_color, 4), # Blue
+ "ascii"),
+ logging.INFO: unicode(curses.tparm(fg_color, 2), # Green
+ "ascii"),
+ logging.WARNING: unicode(curses.tparm(fg_color, 3), # Yellow
+ "ascii"),
+ logging.ERROR: unicode(curses.tparm(fg_color, 1), # Red
+ "ascii"),
+ }
+ self._normal = unicode(curses.tigetstr("sgr0"), "ascii")
+
+ def format(self, record):
+ try:
+ record.message = record.getMessage()
+ except Exception, e:
+ record.message = "Bad message (%r): %r" % (e, record.__dict__)
+ assert isinstance(record.message, basestring) # guaranteed by logging
+ record.asctime = time.strftime(
+ "%y%m%d %H:%M:%S", self.converter(record.created))
+ prefix = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]' % \
+ record.__dict__
+ if self._color:
+ prefix = (self._colors.get(record.levelno, self._normal) +
+ prefix + self._normal)
+
+ # Encoding notes: The logging module prefers to work with character
+ # strings, but only enforces that log messages are instances of
+ # basestring. In python 2, non-ascii bytestrings will make
+ # their way through the logging framework until they blow up with
+ # an unhelpful decoding error (with this formatter it happens
+ # when we attach the prefix, but there are other opportunities for
+ # exceptions further along in the framework).
+ #
+ # If a byte string makes it this far, convert it to unicode to
+ # ensure it will make it out to the logs. Use repr() as a fallback
+ # to ensure that all byte strings can be converted successfully,
+ # but don't do it by default so we don't add extra quotes to ascii
+ # bytestrings. This is a bit of a hacky place to do this, but
+ # it's worth it since the encoding errors that would otherwise
+ # result are so useless (and tornado is fond of using utf8-encoded
+ # byte strings wherever possible).
+ try:
+ message = _unicode(record.message)
+ except UnicodeDecodeError:
+ message = repr(record.message)
+
+ formatted = prefix + " " + message
+ if record.exc_info:
+ if not record.exc_text:
+ record.exc_text = self.formatException(record.exc_info)
+ if record.exc_text:
+ formatted = formatted.rstrip() + "\n" + record.exc_text
+ return formatted.replace("\n", "\n ")
View
7 tornado/netutil.py
@@ -19,16 +19,15 @@
from __future__ import absolute_import, division, with_statement
import errno
-import functools
-import logging
import os
import socket
import stat
from tornado import process
-from tornado.concurrent import DummyFuture, dummy_executor, run_on_executor
+from tornado.concurrent import dummy_executor, run_on_executor
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, SSLIOStream
+from tornado.log import app_log
from tornado.platform.auto import set_close_exec
try:
@@ -236,7 +235,7 @@ def _handle_connection(self, connection, address):
stream = IOStream(connection, io_loop=self.io_loop)
self.handle_stream(stream, address)
except Exception:
- logging.error("Error in connection callback", exc_info=True)
+ app_log.error("Error in connection callback", exc_info=True)
def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags=None):
View
91 tornado/options.py
@@ -56,16 +56,10 @@ def connect():
import re
import sys
import os
-import time
import textwrap
from tornado.escape import _unicode
-
-# For pretty log messages, if available
-try:
- import curses
-except ImportError:
- curses = None
+from tornado.log import LogFormatter
class Error(Exception):
@@ -369,96 +363,17 @@ def enable_pretty_logging(options=options):
filename=options.log_file_prefix,
maxBytes=options.log_file_max_size,
backupCount=options.log_file_num_backups)
- channel.setFormatter(_LogFormatter(color=False))
+ channel.setFormatter(LogFormatter(color=False))
root_logger.addHandler(channel)
if (options.log_to_stderr or
(options.log_to_stderr is None and not root_logger.handlers)):
# Set up color if we are in a tty and curses is installed
- color = False
- if curses and sys.stderr.isatty():
- try:
- curses.setupterm()
- if curses.tigetnum("colors") > 0:
- color = True
- except Exception:
- pass
channel = logging.StreamHandler()
- channel.setFormatter(_LogFormatter(color=color))
+ channel.setFormatter(LogFormatter())
root_logger.addHandler(channel)
-class _LogFormatter(logging.Formatter):
- def __init__(self, color, *args, **kwargs):
- logging.Formatter.__init__(self, *args, **kwargs)
- self._color = color
- if color:
- # The curses module has some str/bytes confusion in
- # python3. Until version 3.2.3, most methods return
- # bytes, but only accept strings. In addition, we want to
- # output these strings with the logging module, which
- # works with unicode strings. The explicit calls to
- # unicode() below are harmless in python2 but will do the
- # right conversion in python 3.
- fg_color = (curses.tigetstr("setaf") or
- curses.tigetstr("setf") or "")
- if (3, 0) < sys.version_info < (3, 2, 3):
- fg_color = unicode(fg_color, "ascii")
- self._colors = {
- logging.DEBUG: unicode(curses.tparm(fg_color, 4), # Blue
- "ascii"),
- logging.INFO: unicode(curses.tparm(fg_color, 2), # Green
- "ascii"),
- logging.WARNING: unicode(curses.tparm(fg_color, 3), # Yellow
- "ascii"),
- logging.ERROR: unicode(curses.tparm(fg_color, 1), # Red
- "ascii"),
- }
- self._normal = unicode(curses.tigetstr("sgr0"), "ascii")
-
- def format(self, record):
- try:
- record.message = record.getMessage()
- except Exception, e:
- record.message = "Bad message (%r): %r" % (e, record.__dict__)
- assert isinstance(record.message, basestring) # guaranteed by logging
- record.asctime = time.strftime(
- "%y%m%d %H:%M:%S", self.converter(record.created))
- prefix = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]' % \
- record.__dict__
- if self._color:
- prefix = (self._colors.get(record.levelno, self._normal) +
- prefix + self._normal)
-
- # Encoding notes: The logging module prefers to work with character
- # strings, but only enforces that log messages are instances of
- # basestring. In python 2, non-ascii bytestrings will make
- # their way through the logging framework until they blow up with
- # an unhelpful decoding error (with this formatter it happens
- # when we attach the prefix, but there are other opportunities for
- # exceptions further along in the framework).
- #
- # If a byte string makes it this far, convert it to unicode to
- # ensure it will make it out to the logs. Use repr() as a fallback
- # to ensure that all byte strings can be converted successfully,
- # but don't do it by default so we don't add extra quotes to ascii
- # bytestrings. This is a bit of a hacky place to do this, but
- # it's worth it since the encoding errors that would otherwise
- # result are so useless (and tornado is fond of using utf8-encoded
- # byte strings whereever possible).
- try:
- message = _unicode(record.message)
- except UnicodeDecodeError:
- message = repr(record.message)
-
- formatted = prefix + " " + message
- if record.exc_info:
- if not record.exc_text:
- record.exc_text = self.formatException(record.exc_info)
- if record.exc_text:
- formatted = formatted.rstrip() + "\n" + record.exc_text
- return formatted.replace("\n", "\n ")
-
# Default options
define("help", type=bool, help="show this help information")
View
3 tornado/platform/common.py
@@ -69,6 +69,9 @@ def __init__(self):
def fileno(self):
return self.reader.fileno()
+ def write_fileno(self):
+ return self.writer.fileno()
+
def wake(self):
try:
self.writer.send(b("x"))
View
6 tornado/platform/interface.py
@@ -39,13 +39,17 @@ class Waker(object):
the ``IOLoop`` is closed, it closes its waker too.
"""
def fileno(self):
- """Returns a file descriptor for this waker.
+ """Returns the read file descriptor for this waker.