Skip to content

Commit

Permalink
Use per-module loggers instead of the root logger.
Browse files Browse the repository at this point in the history
  • Loading branch information
bickfordb authored and Ben Darnell committed Mar 24, 2010
1 parent e340491 commit e391fd2
Show file tree
Hide file tree
Showing 13 changed files with 75 additions and 60 deletions.
19 changes: 10 additions & 9 deletions tornado/auth.py
Expand Up @@ -58,6 +58,7 @@ def _on_auth(self, user):
import urlparse
import uuid

_log = logging.getLogger("tornado.auth")

class OpenIdMixin(object):
"""Abstract implementation of OpenID and Attribute Exchange.
Expand Down Expand Up @@ -145,7 +146,7 @@ def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):

def _on_authentication_verified(self, callback, response):
if response.error or u"is_valid:true" not in response.body:
logging.warning("Invalid OpenID response: %s", response.error or
_log.warning("Invalid OpenID response: %s", response.error or
response.body)
callback(None)
return
Expand Down Expand Up @@ -232,12 +233,12 @@ def get_authenticated_user(self, callback):
request_key = self.get_argument("oauth_token")
request_cookie = self.get_cookie("_oauth_request_token")
if not request_cookie:
logging.warning("Missing OAuth request token cookie")
_log.warning("Missing OAuth request token cookie")
callback(None)
return
cookie_key, cookie_secret = request_cookie.split("|")
if cookie_key != request_key:
logging.warning("Request token does not match cookie")
_log.warning("Request token does not match cookie")
callback(None)
return
token = dict(key=cookie_key, secret=cookie_secret)
Expand Down Expand Up @@ -289,7 +290,7 @@ def _oauth_access_token_url(self, request_token):

def _on_access_token(self, callback, response):
if response.error:
logging.warning("Could not fetch access token")
_log.warning("Could not fetch access token")
callback(None)
return
access_token = _oauth_parse_response(response.body)
Expand Down Expand Up @@ -441,7 +442,7 @@ def _on_post(self, new_entry):

def _on_twitter_request(self, callback, response):
if response.error:
logging.warning("Error response %s fetching %s", response.error,
_log.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
Expand Down Expand Up @@ -561,7 +562,7 @@ def _on_post(self, new_entry):

def _on_friendfeed_request(self, callback, response):
if response.error:
logging.warning("Error response %s fetching %s", response.error,
_log.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
Expand Down Expand Up @@ -819,17 +820,17 @@ def _on_get_user_info(self, callback, session, users):

def _parse_response(self, callback, response):
if response.error:
logging.warning("HTTP error from Facebook: %s", response.error)
_log.warning("HTTP error from Facebook: %s", response.error)
callback(None)
return
try:
json = escape.json_decode(response.body)
except:
logging.warning("Invalid JSON from Facebook: %r", response.body)
_log.warning("Invalid JSON from Facebook: %r", response.body)
callback(None)
return
if isinstance(json, dict) and json.get("error_code"):
logging.warning("Facebook error: %d: %r", json["error_code"],
_log.warning("Facebook error: %d: %r", json["error_code"],
json.get("error_msg"))
callback(None)
return
Expand Down
3 changes: 2 additions & 1 deletion tornado/autoreload.py
Expand Up @@ -29,6 +29,7 @@
import sys
import types

_log = logging.getLogger('tornado.autoreload')

def start(io_loop=None, check_time=500):
"""Restarts the process automatically when a module is modified.
Expand Down Expand Up @@ -68,7 +69,7 @@ def _reload_on_update(io_loop, modify_times):
modify_times[path] = modified
continue
if modify_times[path] != modified:
logging.info("%s modified; restarting server", path)
_log.info("%s modified; restarting server", path)
_reload_attempted = True
for fd in io_loop._handlers.keys():
try:
Expand Down
5 changes: 3 additions & 2 deletions tornado/database.py
Expand Up @@ -24,6 +24,7 @@
import itertools
import logging

_log = logging.getLogger('tornado.database')

class Connection(object):
"""A lightweight wrapper around MySQLdb DB-API connections.
Expand Down Expand Up @@ -71,7 +72,7 @@ def __init__(self, host, database, user=None, password=None):
try:
self.reconnect()
except:
logging.error("Cannot connect to MySQL on %s", self.host,
_log.error("Cannot connect to MySQL on %s", self.host,
exc_info=True)

def __del__(self):
Expand Down Expand Up @@ -150,7 +151,7 @@ def _execute(self, cursor, query, parameters):
try:
return cursor.execute(query, parameters)
except OperationalError:
logging.error("Error connecting to MySQL on %s", self.host)
_log.error("Error connecting to MySQL on %s", self.host)
self.close()
raise

Expand Down
15 changes: 8 additions & 7 deletions tornado/httpclient.py
Expand Up @@ -29,6 +29,7 @@
import time
import weakref

_log = logging.getLogger('tornado.httpclient')

class HTTPClient(object):
"""A blocking HTTP client backed with pycurl.
Expand Down Expand Up @@ -342,7 +343,7 @@ def __init__(self, errno, message):

def _curl_create(max_simultaneous_connections=None):
curl = pycurl.Curl()
if logging.getLogger().isEnabledFor(logging.DEBUG):
if _log.isEnabledFor(logging.DEBUG):
curl.setopt(pycurl.VERBOSE, 1)
curl.setopt(pycurl.DEBUGFUNCTION, _curl_debug)
curl.setopt(pycurl.MAXCONNECTS, max_simultaneous_connections or 5)
Expand Down Expand Up @@ -417,11 +418,11 @@ def ioctl(cmd):
userpwd = "%s:%s" % (request.auth_username, request.auth_password)
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
curl.setopt(pycurl.USERPWD, userpwd)
logging.info("%s %s (username: %r)", request.method, request.url,
_log.info("%s %s (username: %r)", request.method, request.url,
request.auth_username)
else:
curl.unsetopt(pycurl.USERPWD)
logging.info("%s %s", request.method, request.url)
_log.info("%s %s", request.method, request.url)
if request.prepare_curl_callback is not None:
request.prepare_curl_callback(curl)

Expand All @@ -434,7 +435,7 @@ def _curl_header_callback(headers, header_line):
return
parts = header_line.split(": ")
if len(parts) != 2:
logging.warning("Invalid HTTP response header line %r", header_line)
_log.warning("Invalid HTTP response header line %r", header_line)
return
name = parts[0].strip()
value = parts[1].strip()
Expand All @@ -447,12 +448,12 @@ def _curl_header_callback(headers, header_line):
def _curl_debug(debug_type, debug_msg):
debug_types = ('I', '<', '>', '<', '>')
if debug_type == 0:
logging.debug('%s', debug_msg.strip())
_log.debug('%s', debug_msg.strip())
elif debug_type in (1, 2):
for line in debug_msg.splitlines():
logging.debug('%s %s', debug_types[debug_type], line)
_log.debug('%s %s', debug_types[debug_type], line)
elif debug_type == 4:
logging.debug('%s %r', debug_types[debug_type], debug_msg)
_log.debug('%s %r', debug_types[debug_type], debug_msg)


def _utf8(value):
Expand Down
15 changes: 8 additions & 7 deletions tornado/httpserver.py
Expand Up @@ -33,6 +33,7 @@
except ImportError:
ssl = None

_log = logging.getLogger('tornado.httpserver')

class HTTPServer(object):
"""A non-blocking, single-threaded HTTP server.
Expand Down Expand Up @@ -164,16 +165,16 @@ def start(self, num_processes=None):
try:
num_processes = os.sysconf("SC_NPROCESSORS_CONF")
except ValueError:
logging.error("Could not get num processors from sysconf; "
_log.error("Could not get num processors from sysconf; "
"running with one process")
num_processes = 1
if num_processes > 1 and ioloop.IOLoop.initialized():
logging.error("Cannot run in multiple processes: IOLoop instance "
_log.error("Cannot run in multiple processes: IOLoop instance "
"has already been initialized. You cannot call "
"IOLoop.instance() before calling start()")
num_processes = 1
if num_processes > 1:
logging.info("Pre-forking %d server processes", num_processes)
_log.info("Pre-forking %d server processes", num_processes)
for i in range(num_processes):
if os.fork() == 0:
self.io_loop = ioloop.IOLoop.instance()
Expand Down Expand Up @@ -210,7 +211,7 @@ def _handle_events(self, fd, events):
HTTPConnection(stream, address, self.request_callback,
self.no_keep_alive, self.xheaders)
except:
logging.error("Error in connection callback", exc_info=True)
_log.error("Error in connection callback", exc_info=True)


class HTTPConnection(object):
Expand Down Expand Up @@ -313,21 +314,21 @@ def _parse_mime_body(self, boundary, data):
if not part: continue
eoh = part.find("\r\n\r\n")
if eoh == -1:
logging.warning("multipart/form-data missing headers")
_log.warning("multipart/form-data missing headers")
continue
headers = HTTPHeaders.parse(part[:eoh])
name_header = headers.get("Content-Disposition", "")
if not name_header.startswith("form-data;") or \
not part.endswith("\r\n"):
logging.warning("Invalid multipart/form-data")
_log.warning("Invalid multipart/form-data")
continue
value = part[eoh + 4:-2]
name_values = {}
for name_part in name_header[10:].split(";"):
name, name_value = name_part.strip().split("=", 1)
name_values[name] = name_value.strip('"').decode("utf-8")
if not name_values.get("name"):
logging.warning("multipart/form-data value missing name")
_log.warning("multipart/form-data value missing name")
continue
name = name_values["name"]
if name_values.get("filename"):
Expand Down
16 changes: 9 additions & 7 deletions tornado/ioloop.py
Expand Up @@ -25,6 +25,8 @@
import time


_log = logging.getLogger("tornado.ioloop")

class IOLoop(object):
"""A level-triggered I/O loop.
Expand Down Expand Up @@ -141,7 +143,7 @@ def remove_handler(self, fd):
try:
self._impl.unregister(fd)
except (OSError, IOError):
logging.debug("Error deleting fd from IOLoop", exc_info=True)
_log.debug("Error deleting fd from IOLoop", exc_info=True)

def start(self):
"""Starts the I/O loop.
Expand Down Expand Up @@ -185,7 +187,7 @@ def start(self):
event_pairs = self._impl.poll(poll_timeout)
except Exception, e:
if e.errno == errno.EINTR:
logging.warning("Interrupted system call", exc_info=1)
_log.warning("Interrupted system call", exc_info=1)
continue
else:
raise
Expand All @@ -206,10 +208,10 @@ def start(self):
# Happens when the client closes the connection
pass
else:
logging.error("Exception in I/O handler for fd %d",
_log.error("Exception in I/O handler for fd %d",
fd, exc_info=True)
except:
logging.error("Exception in I/O handler for fd %d",
_log.error("Exception in I/O handler for fd %d",
fd, exc_info=True)
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
Expand Down Expand Up @@ -277,7 +279,7 @@ def handle_callback_exception(self, callback):
The exception itself is not passed explicitly, but is available
in sys.exc_info.
"""
logging.error("Exception in callback %r", callback, exc_info=True)
_log.error("Exception in callback %r", callback, exc_info=True)

def _read_waker(self, fd, events):
try:
Expand Down Expand Up @@ -335,7 +337,7 @@ def _run(self):
except (KeyboardInterrupt, SystemExit):
raise
except:
logging.error("Error in periodic callback", exc_info=True)
_log.error("Error in periodic callback", exc_info=True)
self.start()


Expand Down Expand Up @@ -466,5 +468,5 @@ def poll(self, timeout):
# All other systems
import sys
if "linux" in sys.platform:
logging.warning("epoll module not found; using select()")
_log.warning("epoll module not found; using select()")
_poll = _Select
9 changes: 5 additions & 4 deletions tornado/iostream.py
Expand Up @@ -21,6 +21,7 @@
import logging
import socket

_log = logging.getLogger('tornado.iostream')

class IOStream(object):
"""A utility class to write to and read from a non-blocking socket.
Expand Down Expand Up @@ -138,7 +139,7 @@ def closed(self):

def _handle_events(self, fd, events):
if not self.socket:
logging.warning("Got events for closed stream %d", fd)
_log.warning("Got events for closed stream %d", fd)
return
if events & self.io_loop.READ:
self._handle_read()
Expand Down Expand Up @@ -167,7 +168,7 @@ def _handle_read(self):
if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return
else:
logging.warning("Read error on %d: %s",
_log.warning("Read error on %d: %s",
self.socket.fileno(), e)
self.close()
return
Expand All @@ -176,7 +177,7 @@ def _handle_read(self):
return
self._read_buffer += chunk
if len(self._read_buffer) >= self.max_buffer_size:
logging.error("Reached maximum read buffer size")
_log.error("Reached maximum read buffer size")
self.close()
return
if self._read_bytes:
Expand Down Expand Up @@ -204,7 +205,7 @@ def _handle_write(self):
if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
break
else:
logging.warning("Write error on %d: %s",
_log.warning("Write error on %d: %s",
self.socket.fileno(), e)
self.close()
return
Expand Down
7 changes: 4 additions & 3 deletions tornado/locale.py
Expand Up @@ -45,6 +45,7 @@
_translations = {}
_supported_locales = frozenset([_default_locale])

_log = logging.getLogger('tornado.locale')

def get(*locale_codes):
"""Returns the closest match for the given locale codes.
Expand Down Expand Up @@ -105,7 +106,7 @@ def load_translations(directory):
if not path.endswith(".csv"): continue
locale, extension = path.split(".")
if locale not in LOCALE_NAMES:
logging.error("Unrecognized locale %r (path: %s)", locale,
_log.error("Unrecognized locale %r (path: %s)", locale,
os.path.join(directory, path))
continue
f = open(os.path.join(directory, path), "r")
Expand All @@ -119,13 +120,13 @@ def load_translations(directory):
else:
plural = "unknown"
if plural not in ("plural", "singular", "unknown"):
logging.error("Unrecognized plural indicator %r in %s line %d",
_log.error("Unrecognized plural indicator %r in %s line %d",
plural, path, i + 1)
continue
_translations[locale].setdefault(plural, {})[english] = translation
f.close()
_supported_locales = frozenset(_translations.keys() + [_default_locale])
logging.info("Supported locales: %s", sorted(_supported_locales))
_log.info("Supported locales: %s", sorted(_supported_locales))


def get_supported_locales(cls):
Expand Down
1 change: 1 addition & 0 deletions tornado/options.py
Expand Up @@ -49,6 +49,7 @@ def connect():
"""

import datetime
import logging
import logging.handlers
import re
import sys
Expand Down

6 comments on commit e391fd2

@finiteloop
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I really don't like this change - why did we commit this? What is the goal?

@bdarnell
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The goal is to let you configure verbosity (and maybe other things, but mostly verbosity) on a per-module basis. The tornado.httpclient module in particular is so noisy at the debug level that I end up doing all my application logging at info or higher so I never have to turn on --logging=debug. This change seems like a step in that direction (although I haven't yet tried to use any non-global logging configuration myself). The need to explicitly define a logger using redundant information is lame, but seems to be how the logging module was (over-)designed to be used. Did you have something else in mind for module-level logging configuration?

@bdarnell
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So there's another problem with doing things this way. The root-level logging functions (logging.info(), etc) will call logging.basicConfig() for you if no logging configuration has been done. If you're using module-level loggers this doesn't happen, so if you're using tornado without tornado.options (which sets up logging in parse_command_line) the only log output you'll get is "no handlers could be found for logger "tornado.web"". The recommended fix (https://docs.python.org/2.6/library/logging.html#configuring-logging-for-a-library) is apparently to add a null handler to each logger, which is A) incredibly verbose since you're expected to write your own NullHandler class and B) not really what you want since it means that the library's log output will go to /dev/null in the absence of explicit logging configuration. This is absurd. It looks like the only way to get reasonable out-of-the-box behavior is to just use the root logger. It looks like handler.addFilter can be used to get the kind of per-module verbosity settings that I wanted without multiple loggers, so I think I'll switch everything back to the root logger and try adding a --vmodule-like filter.

@bickfordb
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That's unfortunate: this made debugging and using Tornado significantly easier for me. Global logging configuration (e.g. 'logging.getLogger("tornado.web").setLevel(logging.ERROR)') seems to work fine.

@bdarnell
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The trouble is that since IOLoop effectively swallows and logs all exceptions, I feel it's important to ensure that tornado's logging is visible by default out of the box. I don't see any good way to do that with per-module loggers without adding handlers at import time (which sqlalchemy does and it's annoying to work around).

Are you attaching different handlers to the different loggers, or are you just using them to set different levels? If it's the latter, I think a logging.Filter that inspects the record's module attribute is a good alternative (and will work automatically even for libraries that take the easy way out and just use the root logger).

@bickfordb
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That makes sense to me. In my application I'm configuring root handlers to log to stderr + scribe globally in my run script. I missed the "no loggers configured" message and the fact that module-logged messages aren't visible by default.

This isn't the first time the logging module has been a pain point for me. I started working on [what I believe is] a better logging library called "logit" (http://github.com/bickfordb/logit) if anyone reading this is interested in contributing or has any ideas.

Please sign in to comment.