
Use our own logger object to log events in gunicorn. While I'm here, add
the possibility to manage an access log compatible with the Apache combined
log format. Reopening log files is also handled. This change also fixes
issues #237 and #190.

For the sake of simplicity, logging config file support (--log-config and
gunicorn/logging_config.py) has been removed.

--logfile option has become --error-logfile, by default "-"

The following option has been added:

--access-logfile, the access log file path. By default None. If set,
access logs will be written to the defined output.

e.g.:

$ gunicorn -w3 --access-logfile=- test:app
2011-08-21 19:36:52 [84572] [INFO] Starting gunicorn 0.13.0
2011-08-21 19:36:52 [84572] [INFO] Listening at: http://127.0.0.1:8000
(84572)
2011-08-21 19:36:52 [84572] [INFO] Using worker: sync
2011-08-21 19:36:52 [84573] [INFO] Booting worker with pid: 84573
2011-08-21 19:36:52 [84574] [INFO] Booting worker with pid: 84574
2011-08-21 19:36:52 [84575] [INFO] Booting worker with pid: 84575
127.0.0.1 - - [21/Aug/2011:19:37:04] "GET / HTTP/1.1" 200 14 "-"
"curl/7.21.4 (universal-apple-darwin11.0) libcurl/7.21.4 OpenSSL/0.9.8r
zlib/1.2.5"
commit 66f7271c5f67bedc576382a4384604298e6180a0, 1 parent 325c5a8, committed by @benoitc on Aug 21, 2011
2 examples/log_app.py
@@ -2,6 +2,8 @@
log = logging.getLogger(__name__)
+log.addHandler(logging.StreamHandler())
+
def app_factory(global_options, **local_options):
return app
47 gunicorn/app/base.py
@@ -4,16 +4,12 @@
# See the NOTICE for more information.
import errno
-import logging
import os
import sys
import traceback
-try:
- from logging.config import fileConfig
-except ImportError:
- from gunicorn.logging_config import fileConfig
+from gunicorn.glogging import Logger
from gunicorn import util
from gunicorn.arbiter import Arbiter
from gunicorn.config import Config
@@ -24,13 +20,6 @@ class Application(object):
An application interface for configuring and loading
the various necessities for any given web framework.
"""
- LOG_LEVELS = {
- "critical": logging.CRITICAL,
- "error": logging.ERROR,
- "warning": logging.WARNING,
- "info": logging.INFO,
- "debug": logging.DEBUG
- }
def __init__(self, usage=None):
self.usage = usage
@@ -106,8 +95,6 @@ def reload(self):
self.do_load_config()
if self.cfg.spew:
debug.spew()
- loglevel = self.LOG_LEVELS.get(self.cfg.loglevel.lower(), logging.INFO)
- self.logger.setLevel(loglevel)
def wsgi(self):
if self.callable is None:
@@ -124,41 +111,11 @@ def run(self):
os.setpgrp()
except OSError, e:
if e[0] != errno.EPERM:
- raise
-
- self.configure_logging()
+ raise
try:
Arbiter(self).run()
except RuntimeError, e:
sys.stderr.write("\nError: %s\n\n" % e)
sys.stderr.flush()
sys.exit(1)
- def configure_logging(self):
- """\
- Set the log level and choose the destination for log output.
- """
- self.logger = logging.getLogger('gunicorn')
-
- fmt = r"%(asctime)s [%(process)d] [%(levelname)s] %(message)s"
- datefmt = r"%Y-%m-%d %H:%M:%S"
- if not self.cfg.logconfig:
- handlers = []
- if self.cfg.logfile != "-":
- handlers.append(logging.FileHandler(self.cfg.logfile))
- else:
- handlers.append(logging.StreamHandler())
-
- loglevel = self.LOG_LEVELS.get(self.cfg.loglevel.lower(), logging.INFO)
- self.logger.setLevel(loglevel)
- for h in handlers:
- h.setFormatter(logging.Formatter(fmt, datefmt))
- self.logger.addHandler(h)
- else:
- if os.path.exists(self.cfg.logconfig):
- fileConfig(self.cfg.logconfig)
- else:
- raise RuntimeError("Error: logfile '%s' not found." %
- self.cfg.logconfig)
-
-
22 gunicorn/app/pasterapp.py
@@ -3,7 +3,6 @@
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
-import logging
import os
import pkg_resources
import sys
@@ -44,26 +43,6 @@ def app_config(self):
return cfg
- def configure_logging(self):
- if hasattr(self, "cfgfname"):
- self.logger = logging.getLogger('gunicorn')
- # from paste.script.command
- parser = ConfigParser.ConfigParser()
- parser.read([self.cfgfname])
- if parser.has_section('loggers'):
- if sys.version_info >= (2, 6):
- from logging.config import fileConfig
- else:
- # Use our custom fileConfig -- 2.5.1's with a custom Formatter class
- # and less strict whitespace (which were incorporated into 2.6's)
- from gunicorn.logging_config import fileConfig
-
- config_file = os.path.abspath(self.cfgfname)
- fileConfig(config_file, dict(__file__=config_file,
- here=os.path.dirname(config_file)))
- return
- super(PasterBaseApplication, self).configure_logging()
-
class PasterApplication(PasterBaseApplication):
def init(self, parser, opts, args):
@@ -123,7 +102,6 @@ def __init__(self, app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs)
sys.stderr.flush()
sys.exit(1)
- self.configure_logging()
def load_config(self):
if not hasattr(self, "cfgfname"):
20 gunicorn/arbiter.py
@@ -6,14 +6,15 @@
from __future__ import with_statement
import errno
-import logging
import os
import select
import signal
import sys
import time
import traceback
+
+from gunicorn.glogging import Logger
from gunicorn.errors import HaltServer
from gunicorn.pidfile import Pidfile
from gunicorn.sock import create_socket
@@ -51,9 +52,6 @@ class Arbiter(object):
)
def __init__(self, app):
- self.log = logging.getLogger(__name__)
- self.log.info("Starting gunicorn %s", __version__)
-
os.environ["SERVER_SOFTWARE"] = SERVER_SOFTWARE
self.setup(app)
@@ -87,6 +85,11 @@ def __init__(self, app):
def setup(self, app):
self.app = app
self.cfg = app.cfg
+ self.log = Logger(app.cfg)
+
+ if 'GUNICORN_FD' in os.environ:
+ self.log.reopen_files()
+
self.address = self.cfg.address
self.num_workers = self.cfg.workers
self.debug = self.cfg.debug
@@ -109,11 +112,12 @@ def start(self):
"""\
Initialize the arbiter. Start listening and set pidfile if needed.
"""
+ self.log.info("Starting gunicorn %s", __version__)
self.cfg.on_starting(self)
self.pid = os.getpid()
self.init_signals()
if not self.LISTENER:
- self.LISTENER = create_socket(self.cfg)
+ self.LISTENER = create_socket(self.cfg, self.log)
if self.cfg.pidfile is not None:
self.pidfile = Pidfile(self.cfg.pidfile)
@@ -136,6 +140,7 @@ def init_signals(self):
self.PIPE = pair = os.pipe()
map(util.set_non_blocking, pair)
map(util.close_on_exec, pair)
+ self.log.close_on_exec()
map(lambda s: signal.signal(s, self.signal), self.SIGNALS)
signal.signal(signal.SIGCHLD, self.handle_chld)
@@ -368,6 +373,8 @@ def reload(self):
util._setproctitle("master [%s]" % self.proc_name)
# manage workers
+ self.log.reopen_files()
+
self.manage_workers()
def murder_workers(self):
@@ -428,7 +435,8 @@ def manage_workers(self):
def spawn_worker(self):
self.worker_age += 1
worker = self.worker_class(self.worker_age, self.pid, self.LISTENER,
- self.app, self.timeout/2.0, self.cfg)
+ self.app, self.timeout/2.0,
+ self.cfg, self.log)
self.cfg.pre_fork(self, worker)
pid = os.fork()
if pid != 0:
37 gunicorn/config.py
@@ -551,15 +551,28 @@ class SecureSchemeHeader(Setting):
the headers defined here can not be passed directly from the client.
"""
-class Logfile(Setting):
- name = "logfile"
+class AccessLog(Setting):
+ name = "accesslog"
section = "Logging"
- cli = ["--log-file"]
+ cli = ["--access-logfile"]
+ meta = "FILE"
+ validator = validate_string
+ default = None
+ desc = """\
+ The Access log file to write to.
+
+ "-" means log to stdout.
+ """
+
+class ErrorLog(Setting):
+ name = "errorlog"
+ section = "Logging"
+ cli = ["--error-logfile"]
meta = "FILE"
validator = validate_string
default = "-"
desc = """\
- The log file to write to.
+ The Error log file to write to.
"-" means log to stdout.
"""
@@ -572,7 +585,7 @@ class Loglevel(Setting):
validator = validate_string
default = "info"
desc = """\
- The granularity of log outputs.
+ The granularity of Error log outputs.
Valid level names are:
@@ -583,20 +596,6 @@ class Loglevel(Setting):
* critical
"""
-class LogConfig(Setting):
- name = "logconfig"
- section = "Logging"
- cli = ["--log-config"]
- meta = "FILE"
- validator = validate_string
- default = None
- desc = """\
- The log config file to use.
-
- Gunicorn uses the standard Python logging module's Configuration
- file format.
- """
-
class Procname(Setting):
name = "proc_name"
section = "Process Naming"
156 gunicorn/glogging.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import datetime
+import logging
+logging.Logger.manager.emittedNoHandlerWarning = 1
+import sys
+import traceback
+
+from gunicorn import util
+
+class Logger(object):
+
+ LOG_LEVELS = {
+ "critical": logging.CRITICAL,
+ "error": logging.ERROR,
+ "warning": logging.WARNING,
+ "info": logging.INFO,
+ "debug": logging.DEBUG
+ }
+
+ error_fmt = r"%(asctime)s [%(process)d] [%(levelname)s] %(message)s"
+ datefmt = r"%Y-%m-%d %H:%M:%S"
+
+ access_fmt = "%(message)s"
+
+ access_log_format = \
+ '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
+
+ def __init__(self, cfg):
+ self.error_log = logging.getLogger("gunicor.error")
+ self.access_log = logging.getLogger("gunicorn.access")
+ self.error_handlers = []
+ self.access_handlers = []
+
+ self.setup(cfg)
+
+ def setup(self, cfg):
+ self.cfg = cfg
+
+ loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO)
+ self.error_log.setLevel(loglevel)
+
+ # always info in access log
+ self.access_log.setLevel(logging.INFO)
+
+ self._set_handler(self.error_log, cfg.errorlog,
+ logging.Formatter(self.error_fmt, self.datefmt))
+
+
+ if cfg.accesslog is not None:
+ self._set_handler(self.access_log, cfg.accesslog,
+ fmt=logging.Formatter(self.access_fmt))
+
+
+ def critical(self, msg, *args, **kwargs):
+ self.error_log.exception(msg, *args, **kwargs)
+
+ def error(self, msg, *args, **kwargs):
+ self.error_log.error(msg, *args, **kwargs)
+
+ def warning(self, msg, *args, **kwargs):
+ self.error_log.warning(msg, *args, **kwargs)
+
+ def info(self, msg, *args, **kwargs):
+ self.error_log.info(msg, *args, **kwargs)
+
+ def debug(self, msg, *args, **kwargs):
+ self.error_log.debug(msg, *args, **kwargs)
+
+ def exception(self, msg, *args):
+ self.error_log.exception(msg, *args)
+
+ def log(self, lvl, msg, *args, **kwargs):
+ if isinstance(lvl, basestring):
+ lvl = self.LOG_LEVELS.get(lvl.lower(), logging.INFO)
+ self.error_log.log(lvl, msg, *args, **kwargs)
+
+ def access(self, resp, environ):
+ """ Seee http://httpd.apache.org/docs/2.0/logs.html#combined
+ for format details
+ """
+
+ if not self.cfg.accesslog:
+ return
+
+ status = resp.status.split(None, 1)[0]
+ atoms = {
+ 'h': environ['REMOTE_ADDR'],
+ 'l': '-',
+ 'u': '-', # would be cool to get username from basic auth header
+ 't': self.now(),
+ 'r': "%s %s %s" % (environ['REQUEST_METHOD'],
+ environ['RAW_URI'], environ["SERVER_PROTOCOL"]),
+ 's': status,
+ 'b': resp.clength and str(resp.clength) or '-',
+ 'f': environ.get('HTTP_REFERER', '-'),
+ 'a': environ.get('HTTP_USER_AGENT', '-')
+ }
+
+ for k, v in atoms.items():
+ atoms[k] = v.replace('"', '\\"')
+
+ try:
+ self.access_log.info(self.access_log_format % atoms)
+ except:
+ self.error(traceback.format_exc())
+
+ def now(self):
+ """ return date in Apache Common Log Format """
+ now = datetime.datetime.now()
+ month = util.monthname[now.month]
+ return '[%02d/%s/%04d:%02d:%02d:%02d]' % (now.day, month,
+ now.year, now.hour, now.minute, now.second)
+
+
+ def reopen_files(self):
+ for log in (self.error_log, self.access_log):
+ for handler in log.handlers:
+ if isinstance(handler, logging.FileHandler):
+ handler.acquire()
+ handler.stream.close()
+ handler.stream = open(handler.baseFilename,
+ handler.mode)
+ handler.release()
+
+ def close_on_exec(self):
+ for log in (self.error_log, self.access_log):
+ for handler in log.handlers:
+ if isinstance(handler, logging.FileHandler):
+ handler.acquire()
+ util.close_on_exec(handler.stream.fileno())
+ handler.release()
+
+
+ def _get_gunicorn_handler(self, log):
+ for h in log.handlers:
+ if getattr(h, "_gunicorn") == True:
+ return h
+
+ def _set_handler(self, log, output, fmt):
+ # remove previous gunicorn log handler
+ h = self._get_gunicorn_handler(log)
+ if h:
+ log.handlers.remove(h)
+
+ if output == "-":
+ h = logging.StreamHandler()
+ else:
+ h = logging.FileHandler(output)
+
+ h.setFormatter(fmt)
+ h._gunicorn = True
+ log.addHandler(h)
+
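
As a quick illustration, the new Logger class can be driven outside the
arbiter; this is only a sketch (Config and Config.set come from
gunicorn.config, and the access.log path is made up):

from gunicorn.config import Config
from gunicorn.glogging import Logger

cfg = Config()
cfg.set("errorlog", "-")            # error log to stdout (the default)
cfg.set("accesslog", "access.log")  # enable the combined-format access log
cfg.set("loglevel", "debug")

log = Logger(cfg)
log.info("Starting gunicorn %s", "0.13.0")
log.debug("worker booted")
log.reopen_files()                  # what the reload/USR1 paths call after rotation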
1 gunicorn/http/wsgi.py
@@ -96,7 +96,6 @@ def create(req, sock, client, server, cfg):
environ['wsgi.url_scheme'] = url_scheme
-
if isinstance(forward, basestring):
# we only took the last one
# http://en.wikipedia.org/wiki/X-Forwarded-For
346 gunicorn/logging_config.py
@@ -1,346 +0,0 @@
-# -*- coding: utf-8 -
-#
-# This file is part of gunicorn released under the MIT license.
-# See the NOTICE for more information.
-#
-# Copyright 2001-2005 by Vinay Sajip. All Rights Reserved.
-#
-
-"""
-Configuration functions for the logging package for Python. The core package
-is based on PEP 282 and comments thereto in comp.lang.python, and influenced
-by Apache's log4j system.
-
-Should work under Python versions >= 1.5.2, except that source line
-information is not available unless 'sys._getframe()' is.
-
-Copyright (C) 2001-2004 Vinay Sajip. All Rights Reserved.
-
-To use, simply 'import logging' and log away!
-"""
-
-import sys, logging, logging.handlers, string, socket, struct, os, traceback, types
-
-try:
- import thread
- import threading
-except ImportError:
- thread = None
-
-from SocketServer import ThreadingTCPServer, StreamRequestHandler
-
-
-DEFAULT_LOGGING_CONFIG_PORT = 9030
-
-if sys.platform == "win32":
- RESET_ERROR = 10054 #WSAECONNRESET
-else:
- RESET_ERROR = 104 #ECONNRESET
-
-#
-# The following code implements a socket listener for on-the-fly
-# reconfiguration of logging.
-#
-# _listener holds the server object doing the listening
-_listener = None
-
-def fileConfig(fname, defaults=None):
- """
- Read the logging configuration from a ConfigParser-format file.
-
- This can be called several times from an application, allowing an end user
- the ability to select from various pre-canned configurations (if the
- developer provides a mechanism to present the choices and load the chosen
- configuration).
- In versions of ConfigParser which have the readfp method [typically
- shipped in 2.x versions of Python], you can pass in a file-like object
- rather than a filename, in which case the file-like object will be read
- using readfp.
- """
- import ConfigParser
-
- cp = ConfigParser.ConfigParser(defaults)
- if hasattr(cp, 'readfp') and hasattr(fname, 'readline'):
- cp.readfp(fname)
- else:
- cp.read(fname)
-
- formatters = _create_formatters(cp)
-
- # critical section
- logging._acquireLock()
- try:
- logging._handlers.clear()
- if hasattr(logging, '_handlerList'):
- del logging._handlerList[:]
- # Handlers add themselves to logging._handlers
- handlers = _install_handlers(cp, formatters)
- _install_loggers(cp, handlers)
- finally:
- logging._releaseLock()
-
-
-def _resolve(name):
- """Resolve a dotted name to a global object."""
- name = string.split(name, '.')
- used = name.pop(0)
- found = __import__(used)
- for n in name:
- used = used + '.' + n
- try:
- found = getattr(found, n)
- except AttributeError:
- __import__(used)
- found = getattr(found, n)
- return found
-
-
-def _create_formatters(cp):
- """Create and return formatters"""
- flist = cp.get("formatters", "keys")
- if not len(flist):
- return {}
- flist = string.split(flist, ",")
- formatters = {}
- for form in flist:
- form = string.strip(form)
- sectname = "formatter_%s" % form
- opts = cp.options(sectname)
- if "format" in opts:
- fs = cp.get(sectname, "format", 1)
- else:
- fs = None
- if "datefmt" in opts:
- dfs = cp.get(sectname, "datefmt", 1)
- else:
- dfs = None
- c = logging.Formatter
- if "class" in opts:
- class_name = cp.get(sectname, "class")
- if class_name:
- c = _resolve(class_name)
- f = c(fs, dfs)
- formatters[form] = f
- return formatters
-
-
-def _install_handlers(cp, formatters):
- """Install and return handlers"""
- hlist = cp.get("handlers", "keys")
- if not len(hlist):
- return {}
- hlist = string.split(hlist, ",")
- handlers = {}
- fixups = [] #for inter-handler references
- for hand in hlist:
- hand = string.strip(hand)
- sectname = "handler_%s" % hand
- klass = cp.get(sectname, "class")
- opts = cp.options(sectname)
- if "formatter" in opts:
- fmt = cp.get(sectname, "formatter")
- else:
- fmt = ""
- try:
- klass = eval(klass, vars(logging))
- except (AttributeError, NameError):
- klass = _resolve(klass)
- args = cp.get(sectname, "args")
- args = eval(args, vars(logging))
- h = apply(klass, args)
- if "level" in opts:
- level = cp.get(sectname, "level")
- h.setLevel(logging._levelNames[level])
- if len(fmt):
- h.setFormatter(formatters[fmt])
- #temporary hack for FileHandler and MemoryHandler.
- if klass == logging.handlers.MemoryHandler:
- if "target" in opts:
- target = cp.get(sectname,"target")
- else:
- target = ""
- if len(target): #the target handler may not be loaded yet, so keep for later...
- fixups.append((h, target))
- handlers[hand] = h
- #now all handlers are loaded, fixup inter-handler references...
- for h, t in fixups:
- h.setTarget(handlers[t])
- return handlers
-
-
-def _install_loggers(cp, handlers):
- """Create and install loggers"""
-
- # configure the root first
- llist = cp.get("loggers", "keys")
- llist = string.split(llist, ",")
- llist = map(lambda x: string.strip(x), llist)
- llist.remove("root")
- sectname = "logger_root"
- root = logging.root
- log = root
- opts = cp.options(sectname)
- if "level" in opts:
- level = cp.get(sectname, "level")
- log.setLevel(logging._levelNames[level])
- for h in root.handlers[:]:
- root.removeHandler(h)
- hlist = cp.get(sectname, "handlers")
- if len(hlist):
- hlist = string.split(hlist, ",")
- for hand in hlist:
- log.addHandler(handlers[string.strip(hand)])
-
- #and now the others...
- #we don't want to lose the existing loggers,
- #since other threads may have pointers to them.
- #existing is set to contain all existing loggers,
- #and as we go through the new configuration we
- #remove any which are configured. At the end,
- #what's left in existing is the set of loggers
- #which were in the previous configuration but
- #which are not in the new configuration.
- existing = root.manager.loggerDict.keys()
- #now set up the new ones...
- for log in llist:
- sectname = "logger_%s" % log
- qn = cp.get(sectname, "qualname")
- opts = cp.options(sectname)
- if "propagate" in opts:
- propagate = cp.getint(sectname, "propagate")
- else:
- propagate = 1
- logger = logging.getLogger(qn)
- if qn in existing:
- existing.remove(qn)
- if "level" in opts:
- level = cp.get(sectname, "level")
- logger.setLevel(logging._levelNames[level])
- for h in logger.handlers[:]:
- logger.removeHandler(h)
- logger.propagate = propagate
- logger.disabled = 0
- hlist = cp.get(sectname, "handlers")
- if len(hlist):
- hlist = string.split(hlist, ",")
- for hand in hlist:
- logger.addHandler(handlers[string.strip(hand)])
-
- #Disable any old loggers. There's no point deleting
- #them as other threads may continue to hold references
- #and by disabling them, you stop them doing any logging.
- for log in existing:
- root.manager.loggerDict[log].disabled = 1
-
-
-def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
- """
- Start up a socket server on the specified port, and listen for new
- configurations.
-
- These will be sent as a file suitable for processing by fileConfig().
- Returns a Thread object on which you can call start() to start the server,
- and which you can join() when appropriate. To stop the server, call
- stopListening().
- """
- if not thread:
- raise NotImplementedError, "listen() needs threading to work"
-
- class ConfigStreamHandler(StreamRequestHandler):
- """
- Handler for a logging configuration request.
-
- It expects a completely new logging configuration and uses fileConfig
- to install it.
- """
- def handle(self):
- """
- Handle a request.
-
- Each request is expected to be a 4-byte length, packed using
- struct.pack(">L", n), followed by the config file.
- Uses fileConfig() to do the grunt work.
- """
- import tempfile
- try:
- conn = self.connection
- chunk = conn.recv(4)
- if len(chunk) == 4:
- slen = struct.unpack(">L", chunk)[0]
- chunk = self.connection.recv(slen)
- while len(chunk) < slen:
- chunk = chunk + conn.recv(slen - len(chunk))
- #Apply new configuration. We'd like to be able to
- #create a StringIO and pass that in, but unfortunately
- #1.5.2 ConfigParser does not support reading file
- #objects, only actual files. So we create a temporary
- #file and remove it later.
- file = tempfile.mktemp(".ini")
- f = open(file, "w")
- f.write(chunk)
- f.close()
- try:
- fileConfig(file)
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- traceback.print_exc()
- os.remove(file)
- except socket.error, e:
- if type(e.args) != types.TupleType:
- raise
- else:
- errcode = e.args[0]
- if errcode != RESET_ERROR:
- raise
-
- class ConfigSocketReceiver(ThreadingTCPServer):
- """
- A simple TCP socket-based logging config receiver.
- """
-
- allow_reuse_address = 1
-
- def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
- handler=None):
- ThreadingTCPServer.__init__(self, (host, port), handler)
- logging._acquireLock()
- self.abort = 0
- logging._releaseLock()
- self.timeout = 1
-
- def serve_until_stopped(self):
- import select
- abort = 0
- while not abort:
- rd, wr, ex = select.select([self.socket.fileno()],
- [], [],
- self.timeout)
- if rd:
- self.handle_request()
- logging._acquireLock()
- abort = self.abort
- logging._releaseLock()
-
- def serve(rcvr, hdlr, port):
- server = rcvr(port=port, handler=hdlr)
- global _listener
- logging._acquireLock()
- _listener = server
- logging._releaseLock()
- server.serve_until_stopped()
-
- return threading.Thread(target=serve,
- args=(ConfigSocketReceiver,
- ConfigStreamHandler, port))
-
-def stopListening():
- """
- Stop the listening server which was created with a call to listen().
- """
- global _listener
- if _listener:
- logging._acquireLock()
- _listener.abort = 1
- _listener = None
- logging._releaseLock()
12 gunicorn/sock.py
@@ -12,11 +12,11 @@
from gunicorn import util
-log = logging.getLogger(__name__)
class BaseSocket(object):
- def __init__(self, conf, fd=None):
+ def __init__(self, conf, log, fd=None):
+ self.log = log
self.conf = conf
self.address = conf.address
if fd is None:
@@ -46,7 +46,7 @@ def close(self):
try:
self.sock.close()
except socket.error, e:
- log.info("Error while closing socket %s", str(e))
+ self.log.info("Error while closing socket %s", str(e))
time.sleep(0.3)
del self.sock
@@ -94,7 +94,7 @@ def close(self):
super(UnixSocket, self).close()
os.unlink(self.address)
-def create_socket(conf):
+def create_socket(conf, log):
"""
Create a new socket for the given address. If the
address is a tuple, a TCP socket is created. If it
@@ -117,7 +117,7 @@ def create_socket(conf):
if 'GUNICORN_FD' in os.environ:
fd = int(os.environ.pop('GUNICORN_FD'))
try:
- return sock_type(conf, fd=fd)
+ return sock_type(conf, log, fd=fd)
except socket.error, e:
if e[0] == errno.ENOTCONN:
log.error("GUNICORN_FD should refer to an open socket.")
@@ -130,7 +130,7 @@ def create_socket(conf):
for i in range(5):
try:
- return sock_type(conf)
+ return sock_type(conf, log)
except socket.error, e:
if e[0] == errno.EADDRINUSE:
log.error("Connection in use: %s", str(addr))
2 gunicorn/workers/async.py
@@ -65,6 +65,8 @@ def handle_request(self, req, sock, addr):
try:
for item in respiter:
resp.write(item)
+
+ self.log.access(resp, environ)
resp.close()
finally:
if hasattr(respiter, "close"):
11 gunicorn/workers/base.py
@@ -27,7 +27,7 @@ class Worker(object):
PIPE = []
- def __init__(self, age, ppid, socket, app, timeout, cfg):
+ def __init__(self, age, ppid, socket, app, timeout, cfg, log):
"""\
This is called pre-fork so it shouldn't do anything to the
current process. If there's a need to make process wide
@@ -44,7 +44,7 @@ def __init__(self, age, ppid, socket, app, timeout, cfg):
self.nr = 0
self.max_requests = cfg.max_requests or sys.maxint
self.alive = True
- self.log = logging.getLogger(__name__)
+ self.log = log
self.debug = cfg.debug
self.address = self.socket.getsockname()
self.tmp = WorkerTmp(cfg)
@@ -92,6 +92,9 @@ def init_process(self):
# Prevent fd inherientence
util.close_on_exec(self.socket)
util.close_on_exec(self.tmp.fileno())
+
+ self.log.close_on_exec()
+
self.init_signals()
self.wsgi = self.app.wsgi()
@@ -106,6 +109,10 @@ def init_signals(self):
signal.signal(signal.SIGTERM, self.handle_exit)
signal.signal(signal.SIGINT, self.handle_exit)
signal.signal(signal.SIGWINCH, self.handle_winch)
+ signal.signal(signal.SIGUSR1, self.handle_usr1)
+
+ def handle_usr1(self, sig, frame):
+ self.log.reopen_files()
def handle_quit(self, sig, frame):
self.alive = False
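
With the new SIGUSR1 handler above, log files can be rotated externally and
then reopened; a sketch (the pid and file names are assumptions, in practice
they would come from your pid file and rotation tooling):

import os, signal

worker_pid = 84573                        # hypothetical pid, e.g. from the boot log above
os.rename("access.log", "access.log.1")   # move the current log out of the way
os.kill(worker_pid, signal.SIGUSR1)       # handle_usr1 calls self.log.reopen_files()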
1 gunicorn/workers/sync.py
@@ -101,6 +101,7 @@ def handle_request(self, req, client, addr):
else:
for item in respiter:
resp.write(item)
+ self.log.access(resp, environ)
resp.close()
finally:
if hasattr(respiter, "close"):
