From cce935bccc05783c544c338eb302539ed9284d58 Mon Sep 17 00:00:00 2001
From: Umesh Prasad Pathak
Date: Fri, 28 Sep 2018 13:39:36 +0545
Subject: [PATCH] Update handlers.py

For RotatingFileHandler: when gunicorn runs with multiple workers, each
worker has a separate RotatingFileHandler instance. Because of this, when a
rollover occurs, its effect is reflected only in the current worker. As a
result, during rotation, multiple new .log files are created and new log
records keep being emitted into these files (unexpected!). The situation is
also illustrated in `https://bugs.python.org/issue29001`. Hence, as a
solution, to propagate the rollover to all workers, both the stream size and
the file size have to be checked, and action taken accordingly.
---
 Lib/logging/handlers.py | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/Lib/logging/handlers.py b/Lib/logging/handlers.py
index e213e438c31aa1..6bcae6d656de41 100644
--- a/Lib/logging/handlers.py
+++ b/Lib/logging/handlers.py
@@ -183,8 +183,22 @@ def shouldRollover(self, record):
         if self.maxBytes > 0:                   # are we rolling over?
             msg = "%s\n" % self.format(record)
             self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
-            if self.stream.tell() + len(msg) >= self.maxBytes:
+            """
+            For gunicorn running with multiple workers, each worker will have
+            separate RotatingFileHandler instances. Due to this, when rollover occurs,
+            the effect is reflected only for the current worker. To propagate the rollover
+            for all workers, both stream_size and file_size has to be checked and take action
+            accordingly.
+            """
+            file_size = os.stat(self.baseFilename).st_size + len(msg)
+            stream_size = self.stream.tell() + len(msg)
+            if file_size >= self.maxBytes:
+                #Rollover Yes!
                 return 1
+            if stream_size >= self.maxBytes:
+                #Rollover already done by previous worker; just update the stream.
+                self.stream.close()
+                self.stream = open(self.baseFilename, 'a')
         return 0

 class TimedRotatingFileHandler(BaseRotatingHandler):