removed scrapy.log.started attribute, and avoid checking if log has already been started (since it should be called once anyway)
commit 1a905d62f599c6270fcd994604c8eb37af421d22 (1 parent: 1f89eb5)
authored by pablohoffman
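Any third-party code that used to consult ``scrapy.log.started`` before starting logging now needs its own guard, since the module-level flag is gone and ``start()`` no longer refuses to run twice. A minimal sketch of that pattern (the ``_log_started`` flag and ``ensure_logging`` helper are hypothetical, not part of Scrapy):

    from scrapy import log

    _log_started = False  # hypothetical replacement for the removed scrapy.log.started

    def ensure_logging(crawler):
        """Start Scrapy logging at most once per process."""
        global _log_started
        if not _log_started:
            log.start_from_crawler(crawler)
            _log_started = True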
docs/news.rst (1 changed line)
@@ -43,6 +43,7 @@ Scrapy changes:
 - DBM is now the default storage backend for HTTP cache middleware
 - number of log messages (per level) are now tracked through Scrapy stats (stat name: ``log_count/LEVEL``)
 - number received responses are now tracked through Scrapy stats (stat name: ``response_received_count``)
+- removed ``scrapy.log.started`` attribute
 Scrapyd changes:
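For context on the stats entries mentioned in the changelog above, they can be read back from a stats collector once a crawl has run; a rough sketch, assuming a ``stats`` object such as the crawler's stats collector is available:

    # Illustrative only: read the new counters from a stats collector.
    errors_logged = stats.get_value('log_count/ERROR', 0)
    responses_seen = stats.get_value('response_received_count', 0)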
docs/topics/logging.rst (4 changed lines)
@@ -53,10 +53,6 @@ scrapy.log module
 .. module:: scrapy.log
    :synopsis: Logging facility
-.. attribute:: started
-
-   A boolean which is ``True`` if logging has been started or ``False`` otherwise.
-
 .. function:: start(logfile=None, loglevel=None, logstdout=None)
    Start the logging facility. This must be called before actually logging any
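As a quick illustration of the documented API that remains, logging is started once and then used through ``msg()``; a minimal sketch (the file name and messages are just examples):

    from scrapy import log

    log.start(logfile='scrapy.log', loglevel='DEBUG', logstdout=True)
    log.msg("logging started", level=log.INFO)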
scrapy/command.py (6 changed lines)
@@ -20,6 +20,7 @@ class ScrapyCommand(object):
     def __init__(self):
         self.settings = None  # set in scrapy.cmdline
+        self.configured = False
     def set_crawler(self, crawler):
         assert not hasattr(self, '_crawler'), "crawler already set"
@@ -27,9 +28,10 @@ def set_crawler(self, crawler):
     @property
     def crawler(self):
-        if not log.started:
+        if not self.configured:
             log.start_from_crawler(self._crawler)
-        self._crawler.configure()
+            self._crawler.configure()
+            self.configured = True
         return self._crawler
     def syntax(self):
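The new ``configured`` flag makes the ``crawler`` property lazy and idempotent: the first access starts logging and configures the crawler, later accesses simply return it. A simplified sketch of the same pattern outside ``ScrapyCommand`` (the ``LazyCrawler`` class is illustrative, not part of the codebase):

    from scrapy import log

    class LazyCrawler(object):
        """Illustrative: configure the wrapped crawler on first access only."""

        def __init__(self, crawler):
            self._crawler = crawler
            self.configured = False

        @property
        def crawler(self):
            if not self.configured:
                log.start_from_crawler(self._crawler)
                self._crawler.configure()
                self.configured = True
            return self._crawler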
scrapy/log.py (23 changed lines)
@@ -29,8 +29,6 @@
     SILENT: "SILENT",
 }
-started = False
-
 class ScrapyFileLogObserver(log.FileLogObserver):
     def __init__(self, f, level=INFO, encoding='utf-8', crawler=None):
@@ -113,15 +111,14 @@ def _get_log_level(level_name_or_id):
         raise ValueError("Unknown log level: %r" % level_name_or_id)
 
 def start(logfile=None, loglevel='INFO', logstdout=True, logencoding='utf-8', crawler=None):
-    if log.defaultObserver: # check twisted log not already started
-        loglevel = _get_log_level(loglevel)
-        file = open(logfile, 'a') if logfile else sys.stderr
-        sflo = ScrapyFileLogObserver(file, loglevel, logencoding, crawler)
-        _oldshowwarning = warnings.showwarning
-        log.startLoggingWithObserver(sflo.emit, setStdout=logstdout)
-        # restore warnings, wrongly silenced by Twisted
-        warnings.showwarning = _oldshowwarning
-        return sflo
+    loglevel = _get_log_level(loglevel)
+    file = open(logfile, 'a') if logfile else sys.stderr
+    sflo = ScrapyFileLogObserver(file, loglevel, logencoding, crawler)
+    _oldshowwarning = warnings.showwarning
+    log.startLoggingWithObserver(sflo.emit, setStdout=logstdout)
+    # restore warnings, wrongly silenced by Twisted
+    warnings.showwarning = _oldshowwarning
+    return sflo
 
 def msg(message=None, _level=INFO, **kw):
     kw['logLevel'] = kw.pop('level', _level)
@@ -137,11 +134,9 @@ def err(_stuff=None, _why=None, **kw):
     log.err(_stuff, _why, **kw)
 
 def start_from_crawler(crawler):
-    global started
     settings = crawler.settings
-    if started or not settings.getbool('LOG_ENABLED'):
+    if not settings.getbool('LOG_ENABLED'):
         return
-    started = True
     start(settings['LOG_FILE'], settings['LOG_LEVEL'], settings['LOG_STDOUT'],
         settings['LOG_ENCODING'], crawler)
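``start_from_crawler()`` now starts logging unconditionally whenever ``LOG_ENABLED`` is true, pulling everything else from the crawler settings, so it is expected to be called exactly once (as ``ScrapyCommand.crawler`` above now guarantees). The settings it reads, with illustrative values:

    # settings.py (values shown are just examples)
    LOG_ENABLED = True
    LOG_FILE = 'crawl.log'    # falls back to stderr when not set
    LOG_LEVEL = 'INFO'
    LOG_STDOUT = False
    LOG_ENCODING = 'utf-8'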