Permalink
Browse files

shouldn't clear out the crawler queue after restart...

  • Loading branch information...
1 parent 0cbc9e4 commit ee52dd8e948c4383786bf5e75814e97c3629d482 @bianjiang committed Nov 10, 2013
Showing with 2 additions and 2 deletions.
  1. +1 −1 tweetf0rm/bootstrap.py
  2. +1 −1 tweetf0rm/scheduler.py
View
@@ -139,7 +139,7 @@ def start_server(config, proxies):
# block, the main process...for a command
if(not scheduler.is_alive()):
- logger.info("no crawler is alive... i'm done too...")
+ logger.info("no crawler is alive... waiting to recreate all crawlers...")
time.sleep(120) # sleep for two minutes and retry
continue
View
@@ -81,7 +81,7 @@ def new_crawler(self, node_id, apikeys, config, crawler_proxies = None):
crawler = UserRelationshipCrawler(node_id, crawler_id, copy.copy(apikeys), handlers=[create_handler(file_handler_config)], redis_config=copy.copy(config['redis_config']), proxies=crawler_proxies)
if (crawler_id in self.crawlers):
- self.crawlers[crawler_id].clear()
+ #self.crawlers[crawler_id].clear()
del self.crawlers[crawler_id]
self.crawlers[crawler_id] = {

0 comments on commit ee52dd8

Please sign in to comment.