clean up logging lines, removing or switching to debug level

asyncmongo throws exceptions and passes errors to callbacks, so the error
information is not lost, but the client application should be able to
control what is logged and how.

Some logging lines were already commented out; most of those were removed.
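
For context, a minimal sketch of the intended division of labor, assuming the
usual asyncmongo.Client / collection.find(..., callback=...) usage from the
project README; the logger name, database, collection, and query here are
hypothetical:

    import logging
    import asyncmongo
    import tornado.ioloop

    # Application-owned logger: with this change asyncmongo no longer logs
    # errors at error level on its own, so the application decides what is
    # recorded and where.
    logger = logging.getLogger('myapp.db')

    db = asyncmongo.Client(pool_id='mydb', host='127.0.0.1', port=27017,
                           dbname='test')

    def handle_result(result, error=None):
        # Errors reach the application through the callback's error argument
        # (see _handle_response in cursor.py below) or as raised exceptions.
        if error:
            logger.warning('query failed: %s', error)
        else:
            logger.debug('got %d documents', len(result))
        tornado.ioloop.IOLoop.instance().stop()

    db.users.find({'active': True}, limit=10, callback=handle_result)
    tornado.ioloop.IOLoop.instance().start()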
1 parent 03c4b9a, commit dc85ee689dd856ab535dec157c6b2520a9a173f7, committed by ploxiln on Jan 11, 2013
Showing with 9 additions and 16 deletions.
  1. +4 −11 asyncmongo/connection.py
  2. +3 −3 asyncmongo/cursor.py
  3. +2 −2 asyncmongo/pool.py
15 asyncmongo/connection.py
@@ -130,7 +130,6 @@ def _send_message(self, message):
self.usage_count +=1
# __request_id used by get_more()
(self.__request_id, data) = message
- # logging.info('request id %d writing %r' % (self.__request_id, data))
try:
self.__stream.write(data)
if self.__callback:
@@ -146,24 +145,20 @@ def _send_message(self, message):
def _parse_header(self, header):
# return self.__receive_data_on_socket(length - 16, sock)
- # logging.info('got data %r' % header)
length = int(struct.unpack("<i", header[:4])[0])
request_id = struct.unpack("<i", header[8:12])[0]
assert request_id == self.__request_id, \
"ids don't match %r %r" % (self.__request_id,
request_id)
operation = 1 # who knows why
assert operation == struct.unpack("<i", header[12:])[0]
- # logging.info('%s' % length)
- # logging.info('waiting for another %d bytes' % length - 16)
try:
self.__stream.read(length - 16, callback=self._parse_response)
except IOError, e:
self.__alive = False
raise
def _parse_response(self, response):
- # logging.info('got data %r' % response)
callback = self.__callback
request_id = self.__request_id
self.__request_id = None
@@ -177,22 +172,20 @@ def _parse_response(self, response):
try:
response = helpers._unpack_response(response, request_id) # TODO: pass tz_aware
except Exception, e:
- logging.error('error %s' % e)
+ logging.debug('error %s' % e)
callback(None, e)
return
if response and response['data'] and response['data'][0].get('err') and response['data'][0].get('code'):
- # logging.error(response['data'][0]['err'])
callback(response, IntegrityError(response['data'][0]['err'], code=response['data'][0]['code']))
return
- # logging.info('response: %s' % response)
callback(response)
def _start_authentication(self, response, error=None):
# this is the nonce response
if error:
- logging.error(error)
- logging.error(response)
+ logging.debug(error)
+ logging.debug(response)
raise AuthenticationError(error)
nonce = response['data'][0]['nonce']
key = helpers._auth_key(nonce, self.__dbuser, self.__dbpass)
@@ -214,7 +207,7 @@ def _finish_authentication(self, response, error=None):
assert response['number_returned'] == 1
response = response['data'][0]
if response['ok'] != 1:
- logging.error('Failed authentication %s' % response['errmsg'])
+ logging.debug('Failed authentication %s' % response['errmsg'])
self.__deferred_message = None
self.__deferred_callback = None
raise AuthenticationError(response['errmsg'])
6 asyncmongo/cursor.py
@@ -390,7 +390,7 @@ def find(self, spec=None, fields=None, skip=0, limit=0,
self.__fields),
callback=functools.partial(self._handle_response, orig_callback=callback))
except Exception, e:
- logging.error('Error sending query %s' % e)
+ logging.debug('Error sending query %s' % e)
connection.close()
raise
@@ -402,12 +402,12 @@ def _handle_response(self, result, error=None, orig_callback=None):
message.kill_cursors([result['cursor_id']]),
callback=None)
except Exception, e:
- logging.error('Error killing cursor %s: %s' % (result['cursor_id'], e))
+ logging.debug('Error killing cursor %s: %s' % (result['cursor_id'], e))
connection.close()
raise
if error:
- logging.error('%s %s' % (self.full_collection_name , error))
+ logging.debug('%s %s' % (self.full_collection_name , error))
orig_callback(None, error=error)
else:
if self.__limit == -1 and len(result['data']) == 1:
4 asyncmongo/pool.py
@@ -127,7 +127,7 @@ def cache(self, con):
"""Put a dedicated connection back into the idle cache."""
if self._maxusage and con.usage_count > self._maxusage:
self._connections -=1
- # logging.info('dropping connection %s uses past max usage %s' % (con.usage_count, self._maxusage))
+ logging.debug('dropping connection %s uses past max usage %s' % (con.usage_count, self._maxusage))
con._close()
return
self._condition.acquire()
@@ -140,7 +140,7 @@ def cache(self, con):
# the idle cache is not full, so put it there
self._idle_cache.append(con)
else: # if the idle cache is already full,
- # logging.info('dropping connection. connection pool (%s) is full. maxcached %s' % (len(self._idle_cache), self._maxcached))
+ logging.debug('dropping connection. connection pool (%s) is full. maxcached %s' % (len(self._idle_cache), self._maxcached))
con._close() # then close the connection
self._condition.notify()
finally:
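
Since the remaining messages go through the standard logging module at debug
level, the application controls their visibility with ordinary logging
configuration; for example:

    import logging

    # asyncmongo calls the module-level logging functions, which use the root
    # logger, so raising the root level above DEBUG hides these messages.
    logging.basicConfig(level=logging.INFO)

    # Lower it again while debugging to see them:
    # logging.getLogger().setLevel(logging.DEBUG)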
