From 838e314a21fef9ca6cf99140b2400e0d3b68b109 Mon Sep 17 00:00:00 2001
From: Matt Riedemann
Date: Tue, 8 Dec 2015 13:30:33 -0800
Subject: [PATCH] Don't trace DB errors when we're retrying

If we know we're retrying on DB failures, let's be smarter and not
trace an exception at ERROR level while looping, since this clutters
up the logs and causes confusion when trying to track down failures.

DBDeadlock errors show up quite a bit in q-svc and n-api logs in gate
runs, but when you check logstash for those, they are primarily in
jobs that are successful, so the trace is on the first try in the
loop and then we pass on the second try - but the trace itself is
confusing while debugging gate failures.

So let's just be smarter and log at debug level between retries, and
if we hit an unexpected exception, log that error (as before), and
raise it up to the caller.

Closes-Bug: #1523990
Change-Id: I15b4a9b5c7ec9bfede9ec9989de02c1da46eac81
---
 oslo_db/api.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/oslo_db/api.py b/oslo_db/api.py
index 9e2c45df..dcba2a33 100644
--- a/oslo_db/api.py
+++ b/oslo_db/api.py
@@ -139,6 +139,11 @@ def wrapper(*args, **kwargs):
                     with excutils.save_and_reraise_exception() as ectxt:
                         if remaining > 0:
                             ectxt.reraise = not self._is_exception_expected(e)
+                            if ectxt.reraise:
+                                # We got an unexpected exception so stop
+                                # retrying, log it and raise it up to the
+                                # caller.
+                                LOG.exception(_LE('DB error.'))
                         else:
                             LOG.exception(_LE('DB exceeded retry limit.'))
                             # if it's a RetryRequest, we need to unpack it
@@ -166,7 +171,7 @@ def _is_exception_expected(self, exc):
             # and not an error condition in case retries are
             # not exceeded
             if not isinstance(exc, exception.RetryRequest):
-                LOG.exception(_LE('DB error.'))
+                LOG.debug('DB error: %s', exc)
             return True
         return self.exception_checker(exc)
 
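
Note (not part of the patch): below is a minimal sketch of how callers
typically reach this code path through the public wrap_db_retry
decorator in oslo_db/api.py, to show what the logging change means in
practice. The decorated function update_quota and its arguments are
hypothetical, and the keyword arguments (max_retries,
retry_on_deadlock) are assumed to match the wrap_db_retry signature of
oslo.db in this era; only the logging behaviour noted in the comments
is what this change affects.

    # Sketch only: illustrates the retry/logging behaviour after this patch.
    # update_quota is a made-up function; max_retries/retry_on_deadlock are
    # assumed to match the wrap_db_retry signature in this oslo.db release.
    from oslo_db import api as oslo_db_api


    @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
    def update_quota(context, project_id, resource, limit):
        # With this patch applied:
        #  - an oslo_db.exception.DBDeadlock (expected when
        #    retry_on_deadlock=True) raised on an early attempt is now logged
        #    at DEBUG instead of ERROR, and the call is retried;
        #  - an unexpected exception is still re-raised to the caller, and is
        #    logged at ERROR once ('DB error.') from the retry loop itself;
        #  - exhausting the retries still logs 'DB exceeded retry limit.' at
        #    ERROR, unchanged by this patch.
        pass  # real DB session/query code would go here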