
MB-100: use standard logging in stats module

Change-Id: Ib8aecad67d18a79e46acbf696b4c64b2d6d6e8ac
Reviewed-on: http://review.couchbase.org/22548
Reviewed-by: Ronnie Sun <ronnie@couchbase.com>
Tested-by: Pavel Paulau <pavel.paulau@gmail.com>
1 parent 0e87c8d, commit 7d4a8f10cbccda6b6b80cd8f151d2a27c46d5373, committed by pavel-paulau on Nov 14, 2012
Showing with 27 additions and 25 deletions.
  1. +27 −25 lib/membase/performance/stats.py
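
The change replaces ad-hoc print statements in lib/membase/performance/stats.py with the standard logging module, configured once at import time. Condensed, the pattern looks like this (the logger setup and messages are taken from the diff below; the rest is an illustrative sketch, and it assumes mcsoda.logging.conf exists in the working directory):

    import logging
    import logging.config

    # Load handler/formatter settings from an external file
    # (a sketch of that file follows the first hunk below).
    logging.config.fileConfig('mcsoda.logging.conf')
    log = logging.getLogger()

    # Progress messages become info-level records instead of bare prints:
    log.info("Collecting bucket size stats")
    # Recoverable failures become warnings:
    log.warn("Unable to read bucket stats")
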
lib/membase/performance/stats.py
@@ -4,6 +4,8 @@
import time
import gzip
from collections import defaultdict
+import logging
+import logging.config
from lib.membase.api.rest_client import RestConnection
from lib.memcached.helper.data_helper import MemcachedClientHelper
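
The next hunk adds a logging.config.fileConfig('mcsoda.logging.conf') call at module level. That config file is not part of this commit, so the contents here are an assumption for illustration; a minimal file in the INI format that fileConfig parses could look like:

    [loggers]
    keys=root

    [handlers]
    keys=console

    [formatters]
    keys=simple

    [logger_root]
    level=INFO
    handlers=console

    [handler_console]
    class=StreamHandler
    level=INFO
    args=(sys.stdout,)
    formatter=simple

    [formatter_simple]
    format=%(asctime)s %(levelname)s %(message)s

With a configuration like this, each log.info(...) call prints a timestamped line to stdout, much like the prints it replaces, while the level and destination stay adjustable without code changes.
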
@@ -13,6 +15,9 @@
RETRIES = 10
+logging.config.fileConfig('mcsoda.logging.conf')
+log = logging.getLogger()
+
def histo_percentile(histo, percentiles):
"""The histo dict is returned by add_timing_sample(). The percentiles must
@@ -209,16 +214,16 @@ def get_bucket_size(self, bucket, rest, frequency):
self._task["bucket_size"] = []
d = []
while not self._aborted():
- print "Collecting bucket size stats"
+ log.info("Collecting bucket size stats")
status, db_size = rest.get_database_disk_size(bucket)
if status:
d.append(db_size)
else:
- print "Enable to read bucket stats"
+ log.warn("Unable to read bucket stats")
time.sleep(frequency)
self._task["bucket_size"] = d
- print "finished bucket size stats"
+ log.info("Finished bucket size stats")
def get_data_file_size(self, nodes, frequency, bucket):
shells = []
@@ -257,7 +262,7 @@ def get_data_file_size(self, nodes, frequency, bucket):
d["snapshots"].append(value.copy())
i += 1
self._task["data_size_stats"] = d["snapshots"]
- print " finished data_size_stats"
+ log.info("Finished data_size_stats")
#ops stats
#{'tot-sets': 899999, 'tot-gets': 1, 'tot-items': 899999, 'tot-creates': 899999}
@@ -327,8 +332,7 @@ def machine_stats(self, nodes):
self._task["machinestats"] = machine_stats
def reb_stats(self, start, dur):
- print "[reb_stats] recording reb start = {0}, reb duration = {1}"\
- .format(start, dur)
+ log.info("Recording reb start = {0}, reb duration = {1}".format(start, dur))
self._reb_stats["reb_start"] = start
self._reb_stats["reb_dur"] = dur
@@ -404,7 +408,7 @@ def system_stats(self, nodes, pnames, frequency, verbosity=False):
d["snapshots"].append(value)
i += 1
self._task["systemstats"] = d["snapshots"]
- print " finished system_stats"
+ log.info("Finished system_stats")
def iostats(self, nodes, frequency, verbosity=False):
@@ -419,11 +423,11 @@ def iostats(self, nodes, frequency, verbosity=False):
self._task["iostats"] = []
- print "started capturing io stats"
+ log.info("Started capturing io stats")
while not self._aborted():
time.sleep(frequency)
- print "collecting io_stats"
+ log.info("Collecting io_stats")
for shell in shells:
try:
kB_read, kB_wrtn, util, iowait, idle = \
@@ -438,7 +442,7 @@ def iostats(self, nodes, frequency, verbosity=False):
"util": util,
"iowait": iowait,
"idle": idle})
- print "finished capturing io stats"
+ log.info("Finished capturing io stats")
def couchdb_stats(nodes):
pass
@@ -447,8 +451,7 @@ def capture_mb_snapshot(self, node):
"""
Capture membase stats snapshot manually
"""
- print "[capture_mb_snapshot] capturing memcache stats snapshot for {0}"\
- .format(node.ip)
+ log.info("Capturing memcache stats snapshot for {0}".format(node.ip))
stats = {}
try:
@@ -457,15 +460,15 @@ def capture_mb_snapshot(self, node):
stats = mc.stats()
stats.update(mc.stats("warmup"))
except Exception as e:
- print "[capture_mb_snapshot] Exception: {0}".format(str(e))
+ log.error("Exception: {0}".format(str(e)))
return False
finally:
stats["time"] = time.time()
stats["ip"] = node.ip
self._mb_stats["snapshots"].append(stats)
print stats
- print "[capture_mb_snapshot] memcache stats snapshot captured"
+ log.info("Memcache stats snapshot captured")
return True
def membase_stats(self, nodes, bucket, frequency, verbose=False):
@@ -486,7 +489,7 @@ def membase_stats(self, nodes, bucket, frequency, verbose=False):
while not self._aborted():
time_left = frequency
- print "Collecting membase stats"
+ log.info("Collecting membase stats")
timings = None
# at minimum we want to check for aborted every minute
while not self._aborted() and time_left > 0:
@@ -504,8 +507,7 @@ def membase_stats(self, nodes, bucket, frequency, verbose=False):
mem_stats = mc.stats('memory')
stats.update(mem_stats)
except Exception as e:
- print "[memebase_stats] Exception: {0}, retries = {1}"\
- .format(str(e), retries)
+ log.error("{0}, retries = {1}".format(str(e), retries))
time.sleep(2)
mc.reconnect()
retries += 1
@@ -544,14 +546,14 @@ def membase_stats(self, nodes, bucket, frequency, verbose=False):
dispatcher['ip'] = ip
self._task["dispatcher"].append(dispatcher)
- if timings:
- # TODO dump timings for all servers
- print "Dumping disk timing stats: {0}".format(time.strftime('%X %x %Z'))
+ if timings: # TODO: dump timings for all servers
+ timestamp = time.strftime('%X %x %Z')
+ log.info("Dumping disk timing stats: {0}".format(timestamp))
for key, value in sorted(timings.iteritems()):
if key.startswith("disk"):
print "{0:50s}: {1}".format(key, value)
- print " finished membase_stats"
+ log.info("Finished membase_stats")
def ns_server_stats(self, nodes, bucket, frequency, verbose=False):
@@ -564,7 +566,7 @@ def ns_server_stats(self, nodes, bucket, frequency, verbose=False):
while not self._aborted():
time.sleep(frequency)
- print "Collecting ns_server_stats"
+ log.info("Collecting ns_server_stats")
for node in nodes:
rest = RestConnection(node)
data_json = rest.fetch_bucket_stats(bucket=bucket, zoom='minute')
@@ -584,15 +586,15 @@ def ns_server_stats(self, nodes, bucket, frequency, verbose=False):
for snapshot in d[node]["system_snapshots"]:
self._task["ns_server_stats_system"].append(snapshot)
- print " finished ns_server_stats"
+ log.info("Finished ns_server_stats")
def collect_indexing_stats(self, nodes, bucket, ddoc, frequency):
"""Collect view indexing stats"""
self._task['view_info'] = list()
while not self._aborted():
time.sleep(frequency)
- print "Collecting view indexing stats"
+ log.info("Collecting view indexing stats")
for node in nodes:
rest = RestConnection(node)
data = rest.set_view_info(bucket, ddoc)
@@ -608,7 +610,7 @@ def collect_indexing_stats(self, nodes, bucket, ddoc, frequency):
'indexing_time': avg_time,
'timestamp': time.time()})
- print "Finished collecting view indexing stats"
+ log.info("Finished collecting view indexing stats")
def measure_indexing_throughput(self, nodes):
self._task['indexer_info'] = list()
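
One design note on the conversion: logging.getLogger() with no argument returns the root logger, a process-wide singleton, so the fileConfig call runs once at import and governs output for everything that logs afterwards. A quick sketch of that property (illustrative, independent of this module):

    import logging

    a = logging.getLogger()
    b = logging.getLogger()
    assert a is b  # the unnamed root logger is shared across the process

A caveat worth knowing: by default fileConfig disables any loggers that already exist and are not named in the config file, so the configuration should be loaded before other loggers are created.
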
