MB-100: use standard logging in stats module

Change-Id: Ib8aecad67d18a79e46acbf696b4c64b2d6d6e8ac
Reviewed-on: http://review.couchbase.org/22548
Reviewed-by: Ronnie Sun <ronnie@couchbase.com>
Tested-by: Pavel Paulau <pavel.paulau@gmail.com>
commit 7d4a8f10cbccda6b6b80cd8f151d2a27c46d5373 (1 parent: 0e87c8d)
Authored by Pavel Paulau (pavel-paulau)

Showing 1 changed file with 27 additions and 25 deletions.

  1. +27 −25  lib/membase/performance/stats.py

--- a/lib/membase/performance/stats.py
+++ b/lib/membase/performance/stats.py
@@ -4,6 +4,8 @@
 import time
 import gzip
 from collections import defaultdict
+import logging
+import logging.config
 
 from lib.membase.api.rest_client import RestConnection
 from lib.memcached.helper.data_helper import MemcachedClientHelper
@@ -13,6 +15,9 @@
 
 RETRIES = 10
 
+logging.config.fileConfig('mcsoda.logging.conf')
+log = logging.getLogger()
+
 
 def histo_percentile(histo, percentiles):
     """The histo dict is returned by add_timing_sample(). The percentiles must
@@ -209,16 +214,16 @@ def get_bucket_size(self, bucket, rest, frequency):
         self._task["bucket_size"] = []
         d = []
         while not self._aborted():
-            print "Collecting bucket size stats"
+            log.info("Collecting bucket size stats")
             status, db_size = rest.get_database_disk_size(bucket)
             if status:
                 d.append(db_size)
             else:
-                print "Enable to read bucket stats"
+                log.warn("Enable to read bucket stats")
             time.sleep(frequency)
 
         self._task["bucket_size"] = d
-        print "finished bucket size stats"
+        log.info("Finished bucket size stats")
 
     def get_data_file_size(self, nodes, frequency, bucket):
         shells = []
@@ -257,7 +262,7 @@ def get_data_file_size(self, nodes, frequency, bucket):
                 d["snapshots"].append(value.copy())
                 i += 1
         self._task["data_size_stats"] = d["snapshots"]
-        print " finished data_size_stats"
+        log.info("Finished data_size_stats")
 
     #ops stats
     #{'tot-sets': 899999, 'tot-gets': 1, 'tot-items': 899999, 'tot-creates': 899999}
@@ -327,8 +332,7 @@ def machine_stats(self, nodes):
         self._task["machinestats"] = machine_stats
 
     def reb_stats(self, start, dur):
-        print "[reb_stats] recording reb start = {0}, reb duration = {1}"\
-            .format(start, dur)
+        log.info("Recording reb start = {0}, reb duration = {1}".format(start, dur))
         self._reb_stats["reb_start"] = start
         self._reb_stats["reb_dur"] = dur
 
@@ -404,7 +408,7 @@ def system_stats(self, nodes, pnames, frequency, verbosity=False):
                 d["snapshots"].append(value)
                 i += 1
         self._task["systemstats"] = d["snapshots"]
-        print " finished system_stats"
+        log.info("Finished system_stats")
 
     def iostats(self, nodes, frequency, verbosity=False):
 
@@ -419,11 +423,11 @@ def iostats(self, nodes, frequency, verbosity=False):
 
         self._task["iostats"] = []
 
-        print "started capturing io stats"
+        log.info("Started capturing io stats")
 
         while not self._aborted():
             time.sleep(frequency)
-            print "collecting io_stats"
+            log.info("Collecting io_stats")
             for shell in shells:
                 try:
                     kB_read, kB_wrtn, util, iowait, idle = \
@@ -438,7 +442,7 @@ def iostats(self, nodes, frequency, verbosity=False):
                                               "util": util,
                                               "iowait": iowait,
                                               "idle": idle})
-        print "finished capturing io stats"
+        log.info("Finished capturing io stats")
 
     def couchdb_stats(nodes):
         pass
@@ -447,8 +451,7 @@ def capture_mb_snapshot(self, node):
         """
         Capture membase stats snapshot manually
         """
-        print "[capture_mb_snapshot] capturing memcache stats snapshot for {0}"\
-            .format(node.ip)
+        log.info("Capturing memcache stats snapshot for {0}".format(node.ip))
         stats = {}
 
         try:
@@ -457,7 +460,7 @@ def capture_mb_snapshot(self, node):
             stats = mc.stats()
             stats.update(mc.stats("warmup"))
         except Exception as e:
-            print "[capture_mb_snapshot] Exception: {0}".format(str(e))
+            log.error("Exception: {0}".format(str(e)))
             return False
         finally:
             stats["time"] = time.time()
@@ -465,7 +468,7 @@ def capture_mb_snapshot(self, node):
             self._mb_stats["snapshots"].append(stats)
             print stats
 
-        print "[capture_mb_snapshot] memcache stats snapshot captured"
+        log.info("Memcache stats snapshot captured")
         return True
 
     def membase_stats(self, nodes, bucket, frequency, verbose=False):
@@ -486,7 +489,7 @@ def membase_stats(self, nodes, bucket, frequency, verbose=False):
 
         while not self._aborted():
             time_left = frequency
-            print "Collecting membase stats"
+            log.info("Collecting membase stats")
             timings = None
             # at minimum we want to check for aborted every minute
             while not self._aborted() and time_left > 0:
@@ -504,8 +507,7 @@ def membase_stats(self, nodes, bucket, frequency, verbose=False):
                         mem_stats = mc.stats('memory')
                         stats.update(mem_stats)
                     except Exception as e:
-                        print "[memebase_stats] Exception: {0}, retries = {1}"\
-                            .format(str(e), retries)
+                        log.error("{0}, retries = {1}".format(str(e), retries))
                         time.sleep(2)
                         mc.reconnect()
                         retries += 1
@@ -544,14 +546,14 @@ def membase_stats(self, nodes, bucket, frequency, verbose=False):
                 dispatcher['ip'] = ip
                 self._task["dispatcher"].append(dispatcher)
 
-            if timings:
-                # TODO dump timings for all servers
-                print "Dumping disk timing stats: {0}".format(time.strftime('%X %x %Z'))
+            if timings:  # TODO: dump timings for all servers
+                timestamp = time.strftime('%X %x %Z')
+                log.info("Dumping disk timing stats: {0}".format(timestamp))
                 for key, value in sorted(timings.iteritems()):
                     if key.startswith("disk"):
                         print "{0:50s}: {1}".format(key, value)
 
-        print " finished membase_stats"
+        log.info("Finished membase_stats")
 
     def ns_server_stats(self, nodes, bucket, frequency, verbose=False):
 
@@ -564,7 +566,7 @@ def ns_server_stats(self, nodes, bucket, frequency, verbose=False):
 
         while not self._aborted():
             time.sleep(frequency)
-            print "Collecting ns_server_stats"
+            log.info("Collecting ns_server_stats")
             for node in nodes:
                 rest = RestConnection(node)
                 data_json = rest.fetch_bucket_stats(bucket=bucket, zoom='minute')
@@ -584,7 +586,7 @@ def ns_server_stats(self, nodes, bucket, frequency, verbose=False):
             for snapshot in d[node]["system_snapshots"]:
                 self._task["ns_server_stats_system"].append(snapshot)
 
-        print " finished ns_server_stats"
+        log.info("Finished ns_server_stats")
 
     def collect_indexing_stats(self, nodes, bucket, ddoc, frequency):
         """Collect view indexing stats"""
@@ -592,7 +594,7 @@ def collect_indexing_stats(self, nodes, bucket, ddoc, frequency):
 
         while not self._aborted():
             time.sleep(frequency)
-            print "Collecting view indexing stats"
+            log.info("Collecting view indexing stats")
             for node in nodes:
                 rest = RestConnection(node)
                 data = rest.set_view_info(bucket, ddoc)
@@ -608,7 +610,7 @@ def collect_indexing_stats(self, nodes, bucket, ddoc, frequency):
                                                 'indexing_time': avg_time,
                                                 'timestamp': time.time()})
 
-        print "Finished collecting view indexing stats"
+        log.info("Finished collecting view indexing stats")
 
     def measure_indexing_throughput(self, nodes):
         self._task['indexer_info'] = list()
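
The commit assumes an mcsoda.logging.conf file that logging.config.fileConfig() can load, but the file itself is not part of this diff. Below is a minimal sketch of what such a fileConfig-style config could look like; the handler, formatter, and format string are illustrative assumptions, not the project's actual settings.

# mcsoda.logging.conf -- illustrative sketch only; the real file is not shown in this commit
[loggers]
keys=root

[handlers]
keys=console

[formatters]
keys=default

[logger_root]
level=INFO
handlers=console

[handler_console]
class=StreamHandler
level=INFO
formatter=default
args=(sys.stdout,)

[formatter_default]
format=[%(asctime)s] [%(levelname)s] %(message)s

With a config along these lines, logging.getLogger() with no name returns the root logger, so the log.info()/log.warn()/log.error() calls introduced above go through the configured handler and level instead of bare print statements. Note that fileConfig() opens the relative path 'mcsoda.logging.conf' against the current working directory, so the tests need to be launched from the directory that contains it.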
