Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

migrate cbtop project from cbmonitor

  • Loading branch information...
commit 6662336d5e8b07e005f457f488730b4a294d7a4b 0 parents
@pavel-paulau pavel-paulau authored
Showing with 63,417 additions and 0 deletions.
  1. +7 −0 .gitignore
  2. +202 −0 LICENSE.txt
  3. +2 −0  MANIFEST.in
  4. +416 −0 TestInput.py
  5. +70 −0 cbtop.py
  6. 0  libcbtop/__init__.py
  7. +28 −0 libcbtop/collector.py
  8. +26 −0 libcbtop/data_helper.py
  9. +13 −0 libcbtop/handler.py
  10. +30 −0 libcbtop/json_handler.py
  11. +49 −0 libcbtop/main.py
  12. +51 −0 libcbtop/mc_collector.py
  13. +82 −0 libcbtop/mc_source.py
  14. +170 −0 libcbtop/paint.py
  15. +27 −0 libcbtop/seriesly_handler.py
  16. +28 −0 libcbtop/server.py
  17. +27 −0 libcbtop/source.py
  18. +106 −0 libcbtop/sys_helper.py
  19. +276 −0 libcbtop/visit_cb.py
  20. +33 −0 logging.conf
  21. +37 −0 metadata/Makefile
  22. 0  metadata/__init__.py
  23. +19 −0 metadata/ns_server-meta/2.0.0/couchBase__replicator.json
  24. +2 −0  metadata/ns_server-meta/2.0.0/couchBase__replicator__design__replicator_info__view_infos.json
  25. +2 −0  metadata/ns_server-meta/2.0.0/nodeStatuses.json
  26. +280 −0 metadata/ns_server-meta/2.0.0/pools_default.json
  27. +3,207 −0 metadata/ns_server-meta/2.0.0/pools_default_buckets.json
  28. +14 −0 metadata/ns_server-meta/2.0.0/pools_default_buckets_BUCKET_nodes.json
  29. +9,352 −0 metadata/ns_server-meta/2.0.0/pools_default_buckets_BUCKET_nodes_HOST%3APORT_stats.json
  30. +2 −0  metadata/ns_server-meta/2.0.0/pools_default_rebalanceProgress.json
  31. +8,802 −0 metadata/ns_server-meta/2.0.0/pools_default_stats.json
  32. +2 −0  metadata/ns_server-meta/2.0.0/pools_default_tasks.json
  33. +161 −0 metadata/ns_server.json
  34. BIN  metadata/ns_server.tar.gz
  35. +23 −0 metadata/ns_server/2.0.0/couchBase__replicator.json
  36. +2 −0  metadata/ns_server/2.0.0/couchBase__replicator__design__replicator_info__view_infos.json
  37. +2 −0  metadata/ns_server/2.0.0/nodeStatuses.json
  38. +302 −0 metadata/ns_server/2.0.0/pools_default.json
  39. +3,236 −0 metadata/ns_server/2.0.0/pools_default_buckets.json
  40. +3,170 −0 metadata/ns_server/2.0.0/pools_default_buckets_BUCKET.json
  41. +14 −0 metadata/ns_server/2.0.0/pools_default_buckets_BUCKET_nodes.json
  42. +9 −0 metadata/ns_server/2.0.0/pools_default_buckets_BUCKET_nodes_HOST%3APORT.json
  43. +9,352 −0 metadata/ns_server/2.0.0/pools_default_buckets_BUCKET_nodes_HOST%3APORT_stats.json
  44. +8,796 −0 metadata/ns_server/2.0.0/pools_default_buckets_BUCKET_stats.json
  45. +649 −0 metadata/ns_server/2.0.0/pools_default_buckets_BUCKET_statsDirectory.json
  46. +2 −0  metadata/ns_server/2.0.0/pools_default_rebalanceProgress.json
  47. +8,802 −0 metadata/ns_server/2.0.0/pools_default_stats.json
  48. +2 −0  metadata/ns_server/2.0.0/pools_default_tasks.json
  49. +94 −0 metadata/stats-conv
  50. +2,953 −0 metadata/stats.json
  51. +21 −0 metadata/stats.json.sample
  52. +795 −0 metadata/stats.org
  53. +555 −0 metadata/stats.org.gen
  54. +555 −0 metadata/stats.src
  55. +47 −0 metadata/visit-conv
  56. +127 −0 metadata/visit-zero
  57. +349 −0 metadata/visit.py
  58. +37 −0 setup.py
7 .gitignore
@@ -0,0 +1,7 @@
+*.idea
+*.log
+*.log.*
+*.pyc
+MemcachedSource-*.json
+.installed.cfg
+*.egg-info
202 LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
2  MANIFEST.in
@@ -0,0 +1,2 @@
+include metadata/stats.json
+recursive-include metadata/ns_server *
416 TestInput.py
@@ -0,0 +1,416 @@
+import getopt
+import re
+from cbtestlib.builds.build_query import BuildQuery
+import ConfigParser
+import os
+import collections
+
+#class to parse the inputs either from command line or from a ini file
+#command line supports a subset of
+# configuration
+# which tests
+# ideally should accept a regular expression
+
+
+class TestInputSingleton():
+ input = None
+
+
+class TestInput(object):
+
+ def __init__(self):
+ self.servers = []
+ self.moxis = []
+ self.clusters = {}
+ self.membase_settings = None
+ self.test_params = {}
+ #servers , each server can have u,p,port,directory
+
+ def param(self, name, default_value):
+ if name in self.test_params:
+ return TestInput._parse_param(self.test_params[name])
+ else:
+ return default_value
+
+ @staticmethod
+ def _parse_param(value):
+ try:
+ return int(value)
+ except ValueError:
+ pass
+
+ try:
+ return float(value)
+ except ValueError:
+ pass
+
+ if value.lower() == "false":
+ return False
+
+ if value.lower() == "true":
+ return True
+
+ return value
+
+
+class TestInputServer(object):
+ def __init__(self):
+ self.ip = ''
+ self.ssh_username = ''
+ self.ssh_password = ''
+ self.ssh_key = ''
+ self.rest_username = ''
+ self.rest_password = ''
+ self.port = ''
+ self.cli_path = ''
+ self.data_path = ''
+
+ def __str__(self):
+ #ip_str = "ip:{0}".format(self.ip)
+ ip_str = "ip:{0} port:{1}".format(self.ip, self.port)
+ ssh_username_str = "ssh_username:{0}".format(self.ssh_username)
+ return "{0} {1}".format(ip_str, ssh_username_str)
+
+ def __repr__(self):
+ #ip_str = "ip:{0}".format(self.ip)
+ ip_str = "ip:{0} port:{1}".format(self.ip, self.port)
+ ssh_username_str = "ssh_username:{0}".format(self.ssh_username)
+ return "{0} {1}".format(ip_str, ssh_username_str)
+
+
+class TestInputMembaseSetting(object):
+
+ def __init__(self):
+ self.rest_username = ''
+ self.rest_password = ''
+
+
+class TestInputBuild(object):
+ def __init__(self):
+ self.version = ''
+ self.url = ''
+
+
+# we parse this and then pass it on to all the test case
+class TestInputParser():
+
+ @staticmethod
+ def get_test_input(argv):
+ #if file is given use parse_from_file
+ #if its from command line
+ (opts, args) = getopt.getopt(argv[1:], 'ht:c:v:s:i:p:l:', [])
+ #first let's loop over and find out if user has asked for help
+ #if it has i
+ params = {}
+ has_ini = False
+ ini_file = ''
+ for option, argument in opts:
+ if option == '-h':
+ print 'usage'
+ return
+ if option == '-i':
+ has_ini = True
+ ini_file = argument
+ if option == '-p':
+ # takes in a string of the form "p1=v1,v2,p2=v3,p3=v4,v5,v6"
+ # converts to a dictionary of the form {"p1":"v1,v2","p2":"v3","p3":"v4,v5,v6"}
+ argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", argument)[1:]]
+ pairs = dict(zip(argument_split[::2], argument_split[1::2]))
+ for pair in pairs.iteritems():
+ if pair[0] == "vbuckets":
+ # takes in a string of the form "1-100,140,150-160"
+ # converts to an array with all those values inclusive
+ vbuckets = set()
+ for v in pair[1].split(","):
+ r = v.split("-")
+ vbuckets.update(range(int(r[0]), int(r[-1]) + 1))
+ params[pair[0]] = sorted(vbuckets)
+ else:
+ argument_list = [a.strip() for a in pair[1].split(",")]
+ if len(argument_list) > 1:
+ # if the parameter had multiple entries separated by comma
+ # then store as a list
+ # ex. {'vbuckets':[1,2,3,4,100]}
+ params[pair[0]] = argument_list
+ else:
+ # if parameter only had one entry then store as a string
+ # ex. {'product':'cb'}
+ params[pair[0]] = argument_list[0]
+
+ if has_ini:
+ input = TestInputParser.parse_from_file(ini_file)
+ #now let's get the test specific parameters
+ else:
+ input = TestInputParser.parse_from_command_line(argv)
+ input.test_params = params
+
+ if "num_clients" not in input.test_params.keys() and input.clients: # do not override the command line value
+ input.test_params["num_clients"] = len(input.clients)
+ if "num_nodes" not in input.test_params.keys() and input.servers:
+ input.test_params["num_nodes"] = len(input.servers)
+
+ return input
+
+ @staticmethod
+ def parse_from_file(file):
+ servers = []
+ ips = []
+ input = TestInput()
+ config = ConfigParser.ConfigParser()
+ config.read(file)
+ sections = config.sections()
+ global_properties = {}
+ count = 0
+ start = 0
+ end = 0
+ cluster_ips = []
+ clusters = {}
+ moxis = []
+ moxi_ips = []
+ client_ips = []
+ input.dashboard = []
+ for section in sections:
+ result = re.search('^cluster', section)
+ if section == 'servers':
+ ips = TestInputParser.get_server_ips(config, section)
+ elif section == 'moxis':
+ moxi_ips = TestInputParser.get_server_ips(config, section)
+ elif section == 'clients':
+ client_ips = TestInputParser.get_server_ips(config, section)
+ elif section == 'membase':
+ input.membase_settings = TestInputParser.get_membase_settings(config, section)
+ elif section == 'global':
+ #get global stuff and override for those unset
+ for option in config.options(section):
+ global_properties[option] = config.get(section, option)
+ elif section == 'dashboard':
+ input.dashboard = TestInputParser.get_server_ips(config, section)
+ elif result is not None:
+ cluster_list = TestInputParser.get_server_ips(config, section)
+ cluster_ips.extend(cluster_list)
+ clusters[count] = len(cluster_list)
+ count += 1
+
+ # Setup 'cluster#' tag as dict
+ # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}
+ for cluster_ip in cluster_ips:
+ servers.append(TestInputParser.get_server(cluster_ip, config))
+ servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
+ for key, value in clusters.items():
+ end += value
+ input.clusters[key] = servers[start:end]
+ start += value
+
+ # Setting up 'servers' tag
+ servers = []
+ for ip in ips:
+ servers.append(TestInputParser.get_server(ip, config))
+ input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
+
+ # Setting up 'moxis' tag
+ moxis = []
+ for moxi_ip in moxi_ips:
+ moxis.append(TestInputParser.get_server(moxi_ip, config))
+ input.moxis = TestInputParser.get_server_options(moxis, input.membase_settings, global_properties)
+
+ # Setting up 'clients' tag
+ input.clients = client_ips
+
+ return input
+
+ @staticmethod
+ def get_server_options(servers, membase_settings, global_properties):
+ for server in servers:
+ if server.ssh_username == '' and 'username' in global_properties:
+ server.ssh_username = global_properties['username']
+ if server.ssh_password == '' and 'password' in global_properties:
+ server.ssh_password = global_properties['password']
+ if server.ssh_key == '' and 'ssh_key' in global_properties:
+ server.ssh_key = os.path.expanduser(global_properties['ssh_key'])
+ if not server.port and 'port' in global_properties:
+ server.port = global_properties['port']
+ if server.cli_path == '' and 'cli' in global_properties:
+ server.cli_path = global_properties['cli']
+ if server.rest_username == '' and membase_settings.rest_username != '':
+ server.rest_username = membase_settings.rest_username
+ if server.rest_password == '' and membase_settings.rest_password != '':
+ server.rest_password = membase_settings.rest_password
+ if server.data_path == '' and 'data_path' in global_properties:
+ server.data_path = global_properties['data_path']
+ return servers
+
+ @staticmethod
+ def get_server_ips(config, section):
+ ips = []
+ options = config.options(section)
+ for option in options:
+ ips.append(config.get(section, option))
+ return ips
+
+ @staticmethod
+ def get_server(ip, config):
+ server = TestInputServer()
+ server.ip = ip
+ for section in config.sections():
+ if section == ip:
+ options = config.options(section)
+ for option in options:
+ if option == 'username':
+ server.ssh_username = config.get(section, option)
+ if option == 'password':
+ server.ssh_password = config.get(section, option)
+ if option == 'cli':
+ server.cli_path = config.get(section, option)
+ if option == 'ssh_key':
+ server.ssh_key = config.get(section, option)
+ if option == 'port':
+ server.port = config.get(section, option)
+ if option == 'ip':
+ server.ip = config.get(section, option)
+ break
+ #get username
+ #get password
+ #get port
+ #get cli_path
+ #get key
+ return server
+
+ @staticmethod
+ def get_membase_build(config, section):
+ membase_build = TestInputBuild()
+ for option in config.options(section):
+ if option == 'version':
+ pass
+ if option == 'url':
+ pass
+ return membase_build
+
+ @staticmethod
+ def get_membase_settings(config, section):
+ membase_settings = TestInputMembaseSetting()
+ for option in config.options(section):
+ if option == 'rest_username':
+ membase_settings.rest_username = config.get(section, option)
+ if option == 'rest_password':
+ membase_settings.rest_password = config.get(section, option)
+ return membase_settings
+
+ @staticmethod
+ def parse_from_command_line(argv):
+
+ input = TestInput()
+
+ try:
+ # -f : won't be parsed here anymore
+ # -s will have comma separated list of servers
+ # -t : won't be parsed here anymore
+ # -v : version
+ # -u : url
+ # -b : will have the path to cli
+ # -k : key file
+ # -p : for smtp ( taken care of by jenkins)
+ # -o : taken care of by jenkins
+ servers = []
+ membase_setting = None
+ (opts, args) = getopt.getopt(argv[1:], 'h:t:c:i:p:', [])
+ #first let's loop over and find out if user has asked for help
+ need_help = False
+ for option, argument in opts:
+ if option == "-h":
+ print 'usage...'
+ need_help = True
+ break
+ if need_help:
+ return
+ #first let's populate the server list and the version number
+ for option, argument in opts:
+ if option == "-s":
+ #handle server list
+ servers = TestInputParser.handle_command_line_s(argument)
+ elif option == "-u" or option == "-v":
+ input_build = TestInputParser.handle_command_line_u_or_v(option, argument)
+
+ #now we can override the username pass and cli_path info
+ for option, argument in opts:
+ if option == "-k":
+ #handle server list
+ for server in servers:
+ if server.ssh_key == '':
+ server.ssh_key = argument
+ elif option == "--username":
+ #handle server list
+ for server in servers:
+ if server.ssh_username == '':
+ server.ssh_username = argument
+ elif option == "--password":
+ #handle server list
+ for server in servers:
+ if server.ssh_password == '':
+ server.ssh_password = argument
+ elif option == "-b":
+ #handle server list
+ for server in servers:
+ if server.cli_path == '':
+ server.cli_path = argument
+ # loop over stuff once again and set the default
+ # value
+ for server in servers:
+ if server.ssh_username == '':
+ server.ssh_username = 'root'
+ if server.ssh_password == '':
+ server.ssh_password = 'northscale!23'
+ if server.cli_path == '':
+ server.cli_path = '/opt/membase/bin/'
+ if not server.port:
+ server.port = 8091
+ input.servers = servers
+ input.membase_settings = membase_setting
+ return input
+ except Exception:
+ log = logger.Logger.get_logger()
+ log.error("unable to parse input arguments")
+ raise
+
+ @staticmethod
+ def handle_command_line_u_or_v(option, argument):
+ input_build = TestInputBuild()
+ if option == "-u":
+ # let's check whether this url exists or not
+ # let's extract version from this url
+ pass
+ if option == "-v":
+ allbuilds = BuildQuery().get_all_builds()
+ for build in allbuilds:
+ if build.product_version == argument:
+ input_build.url = build.url
+ input_build.version = argument
+ break
+ return input_build
+
+ #returns list of server objects
+ @staticmethod
+ def handle_command_line_s(argument):
+ #ip:port:username:password:clipath
+
+ ips = argument.split(",")
+ servers = []
+
+ for ip in ips:
+ server = TestInputServer()
+ if ip.find(":") == -1:
+ pass
+ else:
+ info = ip.split(":")
+ #info[0] : ip
+ #info[1] : port
+ #info[2] :username
+ #info[3] : password
+ #info[4] : cli path
+ server.ip = info[0]
+ server.port = info[1]
+ server.ssh_username = info[2]
+ server.ssh_password = info[3]
+ server.cli_path = info[4]
+ servers.append(server)
+
+ return servers
70 cbtop.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+import logging
+import logging.config
+import ConfigParser
+import signal
+from optparse import OptionParser
+
+from libcbtop.main import main
+import libcbtop.paint as pt
+
+
+DEFAULT_HOST = "127.0.0.1"
+ctl = {}
+
+try:
+ logging.config.fileConfig("logging.conf")
+except ConfigParser.NoSectionError:
+ logging.config.fileConfig("cbtop/logging.conf")
+
+
+def handle_signal(signum, frame):
+ """Handles registered signals and exit."""
+ global ctl
+
+ logging.info("received signal %s, aborting" % signum)
+
+ if not ctl["bg"]:
+ pt.exit_fullscreen()
+
+ ctl["run_ok"] = False
+
+
+def parse_args():
+ usage = \
+ "./%prog HOST [options]\n\n" + \
+ "Monitor a couchbase cluster.\n\n" + \
+ "Examples:\n" + \
+ " ./%prog -- defaults to 127.0.0.1\n" + \
+ " ./%prog 10.2.1.65\n" + \
+ " ./%prog 10.2.1.65 -i 4\n"
+
+ parser = OptionParser(usage=usage)
+ parser.add_option("-i", "--itv", dest="itv", default="1", type="int",
+ help="stats polling interval (seconds)")
+ parser.add_option("-d", "--dbhost", dest="dbhost", default=DEFAULT_HOST,
+ help="host where seriesly database is located")
+ parser.add_option("-s", "--dbslow", dest="dbslow", default="slow",
+ help="seriesly database to store slow changing data")
+ parser.add_option("-f", "--dbfast", dest="dbfast", default="fast",
+ help="seriesly database to store fast changing data")
+ parser.add_option("-b", "--background", dest="bg", default=False,
+ action="store_true", help="run cbtop in background")
+ return parser.parse_args()
+
+
+def cbtop_main():
+ global ctl
+
+ signal.signal(signal.SIGINT, handle_signal)
+
+ options, args = parse_args()
+ server = args and args[0] or DEFAULT_HOST
+ ctl = {"run_ok": True, "bg": options.bg}
+
+ main(server, itv=options.itv, ctl=ctl, dbhost=options.dbhost,
+ dbslow=options.dbslow, dbfast=options.dbfast)
+
+if __name__ == "__main__":
+ cbtop_main()
0  libcbtop/__init__.py
No changes.
28 libcbtop/collector.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+
+class Collector(object):
+ """
+ Collector to collect data from data @class Source
+ Emit data to different types of data handlers
+ """
+ def collect(self):
+ """
+ Collect data from data sources
+ """
+ raise NotImplementedError(
+ "collect() has not been implemented")
+
+ def process(self):
+ """
+ Optionally process the data
+ """
+ raise NotImplementedError(
+ "process() has not been implemented")
+
+ def emit(self):
+ """
+ Emit data to data handlers
+ """
+ raise NotImplementedError(
+ "emit() has not been implemented")
26 libcbtop/data_helper.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+
+class DataHelper:
+ """
+ Helper to retrieve data from particular nodes
+ """
+ @staticmethod
+ def get_bucket(root, parents):
+ """Get bucket name from root and parent nodes"""
+ path = root.get("path", "")
+ if path:
+ # assume path like /pools/default
+ return path.split("/")[-1]
+ else:
+ return "default"
+
+ @staticmethod
+ def get_ip(root, parent):
+ """
+ Get hostname or ip address from root and parent nodes
+
+ Carbon needs to know the originator of the fast changing data, \
+ for the purpose of constructing the metric info.
+ """
+ return root.get("host", "127.0.0.1")
13 libcbtop/handler.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+
+
+class Handler(object):
+ """
+ Base class for data handlers
+ """
+ def handle(self, source):
+ """
+ Consume and handle a data source
+ """
+ raise NotImplementedError(
+ "handle() has not been implemented")
30 libcbtop/json_handler.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+import logging
+import json
+from handler import Handler
+
+
+class JsonHandler(Handler):
+ """
+ Handler to dump data to a json file
+ """
+ def __init__(self, filename=""):
+ self.filename = filename
+
+ def handle(self, source):
+ """Wrap data into json doc and dump to a file"""
+ if not source or not source.data:
+ logging.error("unable to handle : invalid data")
+ return False
+
+ if not self.filename:
+ self.filename = \
+ source.__class__.__name__ + "-" \
+ + source.server.ip.replace(".", "-") \
+ + ".json"
+
+ with open(self.filename, "w") as f:
+ f.write(json.dumps(source.data))
+
+ return True
49 libcbtop/main.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+import logging
+import time
+import multiprocessing
+
+from metadata.visit import main as visit, VISIT_ENTRY_FUNCS, retrieve_meta
+
+import visit_cb as vc
+import paint as pt
+
+CTL = {"run_ok": True, "bg": True}
+
+
+def main(server, itv=5, ctl=CTL, port=8091, path="/pools/default",
+ dbhost="127.0.0.1", dbslow="slow", dbfast="fast"):
+
+ vc.store = vc.SerieslyStore(dbhost, dbslow, dbfast)
+ vc.tbl.set_ftr("Last Update: %time")
+ if not ctl["bg"]:
+ pt.enter_fullscreen()
+
+ mc_store = vc.SerieslyStore(dbhost, dbslow, dbfast)
+
+ mc_proc = multiprocessing.Process(target=vc.mc_worker,
+ args=(vc.mc_jobs, vc.mc_stats,
+ ctl, mc_store, itv))
+
+ mc_proc.daemon = True
+ mc_proc.start()
+
+ visit_entry_func = VISIT_ENTRY_FUNCS.copy()
+ visit_entry_func["collect_mc_stats"] = vc.collect_mc_stats
+
+ while ctl["run_ok"]:
+
+ vc.store.clear()
+ visit(server, port, path,
+ {"fast": vc.store_fast, "slow": vc.store_slow},
+ {"url_before": vc.url_before, "url_after": vc.url_after},
+ retrieve_funcs={"retrieve_data": vc.retrieve_data,
+ "retrieve_meta": retrieve_meta},
+ entry_funcs=visit_entry_func, ctl=ctl)
+ vc.store.persist()
+
+ if not ctl["bg"]:
+ pt.paint(vc.tbl)
+ logging.info("sleep for %s seconds" % itv)
+ time.sleep(itv)
51 libcbtop/mc_collector.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+import logging
+from collector import Collector
+from mc_source import MemcachedSource
+
+
class MemcachedCollector(Collector):
    """
    Collect memcached stats across the cluster, \
    optionally process results and emit to the data handlers.
    """

    def __init__(self, sources=None, handlers=None):
        self.sources = sources    # [@class MemcachedSource]
        self.handlers = handlers  # [@class Handler]

    def collect(self):
        """
        Collect stats from data sources

        @return True when collection ran, False on invalid sources
        """
        if not self.sources or not isinstance(self.sources, list):
            logging.error("invalid sources: must be a list")
            return False

        for source in self.sources:
            if not isinstance(source, MemcachedSource):
                logging.error("not a MemcachedSource, skipped")
                continue

            logging.info("collecting mc stats from server %s"
                         % source.server.ip)
            source.connect()
            source.feed()
            source.disconnect()

        # explicit success for symmetry with the False failure path
        return True

    def process(self):
        """Stats are forwarded verbatim; processing is not supported."""
        raise NotImplementedError(
            "Do not alter memcached stats")

    def emit(self):
        """
        Emit stats to data handlers

        @return True when emission ran, False on invalid handlers
        """
        if not self.handlers or not isinstance(self.handlers, list):
            logging.error("invalid handlers: must be a list")
            return False

        for handler in self.handlers:
            for source in self.sources:
                handler.handle(source)

        return True
82 libcbtop/mc_source.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+
+import logging
+import os
+import json
+from source import Source
+from server import Server
+
+from cbtestlib.memcached.helper.data_helper import MemcachedClientHelper
+
+
class MemcachedSource(Source):
    """Data source backed by a single memcached/ep-engine endpoint.

    Raw stats responses accumulate in self.data; stats listed in the
    metadata file are additionally routed into self.slow or self.fast
    according to their "visit" attribute.
    """

    # metadata describing how individual stats should be routed/displayed
    META_FILE = "%s/../metadata/stats.json" % os.path.dirname(__file__)
    # stats sub-commands to issue; "" is the plain "stats" command
    MC_STATS = ["", "allocator", "checkpoint", "config", "dispatcher",
                "hash", "kvstore", "kvtimings", "memory", "prev-vbucket",
                "tap", "tapagg", "timings", "vbucket", "vbucket-details"]

    def __init__(self, server, bucket, meta_file=META_FILE, mc=None):
        self.server = server  # @class: Server
        self.bucket = bucket
        self.mc = mc          # memcached client; created lazily by connect()
        self.data = {}        # {stats sub-command: raw stats dict}
        self.meta = self.get_meta(meta_file)
        # both routing buckets are seeded with identifying tags
        self.slow = {"mc-host": server.ip, "mc-bucket": bucket}
        self.fast = {"mc-host": server.ip, "mc-bucket": bucket}

    def get_meta(self, meta_file):
        """Load and parse the json stats metadata file."""
        with open(meta_file) as f:
            return json.loads(f.read())

    def filter(self, stats):
        """Route stats into self.slow/self.fast per their meta "visit" tag.

        @param stats: raw {key: value} stats dict from one sub-command
        @return False on invalid input, None otherwise
        """
        if not stats or not self.meta:
            logging.error("unable to apply filter: invalid data or meta")
            return False

        for key in stats:
            # metadata keys carry an "mc-" prefix
            mkey = "mc-%s" % key
            if mkey in self.meta and "visit" in self.meta[mkey]:
                if self.meta[mkey]["visit"] == "slow":
                    self.slow[mkey] = stats[key]
                elif self.meta[mkey]["visit"] == "fast":
                    self.fast[mkey] = stats[key]

    def connect(self):
        """Create a direct memcached client for this server/bucket.

        @return True on success, False when self.server is invalid
        """
        if not self.server or not isinstance(self.server, Server):
            return False

        self.mc = MemcachedClientHelper.direct_client(
            self.server, self.bucket)

        return True

    def feed(self):
        """Issue every MC_STATS sub-command and collect the responses.

        @return dict of collected stats, or None if connecting failed
        """
        if not self.mc and not self.connect():
            logging.error("unable to connect to server : %s" % self.server)
            return None

        self.data = {}
        for key in MemcachedSource.MC_STATS:
            try:
                stats = self.mc.stats(key)
                if key != "":
                    self.data[key] = stats
                else:
                    # plain "stats" output is stored under "all"
                    self.data["all"] = stats
                self.filter(stats)
            except Exception, e:
                # keep going: one failing sub-command should not abort
                # the whole collection
                logging.error("exception for key %s : %s" % (key, e))

        return self.data

    def disconnect(self):
        """Close the underlying memcached client connection."""
        self.mc.close()

    def gen_stats(self):
        """
        Generate individual stats

        Yields (key, value) pairs flattened across all stats groups.
        """
        for data in self.data.itervalues():
            for key_val in data.iteritems():
                yield key_val
170 libcbtop/paint.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+"""
+- Paint the table on:
+
+ - blessings terminal (http://pypi.python.org/pypi/blessings)
+
+- Callbacks for tabula package to format a value based on its meta.
+"""
+
+import logging
+import ast
+
+from blessings import Terminal
+from tabula.table import Table
+
# magnitude suffixes used by conv_units (1024-based steps)
UNITS_SUFFIX = ["", "K", "M", "G", "T"]

# single shared blessings terminal for the whole module
term = Terminal()

# style name -> terminal formatter callable, consumed by change_style()
STYLES = {"bold": term.bold,
          "reverse": term.reverse,
          "underline": term.underline,
          "blink": term.blink,
          "italic": term.italic,
          "shadow": term.shadow,
          "standout": term.standout,
          "subscript": term.subscript,
          "superscript": term.superscript,
          "flash": term.flash}

# color name -> terminal colorizer callable, consumed by change_color()
COLORS = {"red": term.red,
          "black": term.black,
          "green": term.green,
          "yellow": term.yellow,
          "blue": term.blue,
          "magenta": term.magenta,
          "white": term.white}
+
+
def conv_units(val, meta):
    """
    Callback function for tabula

    Render *val* as a fixed-point string, scaling byte/item counts
    into K/M/G/T units for readability.

    @return formatted string, or the original value when it cannot be
            converted (falsy value, missing meta, non-numeric input)
    """
    if not val or not meta:
        return val

    try:
        num = float(val)
    except ValueError:
        logging.error("unable to apply convert units for %s" % val)
        return val

    if meta not in ("bytes", "items"):
        return "%.2f" % num

    level = 0
    while num > 1024 and level < 4:
        num /= 1024
        level += 1

    return "%.2f%s" % (num, UNITS_SUFFIX[level])
+
+
def change_style(val, meta):
    """
    Callback function for tabula

    Decorate *val* with a terminal style for blessings terminal only.

    Supported style: bold, reverse, underline, blink, normal, etc
    """
    if not val or not meta:
        return val

    styler = STYLES.get(meta)
    if styler is None:
        return val

    return styler(val)
+
+
def change_color(val, meta):
    """
    Callback function for tabula

    Colorize *val* for display, for blessings terminal only.
    """
    if not val or not meta:
        return val

    colorizer = COLORS.get(meta)
    if colorizer is None:
        return val

    return colorizer(val)
+
+
def check_range(val, meta):
    """
    Callback function for tabula

    Check the range of acceptance defined in metadata
    and display error on screen

    Range is a str literal which evaluates to a @tuple or @list pair
    e.g - (10,100), [20,50]

    Value and edges are converted to float before comparison

    @return val painted red when out of range, otherwise val unchanged
    """
    if not val or not meta:
        return val

    try:
        f_val = float(val)
        f_min, f_max = map(float, ast.literal_eval(meta))
    except (SyntaxError, ValueError) as e:
        # "as e" form works on python 2.6+ and python 3 alike
        logging.error("unable to parse range string - %s: %s" % (meta, e))
        return val

    if f_val < f_min or f_val > f_max:
        # out of range: paint red to draw the operator's attention
        return COLORS["red"](val)

    return val
+
+"""
+Functions to format a value using its meta,
which are called in alphabetical order.
+
+TABULA_CONV_FUNCS: convert value before column alignments
+TABULA_DECO_FUNCS: decorate value after column alignments
+"""
# meta key -> formatter callable; tabula looks these up per cell meta
TABULA_CONV_FUNCS = {"units": conv_units}

TABULA_DECO_FUNCS = {"style": change_style,
                     "color": change_color,
                     "range": check_range}
+
+
def enter_fullscreen():
    """
    Switch the terminal to the alternate (fullscreen) buffer and hide
    the cursor.  Invoke before printing out anything.
    This method should be replaced by or merged to blessings package
    """
    term.stream.write(term.enter_fullscreen)
    term.stream.write(term.hide_cursor)
+
+
def exit_fullscreen():
    """
    Restore the normal screen buffer and cursor; counterpart of
    enter_fullscreen().  Invoke when done painting.
    This method should be replaced by or merged to blessings package
    """
    term.stream.write(term.exit_fullscreen)
    term.stream.write(term.normal_cursor)
+
+
def paint(tbl):
    """
    Paint the table on terminal
    """
    if not isinstance(tbl, Table):
        logging.error("unable to paint table: invalid object")
        return False

    out = term.stream
    out.write(term.clear)
    out.write(str(tbl))

    return True
27 libcbtop/seriesly_handler.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+import logging
+from handler import Handler
+
+
class SerieslyHandler(Handler):
    """
    Handler to dump data to seriesly
    """
    def __init__(self, store):
        self.store = store

    def handle(self, source):
        """Push the source's fast/slow stats through the seriesly store."""
        if not source:
            logging.error("unable to handle : invalid source")
            return False

        self.store.clear()

        fast = source.fast
        if fast:
            self.store.fast = fast

        slow = source.slow
        if slow:
            self.store.slow = slow

        self.store.persist()
        return True
28 libcbtop/server.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+
class Server(object):
    """Endpoint and credential bundle describing one couchbase node.

    Plain value object; every attribute mirrors its constructor argument.
    """

    def __init__(self, ip="127.0.0.1", port="8091", ssh_username="root",
                 ssh_password="couchbase", ssh_key="",
                 rest_username="Administrator", rest_password="password",
                 data_path=""):
        # NOTE: the default ssh_password was previously the typo "coucbhase"
        self.ip = ip
        self.port = port
        self.ssh_username = ssh_username
        self.ssh_password = ssh_password
        self.ssh_key = ssh_key
        self.rest_username = rest_username
        self.rest_password = rest_password
        self.data_path = data_path

    def __repr__(self):
        return "<Server> ip: %s, port: %s, ssh_username: %s, "\
               "ssh_password: %s, ssh_key: %s, rest_username: %s, "\
               "rest_password: %s, data_path: %s" % \
               (self.ip, self.port, self.ssh_username, self.ssh_password,
                self.ssh_key, self.rest_username, self.rest_password,
                self.data_path)

    def __str__(self):
        return self.__repr__()
27 libcbtop/source.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+
class Source(object):
    """
    Base class for all data sources
    """

    def connect(self):
        """
        Connect to the upstream data provider
        """
        raise NotImplementedError("connect() has not been implemented")

    def feed(self):
        """
        Feed downstream pipe with data
        """
        raise NotImplementedError("feed() has not been implemented")

    def disconnect(self):
        """
        Disconnect from the upstream data provider
        """
        raise NotImplementedError("disconnect() has not been implemented")
106 libcbtop/sys_helper.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+
+import logging
+from functools import wraps
+
+
class SocketHelper:
    """Static helpers for whole-buffer socket send/receive."""

    @staticmethod
    def send_bytes(skt, buf, timeout=0):
        """
        Send bytes to the socket
        @param timeout: seconds; 0 means block indefinitely
        @return total number of bytes sent (always len(buf))
        @raise ValueError, IOError, socket.timeout, Exception
        """
        if not buf or not skt:
            raise ValueError("<send_bytes> invalid socket descriptor or buf")

        if timeout:
            skt.settimeout(timeout)
        else:
            skt.settimeout(None)

        length = len(buf)
        sent_total = 0

        # send() may be partial: loop until the whole buffer is out
        while sent_total < length:
            sent = skt.send(buf)
            if not sent:
                raise IOError("<send_bytes> connection broken")
            sent_total += sent
            buf = buf[sent:]

        return sent_total

    @staticmethod
    def recv_bytes(skt, length, timeout=0):
        """
        Receive bytes from the socket
        @param timeout: seconds; 0 means block indefinitely
        @return exactly *length* bytes
        @raise ValueError, IOError, socket.timeout, Exception
        """
        if not skt or length < 0:
            raise ValueError("<recv_bytes> invalid socket descriptor or length")

        if timeout:
            skt.settimeout(timeout)
        else:
            skt.settimeout(None)

        # b'' so the accumulator works on both python 2 and python 3
        buf = b''
        while len(buf) < length:
            data = skt.recv(length - len(buf))
            if not data:
                raise IOError("<recv_bytes> connection broken")
            buf += data

        return buf

    @staticmethod
    def send_bytes_ex(skt, buf, timeout=0):
        """
        Send bytes to the socket. Swallow exceptions.
        @return bytes sent, or -1 on any error
        """
        try:
            return SocketHelper.send_bytes(skt, buf, timeout)
        except Exception as e:
            logging.error(e)

        return -1

    @staticmethod
    def recv_bytes_ex(skt, length, timeout=0):
        """
        Receive bytes from the socket. Swallow exceptions.
        @return received bytes, or None on any error
        """
        try:
            return SocketHelper.recv_bytes(skt, length, timeout)
        except Exception as e:
            logging.error(e)

        return None
+
+
def synchronized(lock_name):
    """synchronized access to class method

    Decorator factory: wraps a method so its body runs while holding the
    lock found at attribute *lock_name* on the instance.

    @raise AttributeError when the instance has no such lock attribute
    """
    def _outer(func):
        @wraps(func)
        def _inner(self, *args, **kwargs):
            try:
                lock = self.__getattribute__(lock_name)
            except AttributeError:
                # previously returned the wrapper function itself here,
                # silently skipping the call; surface the bug instead
                logging.error("<synchronized> cannot find lock: %s", lock_name)
                raise
            with lock:
                return func(self, *args, **kwargs)
        return _inner
    return _outer
+
+
def is_num(val):
    """check if a string is numeric"""
    try:
        float(val)
        return True
    except ValueError:
        return False
276 libcbtop/visit_cb.py
@@ -0,0 +1,276 @@
+#!/usr/bin/env python
+import logging
+import json
+import time
+import multiprocessing
+import Queue
+
+from cbtestlib.membase.api.rest_client import RestConnection
+from cbtestlib.membase.api.exception import ServerUnavailableException
+
+from tabula.table import Table
+from tabula.section import Section
+from seriesly import Seriesly
+import seriesly.exceptions
+
+from metadata.visit import retrieve_meta
+
+from paint import TABULA_CONV_FUNCS, TABULA_DECO_FUNCS
+from server import Server
+from mc_source import MemcachedSource
+from mc_collector import MemcachedCollector
+from json_handler import JsonHandler
+from seriesly_handler import SerieslyHandler
+from data_helper import DataHelper
+
# configuration for each tabula.section: section name ->
#   id                   - display order of the section in the table
#   show_row_hdrs        - print a header column with row names
#   show_col_hdrs        - print a header row with column names
#   show_col_hdr_in_cell - prefix each cell value with its column name
SECTION_CONFIG = {"settings": {"id": 0,
                               "show_row_hdrs": False,
                               "show_col_hdrs": False,
                               "show_col_hdr_in_cell": True},
                  "storage": {"id": 1,
                              "show_row_hdrs": False,
                              "show_col_hdrs": False,
                              "show_col_hdr_in_cell": True},
                  "buckets": {"id": 2,
                              "show_row_hdrs": False,
                              "show_col_hdrs": True,
                              "show_col_hdr_in_cell": False},
                  "nodes": {"id": 3,
                            "show_row_hdrs": False,
                            "show_col_hdrs": True,
                            "show_col_hdr_in_cell": False},
                  "Memory Stats": {"id": 4,
                                   "show_row_hdrs": False,
                                   "show_col_hdrs": True,
                                   "show_col_hdr_in_cell": False}}

# top-level ascii table all sections are painted into
tbl = Table("cbtop", sep=" ")
cur_row = {}  # {sec_nam: row name}
# job queue feeding mc_worker (depth 1: at most one pending job)
mc_jobs = multiprocessing.Queue(1)
# result queue of (stats, meta) pairs produced by mc_worker
mc_stats = multiprocessing.Queue(20)
# module-level SerieslyStore; assigned by libcbtop.main before visiting
store = None
+
+
class SerieslyStore(object):
    """Buffer slow/fast stats dicts and persist them to seriesly databases."""

    def __init__(self, host, dbslow, dbfast):
        self.slow = {}   # slow-changing stats for the current interval
        self.fast = {}   # fast-changing stats for the current interval
        self.dbslow = dbslow
        self.dbfast = dbfast
        self.seriesly = Seriesly(host=host)
        try:
            dbs = self.seriesly.list_dbs()
        except seriesly.exceptions.ConnectionError as e:
            # stay usable for buffering; persist() also degrades gracefully
            logging.error("unable to connect to seriesly server: %s" % e)
            return

        # create the target databases on first use
        if dbslow not in dbs:
            self.seriesly.create_db(dbslow)
        if dbfast not in dbs:
            self.seriesly.create_db(dbfast)

    def clear(self):
        """Drop buffered stats for a new collection interval."""
        self.slow = {}
        self.fast = {}

    def add_fast(self, key, val):
        """Buffer a fast-changing stat."""
        self.fast[key] = val

    def add_slow(self, key, val):
        """Buffer a slow-changing stat."""
        self.slow[key] = val

    def persist(self):
        """Append non-empty buffers to their seriesly databases.

        @return True on success, False when seriesly is unreachable
        """
        try:
            if self.slow:
                self.seriesly[self.dbslow].append(self.slow)
            if self.fast:
                self.seriesly[self.dbfast].append(self.fast)
        except seriesly.exceptions.ConnectionError as e:
            logging.error("unable to connect to seriesly server: %s" % e)
            return False

        return True
+
+
def _show_stats(key, val, meta_inf):
    """
    Show stats on the ascii table

    Places *val* into the tabula section named by meta_inf["section"],
    creating the section from SECTION_CONFIG on first use.  meta_inf may
    also carry "col" (column name override) and "new_row" (start a new
    row keyed by this value).

    @return True when the value was placed (or deliberately skipped),
            False on invalid table/meta or unknown section
    """
    if not tbl or not isinstance(tbl, Table):
        return False

    if not meta_inf or not "section" in meta_inf:
        logging.debug("unable to show data: key %s, val %s, invalid meta info"
                      % (key, val))
        return False

    # ok, not dealing with unicode for now
    sec_nam = str(meta_inf["section"])
    val = str(val)

    section = tbl.get_section(sec_nam)

    if not section:
        # lazily create the section with its display config
        if sec_nam in SECTION_CONFIG:
            config = SECTION_CONFIG[sec_nam]
            section = Section(sec_nam, config["id"],
                              conv_funcs=TABULA_CONV_FUNCS,
                              deco_funcs=TABULA_DECO_FUNCS)
            section.config(config["show_row_hdrs"],
                           config["show_col_hdrs"],
                           config["show_col_hdr_in_cell"])
        else:
            # section without a display config: refuse to show
            return False
        tbl.add_section(section)

    if "col" in meta_inf:
        col = str(meta_inf["col"])
    else:
        col = str(key)

    if "new_row" in meta_inf:
        # create a new row using the col name
        section.add_cell(val, col, val, "S50", meta=meta_inf)
        cur_row[sec_nam] = val
        return True

    if not sec_nam in cur_row:
        # no row started for this section yet; drop the stat quietly
        logging.debug("stats %s is not associated with a section" % key)
        return True

    row = cur_row[sec_nam]
    section.add_cell(row, col, val, type="S50", meta=meta_inf)

    return True
+
+
def show_all_stats(stats, meta):
    """Render every stat that has display metadata onto the ascii table.

    @param stats: {stat key: value}
    @param meta: {stat key: meta info dict} controlling display
    @return False when either argument is not a dict, else None
    """
    if not isinstance(stats, dict) or not isinstance(meta, dict):
        logging.error("failed to show all stats : invalid data")
        return False

    # .items() behaves identically on python 2 and 3 (iteritems is py2-only)
    for key, val in stats.items():
        if key not in meta:
            continue

        _show_stats(key, val, meta[key])
+
+
def store_fast(root, parents, data, meta, coll,
               key, val, meta_val, meta_inf, level):
    """Store time-series data into fast-changing database

    Visitor callback: buffers the stat in the module-level store and
    mirrors it onto the ascii table.
    """
    store.add_fast(key, val)
    return _show_stats(key, val, meta_inf)
+
+
def store_slow(root, parents, data, meta, coll,
               key, val, meta_val, meta_inf, level):
    """Store time-series data into slow-changing database

    Visitor callback: buffers the stat in the module-level store and
    mirrors it onto the ascii table.
    """
    store.add_slow(key, val)
    return _show_stats(key, val, meta_inf)
+
+
def url_before(context, path):
    """Visitor hook invoked before a URL is fetched; pass-through."""
    return context, path
+
+
def url_after(context, path, root):
    """Visitor hook invoked after a URL is fetched; no-op."""
    pass
+
+
def retrieve_data(context, path):
    """Retrieve json data from a couchbase server through REST calls

    Falls back to the canned metadata (retrieve_meta) whenever the live
    server cannot be reached or returns a failure status.

    @param context: visitor context; "host" selects the target server
    @param path: REST path to fetch
    @return parsed json document
    """
    # TODO: use cbtestlib
    server = Server(context.get("host", "127.0.0.1"))
    rest = RestConnection(server)
    api = rest.baseUrl + path

    try:
        status, content, header = rest._http_request(api)  # TODO: expose
    except ServerUnavailableException as e:
        logging.error("unable to retrieve data from %s: %s" % (server, e))
        return retrieve_meta(context, path)

    if status:
        return json.loads(content)

    return retrieve_meta(context, path)
+
+
def collect_mc_stats(root, parents, data, meta, coll,
                     key, val, meta_val, meta_inf, level=0):
    """
    Collect memcached stats
    Dump time series data a json snapshot

    Visitor callback for the "serverList" meta tag: drains any stats the
    worker has produced onto the table, then queues the server list as a
    new job for the background mc_worker process.

    @param val: list of "host:port" memcached endpoints
    @return True when a job was queued, False otherwise
    """
    if not isinstance(val, list):
        logging.error(
            "unable to collect mc stats: val must be a list - %s" % val)
        return False

    # drain one available result from the worker (non-blocking)
    try:
        stats, meta = mc_stats.get(block=False)
        show_all_stats(stats, meta)
    except Queue.Empty:
        pass

    # hand the endpoints to the worker (non-blocking; depth-1 job queue)
    try:
        mc_jobs.put([root, parents, val], block=False)
        return True
    except Queue.Full:
        logging.debug("unable to collect mcstats : queue is full")
        return False
+
+
def mc_worker(jobs, stats, ctl, store, timeout=5):
    """Background process: drain mc-stats jobs and publish results.

    @param jobs: queue of [root, parents, val] jobs (val: "host:port" list)
    @param stats: queue where (stats dict, meta) result pairs are pushed
    @param ctl: shared control dict; loop exits when ctl["run_ok"] is falsy
    @param timeout: job-wait timeout and minimum per-batch pacing, seconds
    """
    logging.info("mc_worker started")

    while ctl["run_ok"]:
        try:
            root, parents, val = jobs.get(block=True, timeout=timeout)
        except Queue.Empty:
            logging.debug("mc_worker hasn't received jobs for %s seconds"
                          % timeout)
            continue

        start = time.time()

        for server in val:

            try:
                ip, port = server.split(":")
            except (ValueError, AttributeError), e:
                logging.error("unable to collect mc stats from %s : %s"
                              % (server, e))
                continue

            mc_server = Server(ip)

            # get bucket name from root and parent nodes
            bucket = DataHelper.get_bucket(root, parents)

            # initialize memcached source
            mc_source = MemcachedSource(mc_server, bucket)

            # initialize handlers to dump data json doc
            j_handler = JsonHandler()
            s_handler = SerieslyHandler(store)

            # collect data from source and emit to handlers
            mc_coll = MemcachedCollector([mc_source], [j_handler, s_handler])
            mc_coll.collect()
            mc_coll.emit()
            stats.put([mc_source.fast, mc_source.meta], block=True)
            stats.put([mc_source.slow, mc_source.meta], block=True)

        delta = time.time() - start
        logging.debug("collected mc stats from %s, took %s seconds"
                      % (val, delta))

        # pace the loop so each batch takes at least *timeout* seconds
        if delta < timeout:
            logging.debug("mc_worker sleep for %s seconds" % (timeout - delta))
            time.sleep(timeout - delta)

    logging.info("mc_worker stopped")
33 logging.conf
@@ -0,0 +1,33 @@
+[loggers]
+keys=root
+
+[logger_root]
+handlers=file
+level=ERROR
+
+[handlers]
+keys=file,console
+
+[formatters]
+keys=simple,complex
+
+[formatter_simple]
+format=[%(asctime)s] - [%(module)s:%(lineno)d] %(levelname)s - %(message)s
+
+[formatter_complex]
+format=[%(asctime)s] - [%(module)s] [%(thread)d] - %(levelname)s - %(message)s
+
[handler_file]
class=handlers.TimedRotatingFileHandler
# NOTE: logging.config.fileConfig only passes "args" to the handler
# constructor; the interval/backupCount/maxBytes keys below are ignored
# unless folded into args, e.g. args=('cbtop.log', 'midnight', 1, 5)
interval=midnight
backupCount=5
maxBytes=20000000
formatter=complex
level=NOTSET
args=('cbtop.log',)
+
+[handler_console]
+class=StreamHandler
+formatter=simple
+level=NOTSET
+args=(sys.stdout,)
37 metadata/Makefile
@@ -0,0 +1,37 @@
+# Relationship between files...
+# stats.org
+# ==> [automated cleansing (make stats.org.gen)] ==>
+# stats.org.gen
+# ==> [manual annotation (cp stats.org.gen stats.src && emacs stats.src)] ==>
+# stats.src
+# ==> [json conversion (stats-conv stats.src)] ==>
+# stats.json
+#
+all: stats.org.gen stats.json ns_server.json
+
+clean:
+ rm -f stats.org stats.org.gen stats.json ns_server.json
+
+stats.org:
+ wget https://raw.github.com/couchbase/ep-engine/master/docs/stats.org
+
+stats.org.gen: stats.org
+ cat stats.org | \
+ grep -E "(^[|*])|(engine:)" | grep -v "\-\-\-\-\-\-\-" | \
+ grep -v "Stats Definitions" | grep -v "Getting Started" | grep -v "Description" \
+ > stats.org.gen
+
+stats.json: stats.src
+ ./stats-conv stats.src > stats.json
+
+diff:
+ grep -v "^ " stats.src | diff stats.org.gen -
+
+test-stats.json.sample: stats.json.sample
+ cat stats.json.sample | python -m json.tool
+
+test-stats.json: stats.json
+ cat stats.json | python -m json.tool
+
+ns_server.json:
+ ./visit-conv > ns_server.json
0  metadata/__init__.py
No changes.
19 metadata/ns_server-meta/2.0.0/couchBase__replicator.json
@@ -0,0 +1,19 @@
+{
+ "committed_update_seq": 0,
+ "compact_running": false,
+ "-compact_running": {
+ "visit": "fast"
+ },
+ "data_size": 0,
+ "db_name": "_replicator",
+ "disk_format_version": 0,
+ "-disk_format_version": {
+ "visit": "slow"
+ },
+ "disk_size": 0,
+ "doc_count": 0,
+ "doc_del_count": 0,
+ "instance_start_time": "0",
+ "purge_seq": 0,
+ "update_seq": 0
+}
2  metadata/ns_server-meta/2.0.0/couchBase__replicator__design__replicator_info__view_infos.json
@@ -0,0 +1,2 @@
+{
+}
2  metadata/ns_server-meta/2.0.0/nodeStatuses.json
@@ -0,0 +1,2 @@
+{
+}
280 metadata/ns_server-meta/2.0.0/pools_default.json
@@ -0,0 +1,280 @@
+{
+ "alerts": [
+ ],
+ "alertsSilenceURL": "/controller/resetAlerts",
+ "autoCompactionSettings": {
+ "databaseFragmentationThreshold": {
+ "percentage": 0,
+ "-percentage": {
+ "col": "DB Compaction",
+ "new_row": "true",
+ "section": "settings",
+ "units": "percent",
+ "visit": "slow"
+ },
+ "size": "undefined"
+ },
+ "parallelDBAndViewCompaction": false,
+ "viewFragmentationThreshold": {
+ "percentage": 0,
+ "-percentage": {
+ "col": "View Compaction",
+ "section": "settings",
+ "units": "percent",
+ "visit": "slow"
+ },
+ "size": "undefined"
+ }
+ },
+ "balanced": false,
+ "buckets": {
+ "uri": "/pools/default/buckets",
+ "-uri": {
+ "follow": true
+ }
+ },
+ "controllers": {
+ "addNode": {
+ "uri": "/controller/addNode"
+ },
+ "ejectNode": {
+ "uri": "/controller/ejectNode"
+ },
+ "failOver": {
+ "uri": "/controller/failOver"
+ },
+ "reAddNode": {
+ "uri": "/controller/reAddNode"
+ },
+ "rebalance": {
+ "uri": "/controller/rebalance"
+ },
+ "replication": {
+ "createURI": "/controller/createReplication",
+ "infosURI": "/couchBase/_replicator/_design/_replicator_info/_view/infos",
+ "-infosURI": {
+ "follow": true
+ },
+ "replicatorDBURI": "/couchBase/_replicator",
+ "-replicatorDBURI": {
+ "follow": true
+ }
+ },
+ "setAutoCompaction": {
+ "uri": "/controller/setAutoCompaction",
+ "validateURI": "/controller/setAutoCompaction"
+ },
+ "setFastWarmup": {
+ "uri": "/controller/setFastWarmup",
+ "validateURI": "/controller/setFastWarmup"
+ }
+ },
+ "counters": {
+ },
+ "failoverWarnings": [
+ ],
+ "fastWarmupSettings": {
+ "fastWarmupEnabled": false,
+ "minItemsThreshold": 0,
+ "-minItemsThreshold": {
+ "col": "Min Items Threshold",
+ "section": "settings",
+ "visit": "slow"
+ },
+ "minMemoryThreshold": 0,
+ "-minMemoryThreshold": {
+ "col": "Min Memory Threshold",
+ "section": "settings",
+ "visit": "slow"
+ }
+ },
+ "name": "default",
+ "nodeStatusesUri": "/nodeStatuses",
+ "-nodeStatusesUri": {
+ "follow": true
+ },
+ "nodes": [
+ {
+ "clusterCompatibility": 0,
+ "-clusterCompatibility": {
+ "visit": "slow"
+ },
+ "clusterMembership": "active",
+ "couchApiBase": "http://127.0.0.1:8092/",
+ "hostname": "127.0.0.1:8091",
+ "-hostname": {
+ "new_row": "true",
+ "section": "nodes",
+ "visit": "slow"
+ },
+ "interestingStats": {
+ "curr_items": 0,
+ "-curr_items": {
+ "section": "nodes",
+ "units": "items"
+ },
+ "curr_items_tot": 0,
+ "-curr_items_tot": {
+ "section": "nodes",
+ "units": "items"
+ },
+ "vb_replica_curr_items": 0,
+ "-vb_replica_curr_items": {
+ "section": "nodes",
+ "units": "items"
+ }
+ },
+ "mcdMemoryAllocated": 0,
+ "-mcdMemoryAllocated": {
+ "section": "nodes",
+ "units": "megabytes",
+ "visit": "slow"
+ },
+ "mcdMemoryReserved": 0,
+ "-mcdMemoryReserved": {
+ "section": "nodes",
+ "units": "megabytes",
+ "visit": "slow"
+ },
+ "memoryFree": 0,
+ "-memoryFree": {
+ "units": "bytes"
+ },
+ "memoryTotal": 0,
+ "-memoryTotal": {
+ "section": "nodes",
+ "units": "bytes",
+ "visit": "slow"
+ },
+ "os": "x86_64-unknown-linux-gnu",
+ "ports": {
+ "direct": 0,
+ "-direct": {
+ "section": "nodes",
+ "visit": "slow"
+ },
+ "proxy": 0,
+ "-proxy": {
+ "section": "nodes",
+ "visit": "collect_proxy_stats"
+ }
+ },
+ "status": "healthy",
+ "systemStats": {
+ "cpu_utilization_rate": 0.0,
+ "-cpu_utilization_rate": {
+ "col": "cpu_util",
+ "section": "nodes",
+ "units": "percent"
+ },
+ "swap_total": 0,
+ "-swap_total": {
+ "section": "nodes",
+ "units": "bytes",
+ "visit": "slow"
+ },
+ "swap_used": 0,
+ "-swap_used": {
+ "units": "bytes"
+ }
+ },
+ "thisNode": false,
+ "uptime": 0,
+ "-uptime": {
+ "units": "seconds",
+ "visit": "int"
+ },
+ "version": "2.0.0-1559-rel-community"
+ }
+ ],
+ "rebalanceProgressUri": "/pools/default/rebalanceProgress",
+ "-rebalanceProgressUri": {
+ "follow": true
+ },
+ "rebalanceStatus": "none",
+ "remoteClusters": {
+ "uri": "/pools/default/remoteClusters",
+ "validateURI": "/pools/default/remoteClusters"
+ },
+ "stats": {
+ "uri": "/pools/default/stats",
+ "-uri": {
+ "follow": true
+ }
+ },
+ "stopRebalanceIsSafe": false,
+ "stopRebalanceUri": "/controller/stopRebalance",
+ "storageTotals": {
+ "hdd": {
+ "free": 0,
+ "-free": {
+ "units": "bytes"
+ },
+ "quotaTotal": 0,
+ "-quotaTotal": {
+ "col": "HDD Quota",
+ "new_row": "true",
+ "section": "storage",
+ "units": "bytes",
+ "visit": "slow"
+ },
+ "total": 0,
+ "-total": {
+ "col": "HDD Total",
+ "section": "storage",
+ "units": "bytes",
+ "visit": "slow"
+ },
+ "used": 0,
+ "-used": {
+ "col": "HDD Used",
+ "section": "storage",
+ "units": "bytes"
+ },
+ "usedByData": 0,
+ "-usedByData": {
+ "col": "HDD Used By Data",
+ "section": "storage",
+ "units": "bytes"
+ }
+ },
+ "ram": {
+ "quotaTotal": 0,
+ "-quotaTotal": {
+ "col": "RAM Quota",
+ "section": "storage",
+ "units": "bytes",
+ "visit": "slow"
+ },
+ "quotaUsed": 0,
+ "-quotaUsed": {
+ "units": "bytes"
+ },
+ "total": 0,
+ "-total": {
+ "col": "RAM Total",
+ "section": "storage",
+ "units": "bytes",
+ "visit": "slow"
+ },
+ "used": 0,
+ "-used": {
+ "col": "RAM Used",
+ "section": "storage",
+ "units": "bytes"
+ },
+ "usedByData": 0,
+ "-usedByData": {
+ "col": "RAM Used By Data",
+ "section": "storage",
+ "units": "bytes"
+ }
+ }
+ },
+ "tasks": {
+ "uri": "/pools/default/tasks",
+ "-uri": {
+ "follow": true
+ }
+ }
+}
3,207 metadata/ns_server-meta/2.0.0/pools_default_buckets.json
@@ -0,0 +1,3207 @@
+[
+ {
+ "authType": "sasl",
+ "autoCompactionSettings": false,
+ "basicStats": {
+ "dataUsed": 0,
+ "-dataUsed": {
+ "units": "bytes"
+ },
+ "diskFetches": 0,
+ "diskUsed": 0,
+ "-diskUsed": {
+ "units": "bytes"
+ },
+ "itemCount": 0,
+ "-itemCount": {
+ "units": "items"
+ },
+ "memUsed": 0,
+ "-memUsed": {
+ "units": "bytes"
+ },
+ "opsPerSec": 0,
+ "quotaPercentUsed": 0.0,
+ "-quotaPercentUsed": {
+ "units": "percent"
+ },
+ "viewOps": 0
+ },
+ "bucketCapabilities": [
+ "touch",
+ "couchapi"
+ ],
+ "-bucketCapabilities": {
+ "visit": "strip"
+ },
+ "bucketCapabilitiesVer": "",
+ "bucketType": "membase",
+ "controllers": {
+ "compactAll": "/pools/default/buckets/default/controller/compactBucket",
+ "compactDB": "/pools/default/buckets/default/controller/compactDatabases",
+ "flush": "/pools/default/buckets/default/controller/doFlush"
+ },
+ "ddocs": {
+ "uri": "/pools/default/buckets/default/ddocs"
+ },
+ "fastWarmupSettings": false,
+ "localRandomKeyUri": "/pools/default/buckets/default/localRandomKey",
+ "name": "default",
+ "nodeLocator": "vbucket",
+ "nodes": [
+ {
+ "clusterCompatibility": 0,
+ "clusterMembership": "active",
+ "couchApiBase": "http://127.0.0.1:8092/default",
+ "hostname": "127.0.0.1:8091",
+ "interestingStats": {
+ "curr_items": 0,
+ "curr_items_tot": 0,
+ "vb_replica_curr_items": 0
+ },
+ "mcdMemoryAllocated": 0,
+ "mcdMemoryReserved": 0,
+ "memoryFree": 0,
+ "memoryTotal": 0,
+ "os": "x86_64-unknown-linux-gnu",
+ "ports": {
+ "direct": 0,
+ "proxy": 0
+ },
+ "replication": 0,
+ "status": "healthy",
+ "systemStats": {
+ "cpu_utilization_rate": 0.0,
+ "swap_total": 0,
+ "swap_used": 0
+ },
+ "thisNode": false,
+ "uptime": "0",
+ "version": "2.0.0-1559-rel-community"
+ }
+ ],
+ "-nodes": {
+ "visit": "strip"
+ },
+ "proxyPort": 0,
+ "-proxyPort": {
+ "visit": "slow"
+ },
+ "quota": {
+ "ram": 0,
+ "-ram": {
+ "units": "bytes",
+ "visit": "slow"
+ },
+ "rawRAM": 0,
+ "-rawRAM": {
+ "units": "bytes",
+ "visit": "slow"
+ }
+ },
+ "replicaIndex": false,
+ "replicaNumber": 0,
+ "-replicaNumber": {
+ "visit": "slow"
+ },
+ "saslPassword": "",
+ "stats": {
+ "directoryURI": "/pools/default/buckets/default/statsDirectory",
+ "nodeStatsListURI": "/pools/default/buckets/default/nodes",
+ "-nodeStatsListURI": {
+ "follow": true
+ },
+ "uri": "/pools/default/buckets/default/stats"
+ },
+ "streamingUri": "/pools/default/bucketsStreaming/default",
+ "uri": "/pools/default/buckets/default",
+ "uuid": "6b22a23338675f894575e6ce2c9c37ad",
+ "vBucketServerMap": {
+ "hashAlgorithm": "CRC",
+ "numReplicas": 0,
+ "serverList": [
+ "127.0.0.1:11210"
+ ],
+ "-serverList": {
+ "visit": "collect_mc_stats"
+ },
+ "vBucketMap": [
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [
+ 0
+ ],
+ [