Rebase and add 3.x Tests to PR-395 (#411)
* Python 2.6, 2.7, 3.4, 3.5, 3.6, 3.7 compatibility
* Added Python 3.x versions to Travis
johann8384 committed Dec 4, 2018
1 parent 2b68c4b commit b684402
Showing 52 changed files with 494 additions and 382 deletions.
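Most of the 52 files repeat the same small set of mechanical Python 2/3 idioms seen in the hunks below: "except E as e" replaces the 2.x-only "except E, e", print statements become print() function calls, and dict.items() replaces the removed iteritems(). Condensed into one illustrative sketch (the helper and metric names are invented for illustration, not part of the commit):

    from __future__ import print_function  # makes print() a function on Python 2.6/2.7

    import sys
    import time

    def emit(metric, value, tags):
        """Illustrative helper -- shows the idioms this commit standardizes on."""
        try:
            f = open("/proc/loadavg")
        except IOError as e:  # py3 dropped "except IOError, e"; "as" works on 2.6+
            print("can't read /proc/loadavg: %s" % e, file=sys.stderr)
            return
        f.close()
        ts = int(time.time())
        # iteritems() is gone on py3; items() iterates fine on both
        tagstr = " ".join("%s=%s" % (k, v) for k, v in tags.items())
        print("%s %d %s %s" % (metric, ts, value, tagstr))

    emit("example.load", "0.42", {"host": "web01"})

Every one of these spellings is also valid on Python 2.6+, which is what lets the same files claim 2.6-through-3.7 compatibility.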
3 changes: 3 additions & 0 deletions .travis.yml
@@ -1,6 +1,9 @@
 language: python
 python:
 - "2.7"
+- "3.4"
+- "3.5"
+- "3.6"
 install:
 - pip install pylint pylint_runner ordereddict mysqlclient requests feedparser prometheus_client
 script:
6 changes: 3 additions & 3 deletions collectors/0/couchbase.py
@@ -87,7 +87,7 @@ def find_conf_file(pid):
     """Returns config file for couchbase-server."""
     try:
         fd = open('/proc/%s/cmdline' % pid)
-    except IOError, e:
+    except IOError as e:
         utils.err("Couchbase (pid %s) went away ? %s" % (pid, e))
         return
     try:
@@ -100,7 +100,7 @@ def find_bindir_path(config_file):
     """Returns the bin directory path"""
     try:
         fd = open(config_file)
-    except IOError, e:
+    except IOError as e:
         utils.err("Error for Config file (%s): %s" % (config_file, e))
         return None
     try:
@@ -142,7 +142,7 @@ def collect_stats(bin_dir, bucket):
         metric = stat.split(":")[0].lstrip(" ")
         value = stat.split(":")[1].lstrip(" \t")
         if metric in KEYS:
-            print ("couchbase.%s %i %s bucket=%s" % (metric, ts, value, bucket))
+            print("couchbase.%s %i %s bucket=%s" % (metric, ts, value, bucket))
 
 def main():
     utils.drop_privileges()
6 changes: 3 additions & 3 deletions collectors/0/dfstat.py
@@ -51,7 +51,7 @@ def main():
     """dfstats main loop"""
     try:
         f_mounts = open("/proc/mounts", "r")
-    except IOError, e:
+    except IOError as e:
         utils.err("error: can't open /proc/mounts: %s" % e)
         return 13 # Ask tcollector to not respawn us
 
@@ -72,7 +72,7 @@ def main():
             # fs_passno # Order in which filesystem checks are done at reboot time
             try:
                 fs_spec, fs_file, fs_vfstype, fs_mntops, fs_freq, fs_passno = line.split(None)
-            except ValueError, e:
+            except ValueError as e:
                 utils.err("error: can't parse line at /proc/mounts: %s" % e)
                 continue
 
@@ -105,7 +105,7 @@ def main():
             fs_spec, fs_file, fs_vfstype = device
             try:
                 r = os.statvfs(fs_file)
-            except OSError, e:
+            except OSError as e:
                 utils.err("can't get info for mount point: %s: %s" % (fs_file, e))
                 continue
 
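The return 13 above is part of tcollector's collector contract: a collector's main() return value becomes its exit status via sys.exit(main()), and an exit status of 13 asks tcollector not to respawn the collector. A minimal skeleton honoring that convention (an invented example, not a real collector):

    #!/usr/bin/env python
    from __future__ import print_function

    import sys
    import time

    def main():
        try:
            f_mounts = open("/proc/mounts", "r")
        except IOError as e:
            print("error: can't open /proc/mounts: %s" % e, file=sys.stderr)
            return 13  # exit status 13 tells tcollector not to respawn us
        with f_mounts:
            print("example.mounts %d %d" % (int(time.time()), sum(1 for _ in f_mounts)))
        return 0

    if __name__ == "__main__":
        sys.exit(main())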
20 changes: 11 additions & 9 deletions collectors/0/docker.py
@@ -2,6 +2,8 @@
 # More informations on https://docs.docker.com/articles/runmetrics/
 """Imports Docker stats from /sys/fs/cgroup."""
 
+from __future__ import print_function
+
 import os
 import re
 import socket
@@ -60,7 +62,7 @@ def getnameandimage(containerid):
     try:
         r = sock.connect_ex(DOCKER_SOCK)
         if (r != 0):
-            print >>sys.stderr, "Can not connect to %s" % (DOCKER_SOCK)
+            print("Can not connect to %s" % (DOCKER_SOCK), file=sys.stderr)
         else:
             message = 'GET /containers/' + containerid + '/json HTTP/1.1\r\nHost: http\n\n'
             sock.sendall(message)
@@ -79,16 +81,16 @@ def getnameandimage(containerid):
             try:
                 containernames[containerid] = data["Name"].lstrip('/')
             except:
-                print >>sys.stderr, containerid+" has no Name field"
+                print(containerid+" has no Name field", file=sys.stderr)
             try:
                 containerimages[containerid] = data["Config"]["Image"].replace(':', '_')
             except:
-                print >>sys.stderr, containerid+" has no Image field"
+                print(containerid+" has no Image field", file=sys.stderr)
         except:
-            print >>sys.stderr, "Can not load json"
+            print("Can not load json", file=sys.stderr)
 
-    except socket.timeout, e:
-        print >>sys.stderr, "Socket: %s" % (e,)
+    except socket.timeout as e:
+        print("Socket: %s" % (e,), file=sys.stderr)
 
 def senddata(datatosend, containerid):
     if datatosend:
@@ -97,7 +99,7 @@ def senddata(datatosend, containerid):
             datatosend += " containername="+containernames[containerid]
         if (containerid in containerimages):
             datatosend += " containerimage="+containerimages[containerid]
-        print "docker.%s" % datatosend
+        print("docker.%s" % datatosend)
         sys.stdout.flush()
 
 def readdockerstats(path, containerid):
@@ -112,8 +114,8 @@ def readdockerstats(path, containerid):
                 and ((file_stat in proc_names.keys()) or (file_stat in proc_names_to_agg.keys()))):
             try:
                 f_stat = open(path+"/"+file_stat)
-            except IOError, e:
-                print >>sys.stderr, "Failed to open input file: %s" % (e,)
+            except IOError as e:
+                print("Failed to open input file: %s" % (e,), file=sys.stderr)
                 return 1
             ts = int(time.time())
 
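The from __future__ import print_function that docker.py gains here is not cosmetic. Without it, Python 2 rejects print(msg, file=sys.stderr) outright as a SyntaxError, and a two-argument call like print("a", "b") silently prints the tuple ('a', 'b'). With the import, print is a real function on 2.6+ and behaves identically under 3.x. A standalone sketch:

    # The __future__ import must precede all other statements except the module docstring.
    from __future__ import print_function

    import sys

    # Identical behavior on Python 2.6+ and 3.x:
    print("docker.cpu.usage 1543900000 42 containerid=abc123")         # metric line to stdout
    print("Can not connect to /var/run/docker.sock", file=sys.stderr)  # diagnostics to stderr

    # Without the import, Python 2 would reject the second call as a SyntaxError;
    # the first would still work as a print statement with a parenthesized argument,
    # but print("a", "b") would emit the tuple ('a', 'b') instead of "a b".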
3 changes: 2 additions & 1 deletion collectors/0/docker_engine.py
@@ -13,6 +13,7 @@
 # see <http://www.gnu.org/licenses/>.
 """Imports Docker stats from the docker-api"""
 
+from __future__ import print_function
 import sys
 
 from collectors.etc import docker_engine_conf
@@ -32,7 +33,7 @@ def main():
     cli = DockerMetrics(METRICS_PATH)
 
     for m in cli.get_endpoint():
-        print m.get_metric_lines()
+        print(m.get_metric_lines())
 
 
 if __name__ == "__main__":
22 changes: 13 additions & 9 deletions collectors/0/elasticsearch.py
@@ -16,7 +16,6 @@
 # Tested with ES 0.16.5, 0.17.x, 0.90.1 .
 
 import errno
-import httplib
 try:
     import json
 except ImportError:
@@ -30,6 +29,11 @@
 from collectors.lib import utils
 from collectors.etc import elasticsearch_conf
 
+try:
+    from http.client import HTTPConnection, OK
+except ImportError:
+    from httplib import HTTPConnection, OK
+
 
 COLLECTION_INTERVAL = 15  # seconds
 DEFAULT_TIMEOUT = 10.0  # seconds
@@ -57,7 +61,7 @@ def request(server, uri, json_in = True):
     """Does a GET request of the given uri on the given HTTPConnection."""
     server.request("GET", uri)
     resp = server.getresponse()
-    if resp.status != httplib.OK:
+    if resp.status != OK:
         raise ESError(resp)
     if json_in:
         return json.loads(resp.read())
@@ -99,10 +103,10 @@ def printmetric(metric, ts, value, tags):
     # Warning, this should be called inside a lock
     if tags:
         tags = " " + " ".join("%s=%s" % (name.replace(" ",""), value.replace(" ",""))
-                              for name, value in tags.iteritems())
+                              for name, value in tags.items())
     else:
         tags = ""
-    print ("%s %d %s %s"
+    print("%s %d %s %s"
           % (metric, ts, value, tags))
 
 def _traverse(metric, stats, ts, tags):
@@ -148,7 +152,7 @@ def _collect_indices(server, metric, tags, lock):
                 # now print value
                 with lock:
                     printmetric(metric + ".cluster.byindex." + headerlist[count], ts, value, newtags)
-            except ValueError, ve:
+            except ValueError:
                 # add this as a tag
                 newtags[headerlist[count]] = value
             count += 1
@@ -200,11 +204,11 @@ def main(argv):
         return 1
 
     for conf in elasticsearch_conf.get_servers():
-        server = httplib.HTTPConnection( *conf )
+        server = HTTPConnection( *conf )
         try:
             server.connect()
-        except socket.error, (erno, e):
-            if erno == errno.ECONNREFUSED:
+        except socket.error as exc:
+            if exc.errno == errno.ECONNREFUSED:
                 continue
             raise
         servers.append( server )
@@ -222,7 +226,7 @@ def main(argv):
         t.start()
         threads.append(t)
     for thread in threads:
-        t.join()
+        thread.join()
     time.sleep(COLLECTION_INTERVAL)
 
 if __name__ == "__main__":
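Two things are worth flagging in elasticsearch.py beyond the print/except conversions. First, the final hunk fixes a genuine bug: the shutdown loop previously joined t, the last thread created, on every iteration instead of each thread. Second, the try/except import shim is the standard way to follow the httplib -> http.client rename without a dependency on six; importing HTTPConnection and OK unqualified keeps the rest of the module version-agnostic. The pattern in isolation (host, port, and URI are illustrative):

    try:
        from http.client import HTTPConnection, OK   # Python 3 name
    except ImportError:
        from httplib import HTTPConnection, OK        # Python 2 fallback

    def fetch_status(host="localhost", port=9200, uri="/_cluster/health"):
        """Hypothetical helper: GET a URI, return the body if the server answers 200 OK."""
        conn = HTTPConnection(host, port)
        conn.request("GET", uri)
        resp = conn.getresponse()
        if resp.status != OK:
            raise RuntimeError("unexpected status %d" % resp.status)
        return resp.read()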
34 changes: 20 additions & 14 deletions collectors/0/flume.py
@@ -26,10 +26,11 @@
   Based on the elastichsearch collector
 """
-"""
 
+from __future__ import print_function
+
 import errno
-import httplib
 try:
     import json
 except ImportError:
@@ -45,6 +46,11 @@
 except ImportError:
     flume_conf = None
 
+try:
+    from http.client import HTTPConnection, OK
+except ImportError:
+    from httplib import HTTPConnection, OK
+
 COLLECTION_INTERVAL = 15  # seconds
 DEFAULT_TIMEOUT = 10.0  # seconds
 FLUME_HOST = "localhost"
@@ -54,7 +60,7 @@
 EXCLUDE = [ 'StartTime', 'StopTime', 'Type' ]
 
 def err(msg):
-    print >>sys.stderr, msg
+    print(msg, file=sys.stderr)
 
 class FlumeError(RuntimeError):
     """Exception raised if we don't get a 200 OK from Flume webserver."""
@@ -66,7 +72,7 @@ def request(server, uri):
     """Does a GET request of the given uri on the given HTTPConnection."""
     server.request("GET", uri)
     resp = server.getresponse()
-    if resp.status != httplib.OK:
+    if resp.status != OK:
         raise FlumeError(resp)
     return json.loads(resp.read())
 
@@ -94,11 +100,11 @@ def main(argv):
 
     utils.drop_privileges()
     socket.setdefaulttimeout(DEFAULT_TIMEOUT)
-    server = httplib.HTTPConnection(FLUME_HOST, FLUME_PORT)
+    server = HTTPConnection(FLUME_HOST, FLUME_PORT)
     try:
         server.connect()
-    except socket.error, (erno, e):
-        if erno == errno.ECONNREFUSED:
+    except socket.error as exc:
+        if exc.errno == errno.ECONNREFUSED:
             return 13  # No Flume server available, ask tcollector to not respawn us.
         raise
     if json is None:
@@ -108,22 +114,22 @@ def main(argv):
     def printmetric(metric, value, **tags):
         if tags:
             tags = " " + " ".join("%s=%s" % (name, value)
-                                  for name, value in tags.iteritems())
+                                  for name, value in tags.items())
         else:
             tags = ""
-        print ("flume.%s %d %s %s" % (metric, ts, value, tags))
+        print(("flume.%s %d %s %s" % (metric, ts, value, tags)))
 
     while True:
         # Get the metrics
         ts = int(time.time())  # In case last call took a while.
         stats = flume_metrics(server)
 
         for metric in stats:
-          (component, name) = metric.split(".")
-          tags = {component.lower(): name}
-          for key,value in stats[metric].items():
-              if key not in EXCLUDE:
-                  printmetric(key.lower(), value, **tags)
+            (component, name) = metric.split(".")
+            tags = {component.lower(): name}
+            for key,value in stats[metric].items():
+                if key not in EXCLUDE:
+                    printmetric(key.lower(), value, **tags)
 
         time.sleep(COLLECTION_INTERVAL)
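Python 3 also removed tuple unpacking in the except clause itself, so the old except socket.error, (erno, e): has no direct 3.x spelling. Both elasticsearch.py and flume.py now bind the exception object and read its errno attribute, which also works on Python 2.6+ (and from 3.3 on, socket.error is simply an alias of OSError). A sketch of the pattern (the port is illustrative):

    import errno
    import socket

    try:
        from http.client import HTTPConnection   # Python 3 name
    except ImportError:
        from httplib import HTTPConnection        # Python 2 fallback

    def connect_or_none(host, port):
        """Return a connected HTTPConnection, or None if nothing is listening."""
        server = HTTPConnection(host, port)
        try:
            server.connect()
        except socket.error as exc:
            if exc.errno == errno.ECONNREFUSED:
                return None   # caller can decide to exit 13, as main() does above
            raise
        return server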
6 changes: 3 additions & 3 deletions collectors/0/g1gc.py
@@ -161,7 +161,7 @@ def sec2milli(seconds):
 
 def flush_collector(collector):
     for metric_name, value in collector['data'].items():
-        print metric_name % (collector['timestamp'], value)
+        print(metric_name % (collector['timestamp'], value))
 
     collector['timestamp'] = None
     collector['data'] = {}
@@ -372,11 +372,11 @@ def process_gc_log(collector):
 
         if not collector['timestamp'] is None:
             for gen, value in collector['gensize'].items():
-                print "%s.gc.g1.gensize %s %s gen=%s" % (prefix, current_timestamp_in_sec, value, gen)
+                print("%s.gc.g1.gensize %s %s gen=%s" % (prefix, current_timestamp_in_sec, value, gen))
 
             # publish gc event count metrics
             for event, value in collector['count'].items():
-                print "%s.gc.g1.event.count %s %s event=%s" % (prefix, current_timestamp_in_sec, value, event)
+                print("%s.gc.g1.event.count %s %s event=%s" % (prefix, current_timestamp_in_sec, value, event))
 
     except Exception:
         exc_type, exc_value, exc_traceback = sys.exc_info()
10 changes: 7 additions & 3 deletions collectors/0/graphite_bridge.py
@@ -17,9 +17,13 @@
 
 import sys
 from collectors.lib import utils
-import SocketServer
 import threading
 
+try:
+    from socketserver import ThreadingTCPServer, BaseRequestHandler
+except ImportError:
+    from SocketServer import ThreadingTCPServer, BaseRequestHandler
+
 try:
     from collectors.etc import graphite_bridge_conf
 except ImportError:
@@ -29,12 +33,12 @@
 PORT = 2003
 SIZE = 8192
 
-class GraphiteServer(SocketServer.ThreadingTCPServer):
+class GraphiteServer(ThreadingTCPServer):
     allow_reuse_address = True
 
 print_lock = threading.Lock()
 
-class GraphiteHandler(SocketServer.BaseRequestHandler):
+class GraphiteHandler(BaseRequestHandler):
 
     def handle_line(self, line):
         line_parts = line.split()
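graphite_bridge.py gets the same shim treatment for the SocketServer -> socketserver rename; importing ThreadingTCPServer and BaseRequestHandler unqualified means the class definitions need no version checks. A self-contained sketch of the same structure (the echo handler is invented; the real GraphiteHandler parses metric lines instead):

    try:
        from socketserver import ThreadingTCPServer, BaseRequestHandler   # Python 3 name
    except ImportError:
        from SocketServer import ThreadingTCPServer, BaseRequestHandler   # Python 2 fallback

    class EchoHandler(BaseRequestHandler):
        def handle(self):
            # Echo one read back to the client.
            self.request.sendall(self.request.recv(8192))

    class EchoServer(ThreadingTCPServer):
        allow_reuse_address = True   # same flag GraphiteServer sets above

    if __name__ == "__main__":
        server = EchoServer(("127.0.0.1", 0), EchoHandler)   # port 0 picks any free port
        print("listening on %s:%d" % server.server_address)
        server.server_close()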
