diff --git a/README.rst b/README.rst
index 174d8dd..9a8be8c 100644
--- a/README.rst
+++ b/README.rst
@@ -14,6 +14,7 @@ Features
 - Four types of metric are supported: Counter, Gauge, Summary(without quantiles) and Histogram.
 - InMemoryStorage (do not use it for multiprocessing apps)
 - UWSGI storage - share metrics between processes
+- UWSGI flush storage - sync metrics to uwsgi sharedarea on flush() call
 - time decorator
 - time context manager
diff --git a/conftest.py b/conftest.py
index 6894b2c..c2a3152 100644
--- a/conftest.py
+++ b/conftest.py
@@ -22,10 +22,21 @@ def project_root():
 def run_around_tests():
     m = uwsgi.sharedarea_memoryview(0)
     for x in xrange(len(m)):
-        m[x] = '\x00'
+        m[x] = "\x00"
     yield
 
+
 @pytest.fixture
 def measure_time():
     return measure_time_manager
+
+
+@pytest.fixture()
+def iterations():
+    return 500
+
+
+@pytest.fixture()
+def num_workers():
+    return 10
diff --git a/pyprometheus/contrib/uwsgi_features.py b/pyprometheus/contrib/uwsgi_features.py
index 10efb47..4012371 100644
--- a/pyprometheus/contrib/uwsgi_features.py
+++ b/pyprometheus/contrib/uwsgi_features.py
@@ -15,11 +15,12 @@
 import os
 import struct
 import uuid
+import copy
 from contextlib import contextmanager
-
+from logging import getLogger
 from pyprometheus.const import TYPES
-from pyprometheus.metrics import Gauge
-from pyprometheus.storage import BaseStorage
+from pyprometheus.metrics import Gauge, Counter
+from pyprometheus.storage import BaseStorage, LocalMemoryStorage
 
 
 try:
@@ -36,13 +37,15 @@ class InvalidUWSGISharedareaPagesize(Exception):
     pass
 
 
+logger = getLogger("pyprometheus.uwsgi_features")
+
 
 class UWSGICollector(object):
     """Grap UWSGI stats and export to prometheus
     """
     def __init__(self, namespace, labels={}):
         self._namespace = namespace
-        self._labels =tuple(sorted(labels.items(), key=lambda x: x[0]))
+        self._labels = tuple(sorted(labels.items(), key=lambda x: x[0]))
         self._collectors = self.declare_metrics()
 
     @property
@@ -64,40 +67,40 @@ def metric_name(self, name):
 
         :param name:
         """
-        return ':'.join([self._namespace, name])
+        return ":".join([self._namespace, name])
 
     def declare_metrics(self):
         return {
-            'memory': Gauge(self.metric_name("uwsgi_memory_bytes"), "UWSGI memory usage in bytes", ('type',) + self._labels),
-            'processes': Gauge(self.metric_name("processes_total"), "Number of UWSGI processes", self._labels),
-            'worker_status': Gauge(self.metric_name("worker_status_totla"), "Current workers status", self._labels),
-            'total_requests': Gauge(self.metric_name("requests_total"), "Total processed request", self._labels),
-            'buffer_size': Gauge(self.metric_name("buffer_size_bytes"), "UWSGI buffer size in bytes", self._labels),
-            'started_on': Gauge(self.metric_name("started_on"), "UWSGI started on timestamp", self._labels),
-            'cores': Gauge(self.metric_name("cores"), "system cores", self._labels),
-
-
-            'process:respawn_count': Gauge(self.metric_name("process:respawn_count"), "Process respawn count", ('id', ) + self._labels),
-            'process:last_spawn': Gauge(self.metric_name("process:last_spawn"), "Process last spawn", ('id', ) + self._labels),
-            'process:signals': Gauge(self.metric_name("process:signals"), "Process signals total", ('id', ) + self._labels),
-            'process:avg_rt': Gauge(self.metric_name("process:avg_rt"), "Process average response time", ('id', ) + self._labels),
-            'process:tx': Gauge(self.metric_name("process:tx"), "Process transmitted data", ('id',) + self._labels),
-
-            'process:status': Gauge(self.metric_name("process:status"), "Process status", ('id', 'status') + self._labels),
-
'process:running_time': Gauge(self.metric_name("process:running_time"), "Process running time", ('id', ) + self._labels), - 'process:exceptions': Gauge(self.metric_name("process:exceptions"), "Process exceptions", ('id', ) + self._labels), - 'process:requests': Gauge(self.metric_name("process:requests"), "Process requests", ('id', ) + self._labels), - 'process:delta_requests': Gauge(self.metric_name("process:delta_requests"), "Process delta_requests", ('id', ) + self._labels), - 'process:rss': Gauge(self.metric_name("process:rss"), "Process rss memory", ('id', ) + self._labels), - 'process:vsz': Gauge(self.metric_name("process:vzs"), "Process vsz address space", ('id', ) + self._labels), + "memory": Gauge(self.metric_name("uwsgi_memory_bytes"), "UWSGI memory usage in bytes", ("type",) + self._labels), + "processes": Gauge(self.metric_name("processes_total"), "Number of UWSGI processes", self._labels), + "worker_status": Gauge(self.metric_name("worker_status_totla"), "Current workers status", self._labels), + "total_requests": Gauge(self.metric_name("requests_total"), "Total processed request", self._labels), + "buffer_size": Gauge(self.metric_name("buffer_size_bytes"), "UWSGI buffer size in bytes", self._labels), + "started_on": Gauge(self.metric_name("started_on"), "UWSGI started on timestamp", self._labels), + "cores": Gauge(self.metric_name("cores"), "system cores", self._labels), + + + "process:respawn_count": Gauge(self.metric_name("process:respawn_count"), "Process respawn count", ("id", ) + self._labels), + "process:last_spawn": Gauge(self.metric_name("process:last_spawn"), "Process last spawn", ("id", ) + self._labels), + "process:signals": Gauge(self.metric_name("process:signals"), "Process signals total", ("id", ) + self._labels), + "process:avg_rt": Gauge(self.metric_name("process:avg_rt"), "Process average response time", ("id", ) + self._labels), + "process:tx": Gauge(self.metric_name("process:tx"), "Process transmitted data", ("id",) + self._labels), + + "process:status": Gauge(self.metric_name("process:status"), "Process status", ("id", "status") + self._labels), + "process:running_time": Gauge(self.metric_name("process:running_time"), "Process running time", ("id", ) + self._labels), + "process:exceptions": Gauge(self.metric_name("process:exceptions"), "Process exceptions", ("id", ) + self._labels), + "process:requests": Gauge(self.metric_name("process:requests"), "Process requests", ("id", ) + self._labels), + "process:delta_requests": Gauge(self.metric_name("process:delta_requests"), "Process delta_requests", ("id", ) + self._labels), + "process:rss": Gauge(self.metric_name("process:rss"), "Process rss memory", ("id", ) + self._labels), + "process:vsz": Gauge(self.metric_name("process:vzs"), "Process vsz address space", ("id", ) + self._labels), } def collect(self): - for name, value in [('processes', uwsgi.numproc), - ('total_requests', uwsgi.total_requests()), - ('buffer_size', uwsgi.buffer_size), - ('started_on', uwsgi.started_on), - ('cores', uwsgi.cores)]: + for name, value in [("processes", uwsgi.numproc), + ("total_requests", uwsgi.total_requests()), + ("buffer_size", uwsgi.buffer_size), + ("started_on", uwsgi.started_on), + ("cores", uwsgi.cores)]: yield self.get_sample(name, value) yield self.get_memory_samples() @@ -111,23 +114,23 @@ def get_workers_samples(self, workers): :param worker: worker stats """ - for name in ['requests', 'respawn_count', 'running_time', - 'exceptions', 'delta_requests', - 'rss', 'vsz', 'last_spawn', 'tx', 'avg_rt', 'signals']: + for 
name in ["requests", "respawn_count", "running_time", + "exceptions", "delta_requests", + "rss", "vsz", "last_spawn", "tx", "avg_rt", "signals"]: metric = self._collectors["process:" + name] for worker in workers: - labels = self._labels + (('id', worker['id']),) + labels = self._labels + (("id", worker["id"]),) metric.add_sample(labels, metric.build_sample(labels, - ( (TYPES.GAUGE, metric.name, '', self._labels + (('id', worker['id']),), worker[name]), ))) + ( (TYPES.GAUGE, metric.name, "", labels, worker[name]), ))) yield metric metric = self._collectors["process:status"] for worker in workers: - labels = self._labels + (('id', worker['id']), ('status', worker['status'])) + labels = self._labels + (("id", worker["id"]), ("status", worker["status"])) metric.add_sample(labels, metric.build_sample(labels, - ( (TYPES.GAUGE, metric.name, '', self._labels + (('id', worker['id']), ('status', worker['status'])), 1), ))) + ( (TYPES.GAUGE, metric.name, "", self._labels + (("id", worker["id"]), ("status", worker["status"])), 1), ))) yield metric @@ -138,23 +141,21 @@ def get_sample(self, name, value): :param value: """ metric = self._collectors[name] - return metric.build_samples([(self._labels, ( (TYPES.GAUGE, metric.name, '', self._labels, float(value)), ))]) + return metric.build_samples([(self._labels, ( (TYPES.GAUGE, metric.name, "", self._labels, float(value)), ))]) def get_memory_samples(self): """Get memory usage samples """ - metric = self._collectors['memory'] + metric = self._collectors["memory"] return metric.build_samples( - [(self._labels + (('type', 'rss'),), ( (TYPES.GAUGE, metric.name, '', self._labels + (('type', 'rss'),), uwsgi.mem()[0]), )), - (self._labels + (('type', 'vsz'),), ( (TYPES.GAUGE, metric.name, '', self._labels + (('type', 'vsz'),), uwsgi.mem()[1]), ))]) - + [(self._labels + (("type", "rss"),), ( (TYPES.GAUGE, metric.name, "", self._labels + (("type", "rss"),), uwsgi.mem()[0]), )), + (self._labels + (("type", "vsz"),), ( (TYPES.GAUGE, metric.name, "", self._labels + (("type", "vsz"),), uwsgi.mem()[1]), ))]) class UWSGIStorage(BaseStorage): - """A dict of doubles, backend by uwsgi sharedarea - """ + """A dict of doubles, backend by uwsgi sharedarea""" - SHAREDAREA_ID = int(os.environ.get('PROMETHEUS_UWSGI_SHAREDAREA', 0)) + SHAREDAREA_ID = int(os.environ.get("PROMETHEUS_UWSGI_SHAREDAREA", 0)) KEY_SIZE_SIZE = 4 KEY_VALUE_SIZE = 8 SIGN_SIZE = 10 @@ -162,7 +163,7 @@ class UWSGIStorage(BaseStorage): SIGN_POSITION = 4 AREA_SIZE_POSITION = 0 - def __init__(self, sharedarea_id=SHAREDAREA_ID): + def __init__(self, sharedarea_id=SHAREDAREA_ID, namespace="", stats=False, labels={}): self._sharedarea_id = sharedarea_id self._used = None # Changed every time then keys added @@ -170,10 +171,57 @@ def __init__(self, sharedarea_id=SHAREDAREA_ID): self._positions = {} self._rlocked = False self._wlocked = False - self._m = uwsgi.sharedarea_memoryview(self._sharedarea_id) self._keys_cache = {} + self._namespace = namespace + self._stats = stats + self._labels = tuple(sorted(labels.items(), key=lambda x: x[0])) + + self._m = uwsgi.sharedarea_memoryview(self._sharedarea_id) + self.init_memory() + self._collectors = self.declare_metrics() + + @property + def uid(self): + return "uwsgi-storage:{0}".format(self._namespace) + + @property + def text_export_header(self): + return "# {0} stats metrics".format(self.__class__.__name__) + + def metric_name(self, name): + """Make metric name with namespace + + :param name: + """ + return ":".join([self._namespace, name]) + + def 
declare_metrics(self): + return { + "memory_sync": Counter(self.metric_name("memory_read"), "UWSGI shared memory syncs", ("sharedarea", ) + self._labels), + "memory_size": Gauge(self.metric_name("memory_size"), "UWSGI shared memory size", ("sharedarea", ) + self._labels), + "num_keys": Gauge(self.metric_name("num_keys"), "UWSGI num_keys", ("sharedarea", ) + self._labels) + } + + def collect(self): + labels = self._labels + (("sharedarea", self._sharedarea_id), ) + # metric = self._collectors["memory_sync"] + # metric.add_sample(labels, metric.build_sample(labels, ( (TYPES.GAUGE, metric.name, "", labels, ) )) + + # yield metric + metric = self._collectors["memory_size"] + + metric.add_sample(labels, metric.build_sample(labels, ( (TYPES.GAUGE, metric.name, "", labels, self.get_area_size()), ))) + + yield metric + + metric = self._collectors["num_keys"] + metric.add_sample(labels, metric.build_sample(labels, ( (TYPES.GAUGE, metric.name, "", labels, len(self._positions)), ))) + + yield metric + + @property def m(self): return self._m @@ -216,14 +264,14 @@ def get_slice(self, start, size): def get_area_size(self): """Read area size from uwsgi """ - return struct.unpack(b'i', self.m[self.get_slice(self.AREA_SIZE_POSITION, self.AREA_SIZE_SIZE)])[0] + return struct.unpack(b"i", self.m[self.get_slice(self.AREA_SIZE_POSITION, self.AREA_SIZE_SIZE)])[0] def init_area_size(self): return self.update_area_size(self.AREA_SIZE_SIZE) def update_area_size(self, size): self._used = size - self.m[self.get_slice(self.AREA_SIZE_POSITION, self.AREA_SIZE_SIZE)] = struct.pack(b'i', size) + self.m[self.get_slice(self.AREA_SIZE_POSITION, self.AREA_SIZE_SIZE)] = struct.pack(b"i", size) return True def update_area_sign(self): @@ -262,7 +310,6 @@ def read_memory(self): self._sign = self.get_area_sign() self._positions.clear() - while pos < self._used + self.AREA_SIZE_POSITION: key_size, (key, key_value), positions = self.read_item(pos) @@ -298,7 +345,7 @@ def get_key_size(self, key): def get_binary_string(self, key, value): - item_template = '=i{0}sd'.format(len(key)).encode() + item_template = "=i{0}sd".format(len(key)).encode() return struct.pack(item_template, len(key), key, value) @@ -327,7 +374,7 @@ def read_key_string(self, position, size): :param size: int key size in bytes to read """ key_string_bytes = self.m[self.get_slice(position, size)] - return struct.unpack(b'{0}s'.format(size), key_string_bytes)[0] + return struct.unpack(b"{0}s".format(size), key_string_bytes)[0] def read_key_value(self, position): """Read float value of position @@ -335,7 +382,7 @@ def read_key_value(self, position): :param position: int offset for key value float """ key_value_bytes = self.m[self.get_slice(position, self.KEY_VALUE_SIZE)] - return struct.unpack(b'd', key_value_bytes)[0] + return struct.unpack(b"d", key_value_bytes)[0] def read_key_size(self, position): """Read key size from position @@ -343,14 +390,14 @@ def read_key_size(self, position): :param position: int offset for 4-byte key size """ key_size_bytes = self.m[self.get_slice(position, self.KEY_SIZE_SIZE)] - return struct.unpack(b'i', key_size_bytes)[0] + return struct.unpack(b"i", key_size_bytes)[0] def write_key_value(self, position, value): """Write float value to position :param position: int offset for 8-byte float value """ - self.m[self.get_slice(position, self.KEY_VALUE_SIZE)] = struct.pack(b'd', value) + self.m[self.get_slice(position, self.KEY_VALUE_SIZE)] = struct.pack(b"d", value) return value def read_item(self, position): @@ -367,7 +414,7 @@ def 
read_item(self, position):
 
         key = self.read_key_string(key_string_position, key_size)
 
-        key_value_position = key_string_position + key_size# + self.get_string_padding(key)
+        key_value_position = key_string_position + key_size  # + self.get_string_padding(key)
         key_value = self.read_key_value(key_value_position)
 
         return (key_size,
@@ -470,19 +517,73 @@ def unlock(self):
         self._wlocked, self._rlocked = False, False
         uwsgi.sharedarea_unlock(self._sharedarea_id)
 
-
     def __len__(self):
         return len(self._positions)
 
     def clear(self):
         for x in xrange(self.AREA_SIZE_SIZE + self.AREA_SIZE_SIZE):
-            self.m[x] = '\x00'
+            self.m[x] = "\x00"
 
         self._positions.clear()
 
-
     def get_items(self):
         self.validate_actuality()
         for key, position in self._positions.items():
             yield self.unserialize_key(key), self.read_key_value(position[2])
+
+    def inc_items(self, items):
+        self.validate_actuality()
+        with self.lock():
+            for key, value in items:
+                try:
+                    positions, created = self.get_key_position(self.serialize_key(key), value)
+                    if created:
+                        continue
+                    self.write_key_value(positions[2], self.read_key_value(positions[2]) + value)
+                except InvalidUWSGISharedareaPagesize:
+                    logger.error("Invalid sharedarea pagesize {0} bytes".format(len(self._m)))
+
+    def write_items(self, items):
+        self.validate_actuality()
+        with self.lock():
+            for key, value in items:
+                try:
+                    positions, created = self.get_key_position(self.serialize_key(key), value)
+                    if created:
+                        continue
+                    self.write_key_value(positions[2], value)
+                except InvalidUWSGISharedareaPagesize:
+                    logger.error("Invalid sharedarea pagesize {0} bytes".format(len(self._m)))
+
+
+class UWSGIFlushStorage(LocalMemoryStorage):
+    """Storage wrapper for UWSGI storage that updates counters in memory and flushes them into the uwsgi sharedarea
+    """
+    SHAREDAREA_ID = int(os.environ.get("PROMETHEUS_UWSGI_SHAREDAREA", 0))
+
+    def __init__(self, sharedarea_id=UWSGIStorage.SHAREDAREA_ID):
+        self._uwsgi_storage = UWSGIStorage(sharedarea_id)
+        self._flush = 0
+        self._get_items = 0
+        self._clear = 0
+        super(UWSGIFlushStorage, self).__init__()
+
+    @property
+    def persistent_storage(self):
+        return self._uwsgi_storage
+
+    def flush(self):
+        items = list(super(UWSGIFlushStorage, self).get_items())
+        self._uwsgi_storage.inc_items(items)
+        super(UWSGIFlushStorage, self).clear()
+
+    def get_items(self):
+        return self._uwsgi_storage.get_items()
+
+    def __len__(self):
+        return super(UWSGIFlushStorage, self).__len__()
+
+    def clear(self):
+        self._uwsgi_storage.clear()
+        super(UWSGIFlushStorage, self).clear()
diff --git a/pyprometheus/metrics.py b/pyprometheus/metrics.py
index 671b8c2..dd8022e 100644
--- a/pyprometheus/metrics.py
+++ b/pyprometheus/metrics.py
@@ -11,13 +11,10 @@
 :github: http://github.com/Lispython/pyprometheus
 """
-
-
 from pyprometheus.const import TYPES
 from pyprometheus.values import (MetricValue, GaugeValue,
                                  CounterValue, SummaryValue, HistogramValue)
 
 
-
 class BaseMetric(object):
 
     value_class = MetricValue
@@ -94,7 +91,7 @@ def text_export_header(self):
 
         # HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary """ - return '\n'.join(["# HELP {name} {doc}", + return "\n".join(["# HELP {name} {doc}", "# TYPE {name} {metric_type}"]).format( name=self.name, doc=self.doc, @@ -103,7 +100,7 @@ def text_export_header(self): def build_samples(self, items): """Build samples from objects - [((2, 'metric_gauge_name', '', (('label1', 'value3'), ('label2', 'value4'))), 5.0)] + [((2, "metric_gauge_name", "", (("label1", "value3"), ("label2", "value4"))), 5.0)] """ for label_values, data in items: self.add_sample(label_values, self.build_sample(label_values, data)) @@ -132,19 +129,14 @@ def __getattr__(self, name): # return super(BaseMetric, self).__getattr__(name) - - - - class Gauge(BaseMetric): TYPE = "gauge" value_class = GaugeValue - PARENT_METHODS = set(('inc', 'dec', 'set', 'get', 'track_inprogress', - 'set_to_current_time', 'time', 'value')) - + PARENT_METHODS = set(("inc", "dec", "set", "get", "track_inprogress", + "set_to_current_time", "time", "value")) class Counter(BaseMetric): @@ -152,7 +144,7 @@ class Counter(BaseMetric): value_class = CounterValue - PARENT_METHODS = set(('inc', 'get', 'value')) + PARENT_METHODS = set(("inc", "get", "value")) class Summary(BaseMetric): @@ -162,9 +154,9 @@ class Summary(BaseMetric): value_class = SummaryValue - NOT_ALLOWED_LABELS = set('quantile') + NOT_ALLOWED_LABELS = set("quantile") - PARENT_METHODS = set(('observe', 'value', 'time')) + PARENT_METHODS = set(("observe", "value", "time")) def __init__(self, name, doc, labels=[], quantiles=False, registry=None): self._quantiles = list(sorted(quantiles)) if quantiles else [] @@ -176,21 +168,21 @@ def quantiles(self): def build_sample(self, label_values, data): subtypes = { - 'sum': None, - 'count': None, - 'quantiles': [] if isinstance(self._quantiles, (list, tuple)) else None + "sum": None, + "count": None, + "quantiles": [] if isinstance(self._quantiles, (list, tuple)) else None } for meta, value in data: value_class = self.value_class.SUBTYPES[meta[2]] if meta[0] == TYPES.SUMMARY_SUM: - subtypes['sum'] = value_class(self, label_values=label_values, value=value) + subtypes["sum"] = value_class(self, label_values=label_values, value=value) elif meta[0] == TYPES.SUMMARY_COUNTER: - subtypes['count'] = value_class(self, label_values=label_values, value=value) + subtypes["count"] = value_class(self, label_values=label_values, value=value) elif meta[0] == TYPES.SUMMARY_QUANTILE: - quantile = dict(meta[3])['quantile'] - subtypes['quantiles'].append( + quantile = dict(meta[3])["quantile"] + subtypes["quantiles"].append( value_class(self, label_values=label_values, quantile=quantile, value=value)) return self.value_class(self, label_values=label_values, value=subtypes) @@ -200,13 +192,13 @@ class Histogram(BaseMetric): TYPE = "histogram" DEFAULT_BUCKETS = (0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, - 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, float('inf')) + 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, float("inf")) - NOT_ALLOWED_LABELS = set('le') + NOT_ALLOWED_LABELS = set("le") value_class = HistogramValue - PARENT_METHODS = set(('observe', 'value', 'time')) + PARENT_METHODS = set(("observe", "value", "time")) def __init__(self, name, doc, labels=[], buckets=DEFAULT_BUCKETS, registry=None): self._buckets = list(sorted(buckets)) if buckets else [] @@ -219,21 +211,21 @@ def buckets(self): def build_sample(self, label_values, data): subtypes = { - 'sum': None, - 'count': None, - 'buckets': [] if isinstance(self._buckets, (list, tuple)) else None + "sum": None, + "count": None, + "buckets": [] if 
isinstance(self._buckets, (list, tuple)) else None } for meta, value in data: value_class = self.value_class.SUBTYPES[meta[2]] if meta[0] == TYPES.HISTOGRAM_SUM: - subtypes['sum'] = value_class(self, label_values=label_values, value=value) + subtypes["sum"] = value_class(self, label_values=label_values, value=value) elif meta[0] == TYPES.HISTOGRAM_COUNTER: - subtypes['count'] = value_class(self, label_values=label_values, value=value) + subtypes["count"] = value_class(self, label_values=label_values, value=value) elif meta[0] == TYPES.HISTOGRAM_BUCKET: - bucket = dict(meta[3])['bucket'] - subtypes['buckets'].append( + bucket = dict(meta[3])["bucket"] + subtypes["buckets"].append( value_class(self, label_values=label_values, bucket=bucket, value=value)) return self.value_class(self, label_values=label_values, value=subtypes) diff --git a/pyprometheus/storage.py b/pyprometheus/storage.py index c0ad11a..3ac5b1f 100644 --- a/pyprometheus/storage.py +++ b/pyprometheus/storage.py @@ -83,11 +83,9 @@ def label_group(self, value): class LocalMemoryStorage(BaseStorage): def __init__(self): - self._storage = defaultdict(lambda: defaultdict(lambda: defaultdict(float))) self._storage = defaultdict(float) self._lock = Lock() - def inc_value(self, key, value): with self._lock: self._storage[key] += value diff --git a/pyprometheus/values.py b/pyprometheus/values.py index 7eacee1..9b8f3dc 100644 --- a/pyprometheus/values.py +++ b/pyprometheus/values.py @@ -11,15 +11,14 @@ :github: http://github.com/Lispython/pyprometheus """ - import time from pyprometheus.const import TYPES from pyprometheus.managers import TimerManager, InprogressTrackerManager, GaugeTimerManager + class MetricValue(object): - """Base metric collector - """ + """Base metric collector""" TYPE = TYPES.BASE POSTFIX = "" @@ -49,7 +48,7 @@ def set_value(self, value): def __repr__(self): return u"<{0}[{1}]: {2} -> {3}>".format( self.__class__.__name__, self._metric.name, - self._labels, self.__repr_value__()) + str(self._labels).replace("'", "\""), self.__repr_value__()) def validate_labels(self, label_names, labels): if len(labels) != len(label_names): @@ -71,7 +70,7 @@ def inc(self, amount=1): def get(self): # Do not lookup storage if value 0 - if self._value != None: + if self._value is not None: return self._value return self._metric._storage.get_value(self.key) @@ -83,25 +82,25 @@ def value(self): def export_str(self): return "{name}{postfix}{{{labels}}} {value} {timestamp}".format( name=self._metric.name, postfix=self.POSTFIX, - labels=self.export_labels, timestamp=int(time.time()*1000), value=float(self.value)) + labels=self.export_labels, timestamp=int(time.time() * 1000), value=float(self.value)) @property def export_labels(self): - return ', '.join(['{0}="{1}"'.format(self.format_export_label(name), self.format_export_value(value)) + return ", ".join(["{0}=\"{1}\"".format(self.format_export_label(name), self.format_export_value(value)) for name, value in self._labels]) def format_export_label(self, label): - if label == 'bucket': - return 'le' + if label == "bucket": + return "le" return label def format_export_value(self, value): - if value == float('inf'): + if value == float("inf"): return "+Inf" - elif value == float('-inf'): + elif value == float("-inf"): return "-Inf" # elif math.isnan(value): - # return 'NaN' + # return "NaN" return value @@ -156,12 +155,12 @@ class SummaryQuantilyValue(GaugeValue): def __init__(self, metric, label_values={}, quantile=0, value=None): label_values = dict(label_values).copy() - 
label_values['quantile'] = quantile + label_values["quantile"] = quantile self._quantile = quantile super(SummaryQuantilyValue, self).__init__(metric, label_values, value) def validate_labels(self, label_names, labels): - if len(labels) != len(label_names)+1: + if len(labels) != len(label_names) + 1: raise RuntimeError(u"Invalid label values size: {0} != {1}".format( len(label_names), len(labels) + 1)) @@ -174,43 +173,42 @@ def key(self): # return (self.TYPE, self._metric.name, self._metric.name, self._labels) - class SummaryValue(MetricValue): - """ + u""" summary with a base metric name of exposes multiple time series during a scrape: streaming φ-quantiles (0 ≤ φ ≤ 1) of observed events, exposed as {quantile="<φ>"} the total sum of all observed values, exposed as _sum the count of events that have been observed, exposed as _count """ + TYPE = TYPES.SUMMARY SUBTYPES = { - '_sum': SummarySumValue, - '_count': SummaryCountValue, - '_quantile': SummaryQuantilyValue + "_sum": SummarySumValue, + "_count": SummaryCountValue, + "_quantile": SummaryQuantilyValue } def __init__(self, metric, label_values={}, value={}): super(SummaryValue, self).__init__(metric, label_values=label_values) - self._sum = value.pop('sum', None) or SummarySumValue(self._metric, label_values=self._label_values) - self._count = value.pop('count', None) or SummaryCountValue(self._metric, label_values=self._label_values) + self._sum = value.pop("sum", None) or SummarySumValue(self._metric, label_values=self._label_values) + self._count = value.pop("count", None) or SummaryCountValue(self._metric, label_values=self._label_values) if isinstance(self._metric.quantiles, (list, tuple)): - self._quantiles = value.pop('quantiles', []) or [SummaryQuantilyValue(self._metric, label_values=self._label_values, quantile=quantile) - for quantile in self._metric.quantiles] + self._quantiles = value.pop("quantiles", []) or [SummaryQuantilyValue(self._metric, label_values=self._label_values, quantile=quantile) + for quantile in self._metric.quantiles] else: self._quantiles = [] - def __repr_value__(self): return u"sum={sum} / count={count} = {value} [{quantiles}]".format( **{ - 'sum': self._sum.value, - 'count': self._count.value, - 'value': (self._sum.value / self._count.value) if self._count.value != 0 else '-', - 'quantiles': ', '.join([x.__repr_value__() for x in self._quantiles]) if self._quantiles else 'empty' + "sum": self._sum.value, + "count": self._count.value, + "value": (self._sum.value / self._count.value) if self._count.value != 0 else "-", + "quantiles": ", ".join([x.__repr_value__() for x in self._quantiles]) if self._quantiles else "empty" } ) @@ -222,25 +220,17 @@ def observe(self, amount): # for quantile, value in self._quantiles: # pass - @property def value(self): return { - 'sum': self._sum, - 'count': self._count, - 'quantiles': self._quantiles - } - - @property - def export_str(self): - return "" - + "sum": self._sum, + "count": self._count, + "quantiles": self._quantiles} @property def export_str(self): return "\n".join([self._sum.export_str, self._count.export_str] + [quantile.export_str for quantile in self._quantiles]) - def time(self): return TimerManager(self) @@ -265,14 +255,13 @@ class HistogramBucketValue(SummaryCountValue): def __init__(self, metric, label_values={}, bucket=None, value=None): label_values = dict(label_values).copy() - label_values['bucket'] = bucket + label_values["bucket"] = bucket self._bucket_threshold = bucket super(HistogramBucketValue, self).__init__(metric, label_values, value) def 
__repr_value__(self): return u"{0} -> {1}".format(self._bucket_threshold, self._value) - @property def bucket_threshold(self): return self._bucket_threshold @@ -287,30 +276,29 @@ class HistogramValue(MetricValue): TYPE = TYPES.HISTOGRAM SUBTYPES = { - '_sum': HistogramSumValue, - '_count': HistogramCountValue, - '_bucket': HistogramBucketValue + "_sum": HistogramSumValue, + "_count": HistogramCountValue, + "_bucket": HistogramBucketValue } def __init__(self, metric, label_values={}, value={}): self._buckets = [] super(HistogramValue, self).__init__(metric, label_values=label_values) - self._sum = value.pop('sum', None) or HistogramSumValue(self._metric, label_values=self._label_values) - self._count = value.pop('count', None) or HistogramCountValue(self._metric, label_values=self._label_values) + self._sum = value.pop("sum", None) or HistogramSumValue(self._metric, label_values=self._label_values) + self._count = value.pop("count", None) or HistogramCountValue(self._metric, label_values=self._label_values) - - self._buckets = (value.pop('buckets', []) or [HistogramBucketValue(self._metric, label_values=self._label_values, bucket=bucket) + self._buckets = (value.pop("buckets", []) or [HistogramBucketValue(self._metric, label_values=self._label_values, bucket=bucket) for bucket in sorted(self._metric.buckets)]) def __repr_value__(self): return u"sum={sum} / count={count} = {value} [{buckets}]".format( **{ - 'sum': self._sum.__repr_value__(), - 'count': self._count.__repr_value__(), - 'value': (self._sum.value / self._count.value) if self._count.value != 0 else '-', - #'buckets': '' - 'buckets': ', '.join([x.__repr_value__() for x in self._buckets]) if self._buckets else 'empty' + "sum": self._sum.__repr_value__(), + "count": self._count.__repr_value__(), + "value": (self._sum.value / self._count.value) if self._count.value != 0 else "-", + # "buckets": "" + "buckets": ", ".join([x.__repr_value__() for x in self._buckets]) if self._buckets else "empty" } ) @@ -324,15 +312,14 @@ def observe(self, amount): @property def value(self): return { - 'sum': self._sum, - 'count': self._count, - 'buckets': self._buckets + "sum": self._sum, + "count": self._count, + "buckets": self._buckets } @property def export_str(self): return "\n".join([self._sum.export_str, self._count.export_str] + [bucket.export_str for bucket in self._buckets]) - def time(self): return TimerManager(self) diff --git a/setup.cfg b/setup.cfg index 2a62523..5250f09 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,7 +1,7 @@ [tool:pytest] python_files=test_*.py testpaths = tests -addopts=-s -p no:doctest --flake8 --cov=./ +addopts=-s -p no:doctest --flake8 --cov=./ norecursedirs=pyprometheus build bin dist docs .git flake8-max-line-length = 100 flake8-ignore = @@ -10,9 +10,11 @@ flake8-ignore = */migrations/* ALL [flake8] -ignore = E501 F403 F401 +ignore = E501,F403,F401,D100,D101,D102,D103,I004,I001,I003,Q000,D205,D400,D105 max-line-length = 100 exclude = .tox,.git,docs,.ropeproject +inline-quotes = double + [bdist_wheel] universal = 1 diff --git a/tests/test_metrics.py b/tests/test_metrics.py index e4dda42..89fbb41 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -14,7 +14,7 @@ def test_base_metric(storage_cls): storage = storage_cls() registry = BaseRegistry(storage=storage) metric_name = "test_base_metric" - metric = BaseMetric(metric_name, "test_base_metric doc", ('label1', 'label2'), registry=registry) + metric = BaseMetric(metric_name, "test_base_metric doc", ("label1", "label2"), registry=registry) assert 
registry.is_registered(metric) assert repr(metric) == "" @@ -29,7 +29,7 @@ def test_base_metric(storage_cls): assert str(exc_info.value) == u"Collector {0} already registered.".format(metric.uid) - labels = metric.labels({'label1': 'label1_value', 'label2': 'label2_value'}) + labels = metric.labels({"label1": "label1_value", "label2": "label2_value"}) assert isinstance(labels, MetricValue) @@ -37,7 +37,7 @@ def test_base_metric(storage_cls): assert labels.get() == 1 - assert metric.text_export_header == '\n'.join(["# HELP test_base_metric test_base_metric doc", + assert metric.text_export_header == "\n".join(["# HELP test_base_metric test_base_metric doc", "# TYPE test_base_metric untyped"]) @@ -47,7 +47,7 @@ def test_counter_metric(storage_cls): registry = BaseRegistry(storage=storage) metric_name = "counter_metric_name" - metric = Counter(metric_name, "counter_metric_name doc", ('label1', 'label2'), registry=registry) + metric = Counter(metric_name, "counter_metric_name doc", ("label1", "label2"), registry=registry) with pytest.raises(RuntimeError) as exc_info: metric.inc() @@ -67,7 +67,7 @@ def test_counter_metric(storage_cls): assert str(exc_info.value) == u"Collector {0} already registered.".format(metric.uid) - labels = metric.labels({'label1': 'label1_value', 'label2': 'label2_value'}) + labels = metric.labels({"label1": "label1_value", "label2": "label2_value"}) assert labels.get() == 0 @@ -77,12 +77,12 @@ def test_counter_metric(storage_cls): assert repr(labels) == str(labels) - assert str(labels) == " 10.0>" + assert str(labels) == " 10.0>" assert labels.key == (labels.TYPE, metric_name, labels.POSTFIX, - (('label1', 'label1_value'), ('label2', 'label2_value'))) + (("label1", "label1_value"), ("label2", "label2_value"))) - assert metric.text_export_header == '\n'.join(["# HELP counter_metric_name counter_metric_name doc", + assert metric.text_export_header == "\n".join(["# HELP counter_metric_name counter_metric_name doc", "# TYPE counter_metric_name counter"]) @@ -91,7 +91,7 @@ def test_gauge_metric(): registry = BaseRegistry(storage=storage) metric_name = "gauge_metric_name" - metric = Gauge(metric_name, metric_name + " doc", ('label1', 'label2'), registry=registry) + metric = Gauge(metric_name, metric_name + " doc", ("label1", "label2"), registry=registry) assert registry.is_registered(metric) with pytest.raises(RuntimeError) as exc_info: @@ -110,7 +110,7 @@ def test_gauge_metric(): assert str(exc_info.value) == u"Collector {0} already registered.".format(metric.uid) - labels = metric.labels({'label1': 'label1_value', 'label2': 'label2_value'}) + labels = metric.labels({"label1": "label1_value", "label2": "label2_value"}) assert labels.get() == 0 @@ -119,21 +119,21 @@ def test_gauge_metric(): assert labels.get() == 10 assert repr(labels) == str(labels) - assert str(labels) == " 10.0>" + assert str(labels) == " 10.0>" assert labels.key == (labels.TYPE, metric_name, labels.POSTFIX, - (('label1', 'label1_value'), ('label2', 'label2_value'))) + (("label1", "label1_value"), ("label2", "label2_value"))) - assert metric.text_export_header == '\n'.join(["# HELP gauge_metric_name gauge_metric_name doc", + assert metric.text_export_header == "\n".join(["# HELP gauge_metric_name gauge_metric_name doc", "# TYPE gauge_metric_name gauge"]) - with metric.labels({'label1': '1', 'label2': '1'}).time(): + with metric.labels({"label1": "1", "label2": "1"}).time(): time.sleep(1) - assert metric.labels(label1='1', label2='1').value > 1 + assert metric.labels(label1="1", label2="1").value > 1 - 
labels = metric.labels({'label1': 'inprogress', 'label2': 'inprogress'}) + labels = metric.labels({"label1": "inprogress", "label2": "inprogress"}) with labels.track_in_progress(): assert labels.value == 1 @@ -142,7 +142,7 @@ def test_gauge_metric(): assert labels.set_to_current_time() == labels.value - labels = metric.labels({'label1': 'time2', 'label2': 'time2'}) + labels = metric.labels({"label1": "time2", "label2": "time2"}) @labels.time() def f(*args, **kwargs): @@ -158,7 +158,7 @@ def test_summary(storage_cls): registry = BaseRegistry(storage=storage) metric_name = "summary_metric_name" - metric = Summary(metric_name, "summary_metric_name doc", ('label1', 'label2'), registry=registry) + metric = Summary(metric_name, "summary_metric_name doc", ("label1", "label2"), registry=registry) assert registry.is_registered(metric) @@ -178,42 +178,42 @@ def test_summary(storage_cls): assert str(exc_info.value) == u"Collector {0} already registered.".format(metric.uid) - labels = metric.labels({'label1': 'label1_value', 'label2': 'label2_value'}) + labels = metric.labels({"label1": "label1_value", "label2": "label2_value"}) labels.observe(10) value = labels.value - assert value['sum'].value == 10 - assert value['count'].value == 1 + assert value["sum"].value == 10 + assert value["count"].value == 1 labels.observe(14) - assert value['sum'].value == 24 - assert value['count'].value == 2 + assert value["sum"].value == 24 + assert value["count"].value == 2 - assert value['quantiles'] == [] + assert value["quantiles"] == [] - assert str(value['sum']) == " 24.0>" - assert str(value['count']) == " 2.0>" + assert str(value["sum"]) == " 24.0>" + assert str(value["count"]) == " 2.0>" - assert value['sum'].key == (value['sum'].TYPE, 'summary_metric_name', value['sum'].POSTFIX, (('label1', 'label1_value'), ('label2', 'label2_value'))) - assert value['count'].key == (value['count'].TYPE, 'summary_metric_name', value['count'].POSTFIX, (('label1', 'label1_value'), ('label2', 'label2_value'))) + assert value["sum"].key == (value["sum"].TYPE, "summary_metric_name", value["sum"].POSTFIX, (("label1", "label1_value"), ("label2", "label2_value"))) + assert value["count"].key == (value["count"].TYPE, "summary_metric_name", value["count"].POSTFIX, (("label1", "label1_value"), ("label2", "label2_value"))) - assert metric.text_export_header == '\n'.join(["# HELP summary_metric_name summary_metric_name doc", + assert metric.text_export_header == "\n".join(["# HELP summary_metric_name summary_metric_name doc", "# TYPE summary_metric_name summary"]) for x in range(3): - with metric.labels({'label1': '1', 'label2': '1'}).time(): + with metric.labels({"label1": "1", "label2": "1"}).time(): time.sleep(1) - value = metric.labels(label1='1', label2='1').value + value = metric.labels(label1="1", label2="1").value - assert value['sum'].value > 3 - assert value['count'].value == 3 + assert value["sum"].value > 3 + assert value["count"].value == 3 - labels = metric.labels({'label1': 'time2', 'label2': 'time2'}) + labels = metric.labels({"label1": "time2", "label2": "time2"}) @labels.time() def f(*args, **kwargs): @@ -223,8 +223,8 @@ def f(*args, **kwargs): f() value = labels.value - assert value['sum'].value > 3 - assert value['count'].value == 3 + assert value["sum"].value > 3 + assert value["count"].value == 3 @pytest.mark.parametrize("storage_cls", [LocalMemoryStorage, UWSGIStorage]) @@ -233,7 +233,7 @@ def test_histogram(storage_cls): registry = BaseRegistry(storage=storage) metric_name = "histogram_metric_name" - metric = 
Histogram(metric_name, "histogram_metric_name doc", ('label1', 'label2'), registry=registry) + metric = Histogram(metric_name, "histogram_metric_name doc", ("label1", "label2"), registry=registry) with pytest.raises(RuntimeError) as exc_info: metric.observe(10) @@ -251,45 +251,45 @@ def test_histogram(storage_cls): assert str(exc_info.value) == u"Collector {0} already registered.".format(metric.uid) - labels = metric.labels({'label1': 'label1_value', 'label2': 'label2_value'}) + labels = metric.labels({"label1": "label1_value", "label2": "label2_value"}) labels.observe(2.4) value = labels.value - assert value['sum'].value == 2.4 - assert value['count'].value == 1 + assert value["sum"].value == 2.4 + assert value["count"].value == 1 - assert str(value['sum']) == " 2.4>" - assert str(value['count']) == " 1.0>" + assert str(value["sum"]) == " 2.4>" + assert str(value["count"]) == " 1.0>" labels.observe(0.06) - assert str(value['sum']) == " 2.46>" - assert str(value['count']) == " 2.0>" + assert str(value["sum"]) == " 2.46>" + assert str(value["count"]) == " 2.0>" - buckets = dict([(x.bucket_threshold, x) for x in value['buckets']]) + buckets = {x.bucket_threshold: x for x in value["buckets"]} assert buckets[0.025].value == 0 assert buckets[0.075].value == 1 assert buckets[2.5].value == 2 - assert buckets[float('inf')].value == 2 + assert buckets[float("inf")].value == 2 - assert value['sum'].key == (value['sum'].TYPE, 'histogram_metric_name', value['sum'].POSTFIX, (('label1', 'label1_value'), ('label2', 'label2_value'))) - assert value['count'].key == (value['count'].TYPE, 'histogram_metric_name', value['count'].POSTFIX, (('label1', 'label1_value'), ('label2', 'label2_value'))) + assert value["sum"].key == (value["sum"].TYPE, "histogram_metric_name", value["sum"].POSTFIX, (("label1", "label1_value"), ("label2", "label2_value"))) + assert value["count"].key == (value["count"].TYPE, "histogram_metric_name", value["count"].POSTFIX, (("label1", "label1_value"), ("label2", "label2_value"))) - assert metric.text_export_header == '\n'.join(["# HELP histogram_metric_name histogram_metric_name doc", + assert metric.text_export_header == "\n".join(["# HELP histogram_metric_name histogram_metric_name doc", "# TYPE histogram_metric_name histogram"]) for x in range(3): - with metric.labels({'label1': '1', 'label2': '1'}).time(): + with metric.labels({"label1": "1", "label2": "1"}).time(): time.sleep(1) - value = metric.labels(label1='1', label2='1').value + value = metric.labels(label1="1", label2="1").value - assert value['sum'].value > 3 - assert value['count'].value == 3 + assert value["sum"].value > 3 + assert value["count"].value == 3 - labels = metric.labels({'label1': 'time2', 'label2': 'time2'}) + labels = metric.labels({"label1": "time2", "label2": "time2"}) @labels.time() def f(*args, **kwargs): @@ -299,8 +299,8 @@ def f(*args, **kwargs): f() value = labels.value - assert value['sum'].value > 3 - assert value['count'].value == 3 + assert value["sum"].value > 3 + assert value["count"].value == 3 @pytest.mark.parametrize("storage_cls", [LocalMemoryStorage, UWSGIStorage]) @@ -344,8 +344,8 @@ def test_metric_methods(storage_cls): time.sleep(1) - assert metric.value['sum'].value > 3 - assert metric.value['count'].value == 3 + assert metric.value["sum"].value > 3 + assert metric.value["count"].value == 3 metric = Histogram("histogram_metric_name", "histogram_metric_name doc", registry=registry) @@ -354,5 +354,5 @@ def test_metric_methods(storage_cls): time.sleep(1) - assert metric.value['sum'].value > 
3 - assert metric.value['count'].value == 3 + assert metric.value["sum"].value > 3 + assert metric.value["count"].value == 3 diff --git a/tests/test_registry.py b/tests/test_registry.py index f5b6aaf..e975f8e 100644 --- a/tests/test_registry.py +++ b/tests/test_registry.py @@ -53,7 +53,7 @@ def test_base_registry(storage_cls, measure_time): name_template = "metric_{0}_name" doc_template = "doc_{0}" metrics = {} - labels = ('label1', 'label2') + labels = ("label1", "label2") labelnames = ("value1", "value2") for metric_class in [ @@ -70,7 +70,7 @@ def test_base_registry(storage_cls, measure_time): name_template.format(Histogram.TYPE), doc_template.format(Histogram.TYPE), labels, - buckets=(0.005, 0.01, 7.5, float('inf')), + buckets=(0.005, 0.01, 7.5, float("inf")), registry=registry ) @@ -93,18 +93,18 @@ def test_base_registry(storage_cls, measure_time): labels_dict = dict(zip(labels, labelnames)) - metrics['gauge'].labels(**labels_dict).inc(5) - metrics['counter'].labels(**labels_dict).inc(7) - metrics['summary'].labels(**labels_dict).observe(4) - metrics['histogram'].labels(**labels_dict).observe(6) + metrics["gauge"].labels(**labels_dict).inc(5) + metrics["counter"].labels(**labels_dict).inc(7) + metrics["summary"].labels(**labels_dict).observe(4) + metrics["histogram"].labels(**labels_dict).observe(6) labelnames2 = ("value3", "value4") labels_dict2 = dict(zip(labels, labelnames2)) - metrics['gauge'].labels(**labels_dict2).inc(5) - metrics['counter'].labels(**labels_dict2).inc(7) - metrics['summary'].labels(**labels_dict2).observe(4) - metrics['histogram'].labels(**labels_dict2).observe(6) + metrics["gauge"].labels(**labels_dict2).inc(5) + metrics["counter"].labels(**labels_dict2).inc(7) + metrics["summary"].labels(**labels_dict2).observe(4) + metrics["histogram"].labels(**labels_dict2).observe(6) assert len(list(registry.get_samples())) == 5 @@ -120,12 +120,12 @@ def test_base_registry(storage_cls, measure_time): with measure_time("registry to text"): for test1, test2 in zip(registry_to_text(registry).split("\n")[4:], lines[4:]): - if test1.startswith('#'): + if test1.startswith("#"): assert test1 == test2 else: assert test1.split()[:-1] == test2.split()[:-1] - metrics_count = map(lambda x: x.split(' ')[2], - filter(lambda x: x.startswith('# HELP'), [x for x in registry_to_text(registry).split('\n')])) + metrics_count = map(lambda x: x.split(" ")[2], + filter(lambda x: x.startswith("# HELP"), [x for x in registry_to_text(registry).split("\n")])) assert len(metrics_count) == len(set(metrics_count)) diff --git a/tests/test_storage.py b/tests/test_storage.py index b78d3cd..02dce3a 100644 --- a/tests/test_storage.py +++ b/tests/test_storage.py @@ -10,26 +10,26 @@ xrange = range DATA = ( - ((2, 'metric_gauge_name', '', (('label1', 'value1'), ('label2', 'value2'))), 5), - ((3, 'metric_counter_name', '', (('label1', 'value1'), ('label2', 'value2'))), 7), - ((5, 'metric_summary_name', '_sum', (('label1', 'value1'), ('label2', 'value2'))), 4), - ((7, 'metric_summary_name', '_count', (('label1', 'value1'), ('label2', 'value2'))), 1), - ((11, 'metric_histogram_name', '_sum', (('label1', 'value1'), ('label2', 'value2'))), 6), - ((12, 'metric_histogram_name', '_count', (('label1', 'value1'), ('label2', 'value2'))), 1), - ((13, 'metric_histogram_name', '_bucket', (('bucket', 0.005), ('label1', 'value1'), ('label2', 'value2'))), 0), - ((13, 'metric_histogram_name', '_bucket', (('bucket', 0.01), ('label1', 'value1'), ('label2', 'value2'))), 0), - ((13, 'metric_histogram_name', '_bucket', (('bucket', 
7.5), ('label1', 'value1'), ('label2', 'value2'))), 1), - ((13, 'metric_histogram_name', '_bucket', (('bucket', float('inf')), ('label1', 'value1'), ('label2', 'value2'))), 1), - ((2, 'metric_gauge_name', '', (('label1', 'value3'), ('label2', 'value4'))), 5), - ((3, 'metric_counter_name', '', (('label1', 'value3'), ('label2', 'value4'))), 7), - ((5, 'metric_summary_name', '_sum', (('label1', 'value3'), ('label2', 'value4'))), 4), - ((7, 'metric_summary_name', '_count', (('label1', 'value3'), ('label2', 'value4'))), 1), - ((11, 'metric_histogram_name', '_sum', (('label1', 'value3'), ('label2', 'value4'))), 6), - ((12, 'metric_histogram_name', '_count', (('label1', 'value3'), ('label2', 'value4'))), 1), - ((13, 'metric_histogram_name', '_bucket', (('bucket', 0.005), ('label1', 'value3'), ('label2', 'value4'))), 0), - ((13, 'metric_histogram_name', '_bucket', (('bucket', 0.01), ('label1', 'value3'), ('label2', 'value4'))), 0), - ((13, 'metric_histogram_name', '_bucket', (('bucket', 7.5), ('label1', 'value3'), ('label2', 'value4'))), 1), - ((13, 'metric_histogram_name', '_bucket', (('bucket', float('inf')), ('label1', 'value3'), ('label2', 'value4'))), 1)) + ((2, "metric_gauge_name", "", (("label1", "value1"), ("label2", "value2"))), 5), + ((3, "metric_counter_name", "", (("label1", "value1"), ("label2", "value2"))), 7), + ((5, "metric_summary_name", "_sum", (("label1", "value1"), ("label2", "value2"))), 4), + ((7, "metric_summary_name", "_count", (("label1", "value1"), ("label2", "value2"))), 1), + ((11, "metric_histogram_name", "_sum", (("label1", "value1"), ("label2", "value2"))), 6), + ((12, "metric_histogram_name", "_count", (("label1", "value1"), ("label2", "value2"))), 1), + ((13, "metric_histogram_name", "_bucket", (("bucket", 0.005), ("label1", "value1"), ("label2", "value2"))), 0), + ((13, "metric_histogram_name", "_bucket", (("bucket", 0.01), ("label1", "value1"), ("label2", "value2"))), 0), + ((13, "metric_histogram_name", "_bucket", (("bucket", 7.5), ("label1", "value1"), ("label2", "value2"))), 1), + ((13, "metric_histogram_name", "_bucket", (("bucket", float("inf")), ("label1", "value1"), ("label2", "value2"))), 1), + ((2, "metric_gauge_name", "", (("label1", "value3"), ("label2", "value4"))), 5), + ((3, "metric_counter_name", "", (("label1", "value3"), ("label2", "value4"))), 7), + ((5, "metric_summary_name", "_sum", (("label1", "value3"), ("label2", "value4"))), 4), + ((7, "metric_summary_name", "_count", (("label1", "value3"), ("label2", "value4"))), 1), + ((11, "metric_histogram_name", "_sum", (("label1", "value3"), ("label2", "value4"))), 6), + ((12, "metric_histogram_name", "_count", (("label1", "value3"), ("label2", "value4"))), 1), + ((13, "metric_histogram_name", "_bucket", (("bucket", 0.005), ("label1", "value3"), ("label2", "value4"))), 0), + ((13, "metric_histogram_name", "_bucket", (("bucket", 0.01), ("label1", "value3"), ("label2", "value4"))), 0), + ((13, "metric_histogram_name", "_bucket", (("bucket", 7.5), ("label1", "value3"), ("label2", "value4"))), 1), + ((13, "metric_histogram_name", "_bucket", (("bucket", float("inf")), ("label1", "value3"), ("label2", "value4"))), 1)) def test_base_storage(): @@ -44,16 +44,16 @@ def test_local_memory_storage(): assert len(storage) == 0 key1 = (1, - 'metric_name1', - '', - (('key1', 'value1'), - ('key2', 'value2'))) + "metric_name1", + "", + (("key1", "value1"), + ("key2", "value2"))) key2 = (1, - 'metric_name2', - '', - (('key1', 'value1'), - ('key2', 'value2'))) + "metric_name2", + "", + (("key1", "value1"), + ("key2", 
"value2"))) storage.inc_value(key1, 1) assert storage.get_value(key1) == 1.0 @@ -80,18 +80,18 @@ def test_local_memory_storage(): for name, labels in items: - if name == 'metric_counter_name': + if name == "metric_counter_name": for label, label_data in labels: assert len(label_data) == 1 - if name == 'metric_gauge_name': + if name == "metric_gauge_name": for label, label_data in labels: assert len(label_data) == 1 - if name == 'metric_histogram_name': + if name == "metric_histogram_name": for label, label_data in labels: assert len(label_data) == 6 - if name == 'metric_summary_name': + if name == "metric_summary_name": for label, label_data in labels: assert len(label_data) == 2 @@ -100,10 +100,10 @@ def test_local_memory_storage(): assert len(items) == 4 -def test_local_storage_threading(measure_time): +def test_local_storage_threading(measure_time, iterations, num_workers): storage = LocalMemoryStorage() - ITERATIONS = 500 + ITERATIONS = iterations with measure_time("threading writes") as mt: def f1(): @@ -122,7 +122,7 @@ def f3(): storage.inc_value(x[0], x[1]) workers = [] - for _ in xrange(10): + for _ in xrange(num_workers): func = random.choice([f1, f2, f3]) t = threading.Thread(target=func) diff --git a/tests/test_uwsgi_collector.py b/tests/test_uwsgi_collector.py index 1b6b2bb..3cff46a 100644 --- a/tests/test_uwsgi_collector.py +++ b/tests/test_uwsgi_collector.py @@ -5,7 +5,7 @@ from multiprocessing import Process import uwsgi -from pyprometheus.contrib.uwsgi_features import UWSGICollector, UWSGIStorage +from pyprometheus.contrib.uwsgi_features import UWSGICollector, UWSGIStorage, UWSGIFlushStorage from pyprometheus.registry import BaseRegistry from pyprometheus.utils.exposition import registry_to_text try: @@ -20,46 +20,46 @@ def test_uwsgi_collector(): registry.register(uwsgi_collector) - collectors = dict([(x.name, x) for x in registry.collect()]) + collectors = {x.name: x for x in registry.collect()} - metrics_count = sorted(map(lambda x: x.split(' ')[2], - filter(lambda x: x.startswith('# HELP'), [x for x in registry_to_text(registry).split('\n')]))) + metrics_count = sorted(map(lambda x: x.split(" ")[2], + filter(lambda x: x.startswith("# HELP"), [x for x in registry_to_text(registry).split("\n")]))) assert len(metrics_count) == len(set(metrics_count)) - assert len(registry_to_text(registry).split('\n')) == 60 + assert len(registry_to_text(registry).split("\n")) == 60 - assert collectors['uwsgi_namespace:buffer_size_bytes'].get_samples()[0].value == uwsgi.buffer_size - assert collectors['uwsgi_namespace:processes_total'].get_samples()[0].value == uwsgi.numproc - assert collectors['uwsgi_namespace:requests_total'].get_samples()[0].value == uwsgi.total_requests() + assert collectors["uwsgi_namespace:buffer_size_bytes"].get_samples()[0].value == uwsgi.buffer_size + assert collectors["uwsgi_namespace:processes_total"].get_samples()[0].value == uwsgi.numproc + assert collectors["uwsgi_namespace:requests_total"].get_samples()[0].value == uwsgi.total_requests() - for name in ['requests', 'respawn_count', 'running_time', 'exceptions', 'delta_requests']: - assert collectors['uwsgi_namespace:process:{0}'.format(name)].get_samples()[0].value == uwsgi.workers()[0][name] + for name in ["requests", "respawn_count", "running_time", "exceptions", "delta_requests"]: + assert collectors["uwsgi_namespace:process:{0}".format(name)].get_samples()[0].value == uwsgi.workers()[0][name] assert uwsgi_collector.metric_name("test") == "uwsgi_namespace:test" DATA = ( - ((2, 'metric_gauge_name', '', 
(('label1', 'value1'), ('label2', 'value2'))), 5), - ((3, 'metric_counter_name', '', (('label1', 'value1'), ('label2', 'value2'))), 7), - ((5, 'metric_summary_name', '_sum', (('label1', 'value1'), ('label2', 'value2'))), 4), - ((7, 'metric_summary_name', '_count', (('label1', 'value1'), ('label2', 'value2'))), 1), - ((11, 'metric_histogram_name', '_sum', (('label1', 'value1'), ('label2', 'value2'))), 6), - ((12, 'metric_histogram_name', '_count', (('label1', 'value1'), ('label2', 'value2'))), 1), - ((13, 'metric_histogram_name', '_bucket', (('bucket', '0.005'), ('label1', 'value1'), ('label2', 'value2'))), 0), - ((13, 'metric_histogram_name', '_bucket', (('bucket', '0.01'), ('label1', 'value1'), ('label2', 'value2'))), 0), - ((13, 'metric_histogram_name', '_bucket', (('bucket', '7.5'), ('label1', 'value1'), ('label2', 'value2'))), 1), - ((13, 'metric_histogram_name', '_bucket', (('bucket', '+Inf'), ('label1', 'value1'), ('label2', 'value2'))), 1), - ((2, 'metric_gauge_name', '', (('label1', 'value3'), ('label2', 'value4'))), 5), - ((3, 'metric_counter_name', '', (('label1', 'value3'), ('label2', 'value4'))), 7), - ((5, 'metric_summary_name', '_sum', (('label1', 'value3'), ('label2', 'value4'))), 4), - ((7, 'metric_summary_name', '_count', (('label1', 'value3'), ('label2', 'value4'))), 1), - ((11, 'metric_histogram_name', '_sum', (('label1', 'value3'), ('label2', 'value4'))), 6), - ((12, 'metric_histogram_name', '_count', (('label1', 'value3'), ('label2', 'value4'))), 1), - ((13, 'metric_histogram_name', '_bucket', (('bucket', '0.005'), ('label1', 'value3'), ('label2', 'value4'))), 0), - ((13, 'metric_histogram_name', '_bucket', (('bucket', 0.01), ('label1', 'value3'), ('label2', 'value4'))), 0), - ((13, 'metric_histogram_name', '_bucket', (('bucket', 7.5), ('label1', 'value3'), ('label2', 'value4'))), 1), - ((13, 'metric_histogram_name', '_bucket', (('bucket', float('inf')), ('label1', 'value3'), ('label2', 'value4'))), 1)) + ((2, "metric_gauge_name", "", (("label1", "value1"), ("label2", "value2"))), 5), + ((3, "metric_counter_name", "", (("label1", "value1"), ("label2", "value2"))), 7), + ((5, "metric_summary_name", "_sum", (("label1", "value1"), ("label2", "value2"))), 4), + ((7, "metric_summary_name", "_count", (("label1", "value1"), ("label2", "value2"))), 1), + ((11, "metric_histogram_name", "_sum", (("label1", "value1"), ("label2", "value2"))), 6), + ((12, "metric_histogram_name", "_count", (("label1", "value1"), ("label2", "value2"))), 1), + ((13, "metric_histogram_name", "_bucket", (("bucket", "0.005"), ("label1", "value1"), ("label2", "value2"))), 0), + ((13, "metric_histogram_name", "_bucket", (("bucket", "0.01"), ("label1", "value1"), ("label2", "value2"))), 0), + ((13, "metric_histogram_name", "_bucket", (("bucket", "7.5"), ("label1", "value1"), ("label2", "value2"))), 1), + ((13, "metric_histogram_name", "_bucket", (("bucket", "+Inf"), ("label1", "value1"), ("label2", "value2"))), 1), + ((2, "metric_gauge_name", "", (("label1", "value3"), ("label2", "value4"))), 5), + ((3, "metric_counter_name", "", (("label1", "value3"), ("label2", "value4"))), 7), + ((5, "metric_summary_name", "_sum", (("label1", "value3"), ("label2", "value4"))), 4), + ((7, "metric_summary_name", "_count", (("label1", "value3"), ("label2", "value4"))), 1), + ((11, "metric_histogram_name", "_sum", (("label1", "value3"), ("label2", "value4"))), 6), + ((12, "metric_histogram_name", "_count", (("label1", "value3"), ("label2", "value4"))), 1), + ((13, "metric_histogram_name", "_bucket", (("bucket", "0.005"), 
("label1", "value3"), ("label2", "value4"))), 0), + ((13, "metric_histogram_name", "_bucket", (("bucket", 0.01), ("label1", "value3"), ("label2", "value4"))), 0), + ((13, "metric_histogram_name", "_bucket", (("bucket", 7.5), ("label1", "value3"), ("label2", "value4"))), 1), + ((13, "metric_histogram_name", "_bucket", (("bucket", float("inf")), ("label1", "value3"), ("label2", "value4"))), 1)) def test_uwsgi_storage(): @@ -72,7 +72,7 @@ def test_uwsgi_storage(): assert (storage.get_area_size()) == 14 - assert storage.m[15] == '\x00' + assert storage.m[15] == "\x00" with storage.lock(): @@ -99,10 +99,10 @@ def test_uwsgi_storage(): assert not storage.is_actual - s = 'keyname' + s = "keyname" assert storage.get_string_padding(s) == 5 - assert len(s.encode('utf-8')) + storage.get_string_padding(s) == 12 + assert len(s.encode("utf-8")) + storage.get_string_padding(s) == 12 assert storage.validate_actuality() @@ -114,9 +114,9 @@ def test_uwsgi_storage(): assert storage.get_key_size("keyname") == 24 - storage.write_value('keyname', 10) + storage.write_value("keyname", 10) - assert storage.get_value('keyname') == 10.0 + assert storage.get_value("keyname") == 10.0 storage.clear() @@ -154,12 +154,14 @@ def test_uwsgi_storage(): assert storage2.get_value(x[0]) == x[1] -def test_multiprocessing(measure_time): +def test_multiprocessing(measure_time, iterations, num_workers): + storage = UWSGIStorage(0) storage2 = UWSGIStorage(0) storage3 = UWSGIStorage(0) - ITERATIONS = 500 - with measure_time("multiprocessing writes") as mt: + ITERATIONS = iterations + + with measure_time("multiprocessing writes {0}".format(ITERATIONS)) as mt: def f1(): for _ in xrange(ITERATIONS): for x in DATA: @@ -176,7 +178,7 @@ def f3(): storage3.inc_value(x[0], x[1]) workers = [] - for _ in xrange(10): + for _ in xrange(num_workers): func = random.choice([f1, f2, f3]) p = Process(target=func) p.start() @@ -192,3 +194,96 @@ def f3(): for x in DATA: assert storage2.get_value(x[0]) == storage.get_value(x[0]) == storage3.get_value(x[0]) == x[1] * ITERATIONS * len(workers) + + +def test_uwsgi_flush_storage(): + + storage1 = UWSGIFlushStorage(0) + storage2 = UWSGIFlushStorage(0) + + for x in xrange(10): + for k, v in DATA: + storage1.inc_value(k, v) + + storage1.get_value(k) == v + + storage2.get_value(k) == 0 + + storage1.flush() + + for x in DATA: + storage1.get_value(x[0]) == 0 + storage1.persistent_storage.get_value(x[0]) == x[1] * 10 + + +def test_uwsgi_flush_storage_multiprocessing(measure_time, iterations, num_workers): + storage = UWSGIFlushStorage(0) + storage2 = UWSGIFlushStorage(0) + storage3 = UWSGIFlushStorage(0) + ITERATIONS = iterations + with measure_time("flush storage multiprocessing writes {0}".format(ITERATIONS)) as mt: + def f1(): + for _ in xrange(ITERATIONS): + for x in DATA: + storage.inc_value(x[0], x[1]) + + storage.flush() + + def f2(): + for _ in xrange(ITERATIONS): + for x in DATA: + storage2.inc_value(x[0], x[1]) + + storage2.flush() + + def f3(): + for _ in xrange(ITERATIONS): + for x in DATA: + storage3.inc_value(x[0], x[1]) + + storage3.flush() + + workers = [] + for _ in xrange(num_workers): + func = random.choice([f1, f2, f3]) + p = Process(target=func) + p.start() + workers.append(p) + + for x in workers: + x.join() + + mt.set_num_ops(ITERATIONS * len(workers) * len(DATA)) + + storage.flush() + storage2.flush() + storage3.flush() + + with measure_time("flush storage multiprocessing reads") as mt: + mt.set_num_ops(3 * len(DATA)) + + for x in DATA: + assert storage2.get_value(x[0]) == 
storage.get_value(x[0]) == storage3.get_value(x[0]) == 0 + assert storage2.persistent_storage.get_value(x[0]) == storage.persistent_storage.get_value(x[0]) == storage3.persistent_storage.get_value(x[0]) + + assert storage.persistent_storage.get_value(x[0]) == x[1] * ITERATIONS * len(workers) + + +def test_uwsgi_storage_metrics(iterations): + registry = BaseRegistry() + + storage = UWSGIStorage(0, namespace="namespace", stats=True) + + registry.register(storage) + + for x in xrange(iterations): + for k, v in DATA: + storage.inc_value(k, v) + + collectors = {x.name: x for x in registry.collect()} + + metric = collectors["namespace:memory_size"] + assert metric.get_samples()[0].value == storage.get_area_size() + + metric = collectors["namespace:num_keys"] + assert metric.get_samples()[0].value == 20 diff --git a/tests_requirements.txt b/tests_requirements.txt index 1b810d7..7c11d73 100644 --- a/tests_requirements.txt +++ b/tests_requirements.txt @@ -2,8 +2,11 @@ flake8==3.2.1 tox==2.3.2 #tox-pyenv==2.3.2 + +ipdb +uwsgi==2.0.14 pytest==3.0.6 pytest-cov==2.4.0 pytest-flake8==0.8.1 -ipdb -uwsgi==2.0.14 +flake8-quotes==0.9.0 +flake8-comprehensions==1.2.1
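
A minimal usage sketch of the new UWSGIFlushStorage introduced in this diff. The class, registry, metric, and exposition names are taken from the patch and its tests; the WSGI wiring, the metric name, and the per-request flush() call are illustrative assumptions only, not part of this change (in practice the flush would more likely run on a timer or in a uwsgi mule):

    # Sketch only: assumes uwsgi is running with a sharedarea configured and
    # PROMETHEUS_UWSGI_SHAREDAREA (or the explicit id below) pointing at it.
    from pyprometheus.contrib.uwsgi_features import UWSGIFlushStorage
    from pyprometheus.metrics import Counter
    from pyprometheus.registry import BaseRegistry
    from pyprometheus.utils.exposition import registry_to_text

    # Per-worker storage: increments accumulate in local memory first.
    storage = UWSGIFlushStorage(0)
    registry = BaseRegistry(storage=storage)

    # Hypothetical application metric, just to exercise the storage.
    requests = Counter("app:requests_total", "Processed requests",
                       ("method",), registry=registry)


    def application(env, start_response):
        requests.labels(method=env.get("REQUEST_METHOD", "GET")).inc()

        # Push the locally accumulated deltas into the shared uwsgi sharedarea.
        # Calling it on every request keeps the example short; a real app would
        # flush periodically.
        storage.flush()

        if env.get("PATH_INFO") == "/metrics":
            # Exposition reads go through the persistent sharedarea storage,
            # so counters from all workers are visible here.
            body = registry_to_text(registry)
            start_response("200 OK", [("Content-Type", "text/plain")])
            return [body.encode("utf-8")]

        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"ok"]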