Skip to content

Commit

Permalink
Deprecate static labels in favor of default labels
Browse files Browse the repository at this point in the history
Fixes #60
  • Loading branch information
rycus86 committed Jul 10, 2020
1 parent 26452e6 commit 53860ca
Show file tree
Hide file tree
Showing 4 changed files with 151 additions and 111 deletions.
13 changes: 8 additions & 5 deletions README.md
Expand Up @@ -170,10 +170,15 @@ Similarly, the `start_http_server` allows exposing the endpoint on an
independent Flask application on a selected HTTP port.
It also supports overriding the endpoint's path and the HTTP listen address.

You can also set static labels to add to every request managed by
a `PrometheusMetrics` instance, using the `static_labels` argument.
You can also set default labels to add to every request managed by
a `PrometheusMetrics` instance, using the `default_labels` argument.
This needs to be a dictionary, where each key will become a metric
label name, and the values their (static) values.
label name, and the values will become the label values.
These can be constant values, or dynamic functions, see below in the
[Labels](#Labels) section.

> The `static_labels` argument is deprecated since 0.15.0,
> please use the new `default_labels` argument.
If you use another framework over Flask (perhaps
[Connexion](https://connexion.readthedocs.io/)) then you might return
Expand All @@ -194,8 +199,6 @@ the following values are supported in the dictionary:
as the argument

Label values are evaluated within the request context.
The `static_labels` labels are excepted from this,
those need to be static values.

## Application information

Expand Down
145 changes: 91 additions & 54 deletions prometheus_flask_exporter/__init__.py
Expand Up @@ -104,8 +104,8 @@ def echo_status(status):

def __init__(self, app, path='/metrics',
export_defaults=True, defaults_prefix='flask',
group_by='path', buckets=None, static_labels=None,
response_converter=None,
group_by='path', buckets=None,
default_labels=None, response_converter=None,
excluded_paths=None, registry=None, **kwargs):
"""
Create a new Prometheus metrics export configuration.
Expand All @@ -123,7 +123,7 @@ def __init__(self, app, path='/metrics',
(defaults to `path`)
:param buckets: the time buckets for request latencies
(will use the default when `None`)
:param static_labels: static labels to attach to each of the
:param default_labels: default labels to attach to each of the
metrics exposed by this `PrometheusMetrics` instance
:param response_converter: a function that converts the captured
the produced response object to a Flask friendly representation
Expand All @@ -136,7 +136,7 @@ def __init__(self, app, path='/metrics',
self.path = path
self._export_defaults = export_defaults
self._defaults_prefix = defaults_prefix or 'flask'
self._static_labels = static_labels or {}
self._default_labels = default_labels or {}
self._response_converter = response_converter or make_response
self.buckets = buckets
self.version = __version__
Expand All @@ -150,6 +150,17 @@ def __init__(self, app, path='/metrics',
from prometheus_client import REGISTRY as DEFAULT_REGISTRY
self.registry = DEFAULT_REGISTRY

if kwargs.get('static_labels'):
warnings.warn(
'The `static_labels` argument of `PrometheusMetrics` is '
'deprecated since 0.15.0, please use the '
'new `default_labels` argument.', DeprecationWarning
)

for key, value in kwargs.get('static_labels', dict()).items():
if key not in self._default_labels:
self._default_labels[key] = value

if kwargs.get('group_by_endpoint') is True:
warnings.warn(
'The `group_by_endpoint` argument of `PrometheusMetrics` is '
Expand Down Expand Up @@ -331,27 +342,27 @@ def export_defaults(self, buckets=None, group_by='path',
else:
prefix = prefix + "_"

additional_labels = self._static_labels.items()
labels = self._get_combined_labels(None)

histogram = Histogram(
'%shttp_request_duration_seconds' % prefix,
'Flask HTTP request duration in seconds',
('method', duration_group_name, 'status') + tuple(map(lambda kv: kv[0], additional_labels)),
('method', duration_group_name, 'status') + labels.keys(),
registry=self.registry,
**buckets_as_kwargs
)

counter = Counter(
'%shttp_request_total' % prefix,
'Total number of HTTP requests',
('method', 'status') + tuple(map(lambda kv: kv[0], additional_labels)),
('method', 'status') + labels.keys(),
registry=self.registry
)

self.info(
'%sexporter_info' % prefix,
'Information about the Prometheus Flask exporter',
version=self.version, **self._static_labels
version=self.version
)

def before_request():
Expand All @@ -373,14 +384,18 @@ def after_request(response):
else:
group = getattr(request, duration_group)

histogram.labels(
request.method, group, _to_status_code(response.status_code),
*map(lambda kv: kv[1], additional_labels)
).observe(total_time)
histogram_labels = {
'method': request.method,
'status': _to_status_code(response.status_code),
duration_group_name: group
}
histogram_labels.update(labels.values_for(response))

histogram.labels(**histogram_labels).observe(total_time)

counter.labels(
request.method, _to_status_code(response.status_code),
*map(lambda kv: kv[1], additional_labels)
method=request.method, status=_to_status_code(response.status_code),
**labels.values_for(response)
).inc()

return response
Expand All @@ -393,6 +408,8 @@ def teardown_request(exception=None):
if any(pattern.match(request.path) for pattern in self.excluded_paths):
return

response = make_response('Exception: %s' % exception, 500)

if hasattr(request, 'prom_start_time'):
total_time = max(default_timer() - request.prom_start_time, 0)

Expand All @@ -401,14 +418,18 @@ def teardown_request(exception=None):
else:
group = getattr(request, duration_group)

histogram.labels(
request.method, group, 500,
*map(lambda kv: kv[1], additional_labels)
).observe(total_time)
histogram_labels = {
'method': request.method,
'status': 500,
duration_group_name: group
}
histogram_labels.update(labels.values_for(response))

histogram.labels(**histogram_labels).observe(total_time)

counter.labels(
request.method, 500,
*map(lambda kv: kv[1], additional_labels)
method=request.method, status=500,
**labels.values_for(response)
).inc()

return
Expand Down Expand Up @@ -539,46 +560,16 @@ def _track(self, metric_type, metric_call, metric_kwargs, name, description, lab
if labels is not None and not isinstance(labels, dict):
raise TypeError('labels needs to be a dictionary of {labelname: callable}')

if self._static_labels:
if not labels:
labels = self._static_labels
else:
# merge the default labels and the specific ones for this metric
combined = dict()
combined.update(self._static_labels)
combined.update(labels)
labels = combined
labels = self._get_combined_labels(labels)

label_names = labels.keys() if labels else tuple()
parent_metric = metric_type(
name, description, labelnames=label_names, registry=registry,
name, description, labelnames=labels.keys(), registry=registry,
**metric_kwargs
)

def argspec(func):
if hasattr(inspect, 'getfullargspec'):
return inspect.getfullargspec(func)
else:
return inspect.getargspec(func)

def label_value(f):
if not callable(f):
return lambda x: f
if argspec(f).args:
return lambda x: f(x)
else:
return lambda x: f()

label_generator = tuple(
(key, label_value(call))
for key, call in labels.items()
) if labels else tuple()

def get_metric(response):
if label_names:
return parent_metric.labels(
**{key: call(response) for key, call in label_generator}
)
if labels.has_keys():
return parent_metric.labels(**labels.values_for(response))
else:
return parent_metric

Expand Down Expand Up @@ -654,6 +645,52 @@ def func(*args, **kwargs):

return decorator

def _get_combined_labels(self, labels):
"""
Combines the given labels with static and default labels
and wraps them into an object that can efficiently return
the keys and values of these combined labels.
"""

labels = labels.copy() if labels else dict()

if self._default_labels:
labels.update(self._default_labels.copy())

def argspec(func):
if hasattr(inspect, 'getfullargspec'):
return inspect.getfullargspec(func)
else:
return inspect.getargspec(func)

def label_value(f):
if not callable(f):
return lambda x: f
if argspec(f).args:
return lambda x: f(x)
else:
return lambda x: f()

class CombinedLabels(object):
def __init__(self, _labels):
self.labels = _labels.items()

def keys(self):
return tuple(map(lambda k: k[0], self.labels))

def has_keys(self):
return len(self.labels) > 0

def values_for(self, response):
label_generator = tuple(
(key, label_value(call))
for key, call in self.labels
) if labels else tuple()

return {key: value(response) for key, value in label_generator}

return CombinedLabels(labels)

@staticmethod
def do_not_track():
"""
Expand Down
52 changes: 5 additions & 47 deletions prometheus_flask_exporter/multiprocess.py
Expand Up @@ -40,39 +40,21 @@ class MultiprocessPrometheusMetrics(PrometheusMetrics):

__metaclass__ = ABCMeta

def __init__(self, app=None, export_defaults=True,
defaults_prefix='flask', group_by='path',
buckets=None, static_labels=None, registry=None):
def __init__(self, app=None, **kwargs):
"""
Create a new multiprocess-aware Prometheus metrics export configuration.
:param app: the Flask application (can be `None`)
:param export_defaults: expose all HTTP request latencies
and number of HTTP requests
:param defaults_prefix: string to prefix the default exported
metrics name with (when either `export_defaults=True` or
`export_defaults(..)` is called)
:param group_by: group default HTTP metrics by
this request property, like `path`, `endpoint`, `url_rule`, etc.
(defaults to `path`)
:param buckets: the time buckets for request latencies
(will use the default when `None`)
:param static_labels: static labels to attach to each of the
metrics exposed by this metrics instance
:param registry: the Prometheus Registry to use (can be `None` and it
will be registered with `prometheus_client.multiprocess.MultiProcessCollector`)
"""

_check_multiproc_env_var()

registry = registry or CollectorRegistry()
registry = kwargs.get('registry') or CollectorRegistry()
MultiProcessCollector(registry)

super(MultiprocessPrometheusMetrics, self).__init__(
app=app, path=None, export_defaults=export_defaults,
defaults_prefix=defaults_prefix, group_by=group_by,
buckets=buckets, static_labels=static_labels,
registry=registry
app=app, path=None, registry=registry, **kwargs
)

def start_http_server(self, port, host='0.0.0.0', endpoint=None):
Expand Down Expand Up @@ -191,36 +173,12 @@ def child_exit(server, worker):
Alternatively, you can use the instance functions as well.
"""

def __init__(self, app=None, path='/metrics', export_defaults=True,
defaults_prefix='flask', group_by='path',
buckets=None, static_labels=None, registry=None):
def __init__(self, app=None, path='/metrics', **kwargs):
"""
Create a new multiprocess-aware Prometheus metrics export configuration.
:param app: the Flask application (can be `None`)
:param path: the metrics path (defaults to `/metrics`)
:param export_defaults: expose all HTTP request latencies
and number of HTTP requests
:param defaults_prefix: string to prefix the default exported
metrics name with (when either `export_defaults=True` or
`export_defaults(..)` is called)
:param group_by: group default HTTP metrics by
this request property, like `path`, `endpoint`, `url_rule`, etc.
(defaults to `path`)
:param buckets: the time buckets for request latencies
(will use the default when `None`)
:param static_labels: static labels to attach to each of the
metrics exposed by this metrics instance
:param registry: the Prometheus Registry to use (can be `None` and it
will be registered with `prometheus_client.multiprocess.MultiProcessCollector`)
"""

super(GunicornInternalPrometheusMetrics, self).__init__(
app=app, export_defaults=export_defaults,
defaults_prefix=defaults_prefix, group_by=group_by,
buckets=buckets, static_labels=static_labels,
registry=registry
)
super(GunicornInternalPrometheusMetrics, self).__init__(app=app, **kwargs)

if app:
self.register_endpoint(path)
Expand Down

0 comments on commit 53860ca

Please sign in to comment.