diff --git a/Pipfile b/Pipfile index 22b9fe6..d07442a 100644 --- a/Pipfile +++ b/Pipfile @@ -14,6 +14,7 @@ jaeger-client = "==4.1.0" flask-opentracing = "*" opentracing = ">=2.1" opentracing-instrumentation = "==3.2.1" +prometheus_client = ">=0.7.1" [dev-packages] requests-mock = "*" coverage = "==4.4.0" diff --git a/Pipfile.lock b/Pipfile.lock index 75c7c79..1c5ee6b 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -4,9 +4,7 @@ "sha256": "46717e53a0a7b838196e1d835cd574369f0cccc9a4ecb42c2e96577663ff8ee1" }, "pipfile-spec": 6, - "requires": { - "python_version": "3.6" - }, + "requires": {}, "sources": [ { "name": "pypi", @@ -209,6 +207,13 @@ "index": "pypi", "version": "==3.2.1" }, + "prometheus-client": { + "hashes": [ + "sha256:71cd24a2b3eb335cb800c7159f423df1bd4dcd5171b234be15e3f31ec9f622da" + ], + "index": "pypi", + "version": "==0.7.1" + }, "pyrsistent": { "hashes": [ "sha256:f3b280d030afb652f79d67c5586157c5c1355c9a58dfc7940566e28d28f3df1b" diff --git a/docs/services.md b/docs/services.md index dd03d5d..1cea7a8 100644 --- a/docs/services.md +++ b/docs/services.md @@ -14,7 +14,27 @@ Extends the Microservice with [Connexion](https://github.com/zalando/connexion) Extend the [requests library](http://docs.python-requests.org/en/master/) with trace headers and parsing JSON objects. Encapsulate common rest operations between business services propagating trace headers if set up. +## Metrics +Adds [Prometheus](https://prometheus.io/) metrics using the [Prometheus Client +Library](https://github.com/prometheus/client_python). 
+ +At the moment, the following metrics are available: +- Incoming requests latency as a histogram +- Incoming requests number as a counter, divided by HTTP method, endpoint and +  HTTP status +- Total number of log events divided by level +- If the `tracer` service is activated and it is jaeger, it will show its metrics + +To use this service, you may add the following to your configuration file: + +```yaml +pyms: +  metrics: true +``` + +This will add the endpoint `/metrics` to your microservice, which will expose +the metrics. ## How to contrib: create your own service: -TODO \ No newline at end of file +TODO diff --git a/docs/structure.md b/docs/structure.md index 855a51a..3d61791 100644 --- a/docs/structure.md +++ b/docs/structure.md @@ -8,7 +8,7 @@ With the function `create_app` initialize the Flask app, register [blueprints](h and initialize all libraries such as Swagger, database, trace system, custom logger format, etc. ### pyms/flask/services -Integrations and wrappers over common libs like request, swagger, connexion +Integrations and wrappers over common libs like request, swagger, connexion or metrics. ### pyms/flask/healthcheck This view is usually used by Kubernetes, Eureka and other systems to check if our application is running. @@ -17,4 +17,4 @@ This view is usually used by Kubernetes, Eureka and other systems to check if ou Print logger in JSON format to send to server like Elasticsearch. Inject span traces in logger. ### pyms/tracer -Create an injector `flask_opentracing.FlaskTracer` to use in our projects. \ No newline at end of file +Create an injector `flask_opentracing.FlaskTracer` to use in our projects. 
diff --git a/pyms/flask/app/create_app.py b/pyms/flask/app/create_app.py index 33f1af4..6d4f99b 100644 --- a/pyms/flask/app/create_app.py +++ b/pyms/flask/app/create_app.py @@ -70,6 +70,7 @@ def init_logger(self): log_handler.setFormatter(formatter) self.application.logger.addHandler(log_handler) + self.application.logger.propagate = False if self.application.config["DEBUG"]: @@ -99,6 +100,15 @@ def init_app(self) -> Flask: return application + def init_metrics(self): + if getattr(self, "metrics", False) and self.metrics: + self.application.register_blueprint(self.metrics.metrics_blueprint) + self.metrics.add_logger_handler( + self.application.logger, + self.application.config["APP_NAME"] + ) + self.metrics.monitor(self.application) + def create_app(self): """Initialize the Flask app, register blueprints and initialize all libraries like Swagger, database, @@ -121,6 +131,8 @@ def create_app(self): self.init_logger() + self.init_metrics() + return self.application def add_error_handlers(self): diff --git a/pyms/flask/services/metrics.py b/pyms/flask/services/metrics.py new file mode 100644 index 0000000..81575dd --- /dev/null +++ b/pyms/flask/services/metrics.py @@ -0,0 +1,71 @@ +import time +import logging + +from flask import Blueprint, Response, request +from prometheus_client import Counter, Histogram, generate_latest +from pyms.flask.services.driver import DriverService + +# Based on https://github.com/sbarratt/flask-prometheus +# and https://github.com/korfuri/python-logging-prometheus/ + +FLASK_REQUEST_LATENCY = Histogram( + "flask_request_latency_seconds", "Flask Request Latency", ["method", "endpoint"] +) +FLASK_REQUEST_COUNT = Counter( + "flask_request_count", "Flask Request Count", ["method", "endpoint", "http_status"] +) + +LOGGER_TOTAL_MESSAGES = Counter( + "python_logging_messages_total", + "Count of log entries by service and level.", + ["service", "level"], +) + + +def before_request(): + request.start_time = time.time() + + +def 
after_request(response): + request_latency = time.time() - request.start_time + FLASK_REQUEST_LATENCY.labels(request.method, request.path).observe(request_latency) + FLASK_REQUEST_COUNT.labels(request.method, request.path, response.status_code).inc() + + return response + + +class Service(DriverService): + service = "metrics" + + def __init__(self, service, *args, **kwargs): + super().__init__(service, *args, **kwargs) + self.metrics_blueprint = Blueprint("metrics", __name__) + self.serve_metrics() + + def monitor(self, app): + app.before_request(before_request) + app.after_request(after_request) + + def serve_metrics(self): + @self.metrics_blueprint.route("/metrics", methods=["GET"]) + def metrics(): + return Response( + generate_latest(), + mimetype="text/plain", + content_type="text/plain; charset=utf-8", + ) + + def add_logger_handler(self, logger, service_name): + logger.addHandler(MetricsLogHandler(service_name)) + return logger + + +class MetricsLogHandler(logging.Handler): + """A LogHandler that exports logging metrics for Prometheus.io.""" + + def __init__(self, app_name): + super(MetricsLogHandler, self).__init__() + self.app_name = app_name + + def emit(self, record): + LOGGER_TOTAL_MESSAGES.labels(self.app_name, record.levelname).inc() diff --git a/pyms/flask/services/tracer.py b/pyms/flask/services/tracer.py index ac34b6c..2a92d53 100644 --- a/pyms/flask/services/tracer.py +++ b/pyms/flask/services/tracer.py @@ -1,8 +1,11 @@ import logging +from jaeger_client.metrics.prometheus import PrometheusMetricsFactory + from pyms.constants import LOGGER_NAME from pyms.flask.services.driver import DriverService from pyms.utils.utils import check_package_exists, import_package, import_from +from pyms.config.conf import get_conf logger = logging.getLogger(LOGGER_NAME) @@ -43,7 +46,11 @@ def init_jaeger_tracer(self): 'reporting_host': self.host } } - + metrics_config = get_conf(service="pyms.metrics", empty_init=True, memoize=False) + metrics = "" + if 
metrics_config: + service_name = self.component_name.lower().replace("-", "_").replace(" ", "_") + metrics = PrometheusMetricsFactory(namespace=service_name) config = Config(config={ **{'sampler': { 'type': 'const', 'param': 1, }, 'logging': True }, **host }, service_name=self.component_name, + metrics_factory=metrics, validate=True) return config.initialize_tracer() diff --git a/requirements-tests.txt b/requirements-tests.txt index d9626ad..a61f2d2 100644 --- a/requirements-tests.txt +++ b/requirements-tests.txt @@ -42,7 +42,7 @@ mkdocs==1.0.4 mock==2.0.0 more-itertools==7.2.0 nose==1.3.7 -numpy==1.13.3 +numpy==1.16.1 openapi-spec-validator==0.2.8 opentracing==2.2.0 opentracing-instrumentation==3.2.1 @@ -52,7 +52,7 @@ pkginfo==1.5.0.1 pluggy==0.13.0 protobuf==3.9.0rc1 py==1.8.0 -py-ms==1.0.0 +py-ms==1.0.1 Pygments==2.3.1 pylint==2.4.3 pyparsing==2.4.2 @@ -85,3 +85,4 @@ webencodings==0.5.1 Werkzeug==0.16.0 wrapt==1.11.2 zipp==0.6.0 +prometheus_client==0.7.1 diff --git a/tests/config-tests-metrics.yml b/tests/config-tests-metrics.yml new file mode 100644 index 0000000..5860384 --- /dev/null +++ b/tests/config-tests-metrics.yml @@ -0,0 +1,11 @@ +--- +pyms: + metrics: true + tracer: + client: "jaeger" + component_name: "Python Microservice" +my-ms: + DEBUG: true + TESTING: true + APP_NAME: "Python Microservice" + APPLICATION_ROOT: / diff --git a/tests/config-tests.yml b/tests/config-tests.yml index 99304f2..010d26d 100644 --- a/tests/config-tests.yml +++ b/tests/config-tests.yml @@ -1,4 +1,5 @@ pyms: + metrics: true requests: data: data swagger: @@ -17,4 +18,4 @@ my-ms: subservice1: test: input subservice2: - test: output \ No newline at end of file + test: output diff --git a/tests/test_metrics.py b/tests/test_metrics.py new file mode 100644 index 0000000..a8d6f80 --- /dev/null +++ b/tests/test_metrics.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python + +import os +import unittest.mock + +from flask import current_app +from prometheus_client import generate_latest + +from 
pyms.constants import CONFIGMAP_FILE_ENVIRONMENT +from pyms.flask.app import Microservice + +class TestMetricsFlask(unittest.TestCase): + + BASE_DIR = os.path.dirname(os.path.abspath(__file__)) + + def setUp(self): + os.environ[CONFIGMAP_FILE_ENVIRONMENT] = os.path.join(self.BASE_DIR, "config-tests-metrics.yml") + ms = Microservice(service="my-ms", path=__file__) + self.app = ms.create_app() + self.client = self.app.test_client() + + def test_metrics_latency(self): + self.client.get("/") + self.client.get("/metrics") + generated_latency_root = b'flask_request_latency_seconds_bucket{endpoint="/",le="0.005",method="GET"}' + generated_latency_metrics = b'flask_request_latency_seconds_bucket{endpoint="/metrics",le="0.005",method="GET"}' + assert generated_latency_root in generate_latest() + assert generated_latency_metrics in generate_latest() + + def test_metrics_count(self): + self.client.get("/") + self.client.get("/metrics") + generated_count_root = b'flask_request_count_total{endpoint="/",http_status="200",method="GET"}' + generated_count_metrics = b'flask_request_count_total{endpoint="/metrics",http_status="200",method="GET"}' + assert generated_count_root in generate_latest() + assert generated_count_metrics in generate_latest() + + def test_metrics_logger(self): + self.client.get("/") + self.client.get("/metrics") + generated_logger = b'python_logging_messages_total{level="INFO",service="Python Microservice With Flask and Lightstep"}' + assert generated_logger in generate_latest() + + def test_metrics_jaeger(self): + self.client.get("/") + self.client.get("/metrics") + generated_logger = b'jaeger:reporter_spans_total' + assert generated_logger in generate_latest()