diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..813ffb91 --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 120 +ignore = E501 diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index e482cf89..00000000 --- a/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,11 +0,0 @@ - - -## What you expected to happen? - -## What happened? - - -## How to reproduce it? - diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 8bb40ff9..00000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,22 +0,0 @@ - - -## What does this do? - - -## Why is it a good idea? - - -## Context - - -## Questions - diff --git a/.gitignore b/.gitignore index adbed8c0..80bdfe77 100644 --- a/.gitignore +++ b/.gitignore @@ -5,8 +5,14 @@ build/ dist/ .uptodate /.env -/junit-*.xml +test-results/junit-*.xml /.cache .ensure-* /.tox /.coverage +/venv*/ +/.idea/ +/.vscode/ + +# Documentation +docs/build diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 00000000..cee51dff --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,30 @@ +# .readthedocs.yml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.10" + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/conf.py + +# Build documentation with MkDocs +#mkdocs: +# configuration: mkdocs.yml + +# Optionally build your docs in additional formats such as PDF and ePub +formats: all + +# Optionally declare the Python requirements required to build your docs +python: + install: + - requirements: docs/requirements.txt + - method: setuptools + path: . diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 26edd862..3d775fa7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,313 @@ Changelog ========= +<<<<<<< HEAD +0.x.x (?) +================== + +* Added ... +* Added Minimum option for Timeseries +* Added Maximum option for Timeseries +* Added Number of decimals displays option for Timeseries +* Added Bar_Chart_ panel support +* Extended SqlTarget to support parsing queries from files +* Fix AlertCondition backwards compatibility (``useNewAlerts`` default to ``False``) +* Added RateMetricAgg_ for ElasticSearch + +.. _`Bar_Chart`: https://grafana.com/docs/grafana/latest/panels-visualizations/visualizations/bar-chart/ +.. _`RateMetricAgg`: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-rate-aggregation.html + +0.7.0 (2022-10-02) +================== + +* Added Grafana 8.x new Alert Rule +* Added Grafana 9.x new Alert Rule +* Added ePict_ plugin. +* Added ae3e plotly panel support +* Added datasource parameter to Influxdb targets +* Added missing units for Boolean, Data, Data Rate, Date & Time, Energy, Length, + Mass, and Misc +* Fix typo in unit constant ``GIGA_WATT`` (was ``GAGA_WATT``) +* Fix typo in unit constant ``NORMAL_CUBIC_METER`` (was ``NORMAIL_CUBIC_METER``) + +.. 
_`ePict`: https://grafana.com/grafana/plugins/larona-epict-panel/ + +0.6.3 (2022-03-30) +================== + +* Added Azure Monitor Target +* Added ``legendCalcs`` parameter to TimeSeries Panel +* Added ``hide`` parameter to ElasticsearchTarget +* Added ExpressionTarget support for ElasticSearch data sources + + +0.6.2 (2022-02-24) +================== + +* Added percentage type for thresholds +* Added ``datasource`` parameter to CloudWatch targets +* Added support for auto panels ids to AlertList panel +* Added ``SeriesOverride`` options (dashes and Z-index) +* Added support for fields value in Stat panel +* Added ``alertName`` parameter to AlertList panel +* Added ``thresholdsStyleMode`` parameter to TimeSeries panel +* Added Histogram panel support +* Dashboard upload script updated to support overwriting dashboards + +0.6.1 (2021-11-23) +================== + +* Added new SqlTarget to core to be able to define SQL queries as well +* Added missing attributes to the Logs panel +* Added Cloudwatch Logs Insights Target +* Added overrides to panels +* Extend ``SeriesOverride`` options + +Changes +------- + +* Fix Text panel (and add tests) + + **ATTENTION:** This might break panels generated for Grafana <8.0.6 + +0.6.0 (2021-10-26) +=================== + +* Added Discrete panel (https://grafana.com/grafana/plugins/natel-discrete-panel/) +* Added support for colors in stat mapping panel with StatValueMappings & StatRangeMappings +* Added missing auto interval properties in Template +* Added param to RowPanel to collapse the row +* Added StateTimeline panel which was added in Grafana v8 +* Added support for timeseries panel added in Grafana v8 +* Added MinMetricAgg and PercentilesMetricAgg to Elasticsearch +* Added support for News panel +* Added support for Pie Chart v2 from Grafana v8 + +Changes +------- + +* Refine expectations of is_color_code +* Deprecated StatMapping, StatValueMapping & StatRangeMapping +* Change YAxis min value default from 0 to None +* Support for Table panel for Grafana v8 may have broken backwards compatibility in old Table panel +* Breaking change, support for styled columns in tables removed, no longer used in Grafana v8 new Table +* Move development to ``main`` branch on GitHub. If you have work tracking the ``master`` you will need to update this. + +0.5.14 (2021-09-14) +================== + +* Added colour overrides to pie chart panel +* Added missing attributes from xAxis class +* Added transformations for the Panel class (https://grafana.com/docs/grafana/latest/panels/reference-transformation-functions/) +* Added Worldmap panel (https://grafana.com/grafana/plugins/grafana-worldmap-panel/) +* Added missing fill gradient to Graph panel +* Added missing align to graph panel +* Added missing show percentage attribute to Pie chart panel +* Added ``extraJson`` attribute to the Panel class for overriding the panel with raw JSON +* Added inline script support for Elasticsearch metrics +* Selected needs to be set as a bool value for templating to work. + +0.5.13 (2021-05-17) +=================== + +* Added a test for the Alert class. + +Changes +------- + +* Bugfix: changed 'target' validator in AlertNotification to accept CloudwatchMetricsTarget +* Moved the alertRuleTag field from Graph to Alert. 
+ +0.5.12 (2021-04-24) +=================== + +* Added hide parameter to CloudwatchMetricsTarget class +* Added table-driven example dashboard and upload script + +Changes +------- + +* bugfix load_dashboard add support for old python version 2.x, 3.3 and 3.4 +* Fix default target datasource to work with newer versions of Grafana +* Removed re-defined maxDataPoints field from multiple panels +* Fix the AlertList class and add a test for it + +Thanks to all those who have contributed to this release. + + +0.5.11 (2021-04-06) +=================== + +* Added timeField field for the Elasticsearch target to allow the alert to change its state +* Added nameFilter field for the AlertList panel +* Added dashboardTags field for the AlertList panel + +Thanks a lot for your contributions to this release, @dafna-starkware + +0.5.10 (2021-03-21) +=================== + +* Added Logs panel (https://grafana.com/docs/grafana/latest/panels/visualizations/logs-panel/) +* Added Cloudwatch metrics datasource (https://grafana.com/docs/grafana/latest/datasources/cloudwatch/) +* Added option to hide dashboard time picker +* Added Notification for Alert +* Added alertRuleTags field to the graph panel +* Added support for thresholds to graph panel +* Added support for Elasticsearch alert condition +* Added support for using gridPos for dashboard panels +* Added support for Humio Data Source. (https://grafana.com/grafana/plugins/humio-datasource/) + + +Changes +------- + +* Replace deprecated attr.assoc with attr.evolve + + + +0.5.9 (2020-12-18) +================== + +* Added Alert Threshold enabled/disabled to Graphs. +* Added constants for all Grafana value formats +* Added support for repetitions to Stat Panels +* Added textMode option to Stat Panels +* Add Panel object for all panels to inherit from +* Add Dashboard list panel (https://grafana.com/docs/grafana/latest/panels/visualizations/dashboard-list-panel/) + + +Changes +------- + +* Change supported python versions from 3.6 to 3.9 +* Added hide parameter to Target +* Updated dependencies (docs, build, CI) +* Consistent coding style + + +0.5.8 (2020-11-02) +================== + +This release adds quite a few new classes to grafanalib, ElasticSearch support was improved and support for InfluxDB data sources was added. + +We would also very much like to welcome James Gibson as new maintainer of grafanalib. Thanks a lot for stepping up to this role! + +Changes +------- + +* Added more YAxis formats, added Threshold and SeriesOverride types +* dataLinks support in graphs +* Add Elasticsearch bucket script pipeline aggregator +* Added ability to hide metrics for Elasticsearch MetricAggs +* Add derivative metric aggregation for Elasticsearch +* Add ``Stat`` class (and ``StatMapping``, ``StatValueMapping``, ``StatRangeMapping``) to support the Stat panel +* Add ``Svg`` class to support the SVG panel +* Add ``PieChart`` class for creating Pie Chart panels +* Add `transparent` setting to classes that were missing it (Heatmap, PieChart) +* Add InfluxDB data source +* Add ``auto_ref_ids`` to ``Graph`` + +Thanks a lot for your contributions to this release, @DWalker487, @JamesGibo, @daveworth, @dholbach, @fauust, @larsderidder, @matthewmrichter. + + +0.5.7 (2020-05-11) +================== + +Changes +------- + +* Fix crasher instatiating elasticsearch panels. +* Remove unused ``tools/`` directory. + +Thanks a lot for your contributions to this release, @DWalker487, @dholbach and @matthewmrichter. 
+ + +0.5.6 (2020-05-05) +================== + +Changes +------- + +* Add ``Heatmap`` class (and ``HeatmapColor``) to support the Heatmap panel (#170) +* Add ``BarGuage`` for creating bar guages panels in grafana 6 +* Add ``GuagePanel`` for creating guages in grafana 6 +* Add data links support to ``Graph``, ``BarGuage``, and ``GuagePanel`` panels +* Removed gfdatasource - feature is built in to Grafana since v5. +* Generate API docs for readthedocs.org +* Fix AlertList panel generation +* Add both upper and lower case `"time"` pattern for time_series column format in Table class +* Drop testing of Python 2.7, it has been EOL'ed and CI was broken + due to this. +* Automatically test documentation examples. +* Point to dev meeting resources. +* Add description attribute to Dashboard. +* Add support for custom variables. +* Point out documentation on readthedocs more clearly. +* Add average metric aggregation for elastic search +* Bugfix to query ordering in Elasticsearch TermsGroupBy +* Added all parameters for StringColumnStyle +* Add Elasticsearch Sum metric aggregator +* Add ``Statusmap`` class (and ``StatusmapColor``) to support the Statusmap panel plugin +* Bugfix to update default ``Threshold`` values for ``GaugePanel`` and ``BarGauge`` +* Use Github Actions for CI. +* Fix test warnings. +* Update ``BarGauge`` and ``GaugePanel`` default Threshold values. +* Update release instructions. + +Thanks a lot to the contributions from @DWalker487, @bboreham, @butlerx, @dholbach, @franzs, @jaychitalia95, @matthewmrichter and @number492 for this release! + +0.5.5 (2020-02-17) +================== + +It's been a while since the last release and we are happy to get this one into your hands. +0.5.5 is a maintenance release, most importantly it adds support for Python >= 3.5. + +We are very delighted to welcome Matt Richter on board as maintainer. + +Changes +------- + +* Automate publishing to PyPI with GitHub Actions +* Update README.rst to make the example work +* Bump Dockerfile to use Alpine 3.10 as base +* Fix up ``load_source()`` call which doesn't exist in Python 3.5 +* Update versions of Python tested +* Repair tests +* pin to attrs 19.2 and fix deprecated arguments + +Many thanks to contributors @bboreham, @dholbach, @ducksecops, @kevingessner, @matthewmrichter, @uritau. + +0.5.4 (2019-08-30) +================== + +Changes +------- + +* Add 'diff', 'percent_diff' and 'count_non_null' as RTYPE +* Support for changing sort value in Template Variables. +* Sort tooltips by value in Weave/Stacked-Charts +* Add ``for`` parameter for alerts on Grafana 6.X +* Add ``STATE_OK`` for alerts +* Add named values for the Template.hide parameter +* Add cardinality metric aggregator for ElasticSearch +* Add Threshold and Series Override types +* Add more YAxis formats + +Many thanks to contributors @kevingessner, @2easy, @vicmarbev, @butlerx. + +0.5.3 (2018-07-19) +================== + +Changes +------- + +* Minor markup tweaks to the README +0.5.9 (2019-05-12) +0.5.10 (2020-04-14) +=================== + +======= +>>>>>>> main 0.6.1 (2020-04-23) ================== * Set default refresh to 1m instead of 10s to reduce load on metric sources. diff --git a/COPYING.LGPL-3 b/COPYING.LGPL-3 deleted file mode 100644 index 89c7d69e..00000000 --- a/COPYING.LGPL-3 +++ /dev/null @@ -1,175 +0,0 @@ -./tools/integration/assert.sh is a copy of - - https://github.com/lehmannro/assert.sh/blob/master/assert.sh - -Since it was imported from its original source, it has only received -cosmetic modifications. 
As it is licensed under the LGPL-3, here's the -license text in its entirety: - - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. 
- - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. 
If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/MAINTAINERS b/MAINTAINERS new file mode 100644 index 00000000..bcde4db6 --- /dev/null +++ b/MAINTAINERS @@ -0,0 +1,17 @@ +In alphabetical order: + +The maintainers are generally available in Slack at +https://weave-community.slack.com/ in #grafanalib (https://weave-community.slack.com/archives/C9C9K6T4P) +(obtain an invitation at https://slack.weave.works/). + + +James Gibson, BBC (github: @JamesGibo, slack: James G) + +Retired maintainers: + +- Bryan Boreham +- Daniel Holbach +- Jonathan Lange +- Matt Richter + +Thank you for your involvement, and let us not say "farewell" ... diff --git a/Makefile b/Makefile index ad42ed5f..8a9ce2ee 100644 --- a/Makefile +++ b/Makefile @@ -1,31 +1,9 @@ .PHONY: all clean clean-deps lint test deps coverage .DEFAULT_GOAL := all -# Boiler plate for bulding Docker containers. -# All this must go at top of file I'm afraid. -IMAGE_PREFIX := quay.io/weaveworks -IMAGE_TAG := $(shell ./tools/image-tag) -GIT_REVISION := $(shell git rev-parse HEAD) -UPTODATE := .uptodate - -# Building Docker images is now automated. The convention is every directory -# with a Dockerfile in it builds an image calls quay.io/weaveworks/. -# Dependencies (i.e. things that go in the image) still need to be explicitly -# declared. -%/$(UPTODATE): %/Dockerfile - $(SUDO) docker build --build-arg=revision=$(GIT_REVISION) -t $(IMAGE_PREFIX)/$(shell basename $(@D)) $(@D)/ - $(SUDO) docker tag $(IMAGE_PREFIX)/$(shell basename $(@D)) $(IMAGE_PREFIX)/$(shell basename $(@D)):$(IMAGE_TAG) - touch $@ - -# Get a list of directories containing Dockerfiles -DOCKERFILES=$(shell find * -type f -name Dockerfile ! -path "tools/*" ! 
-path "vendor/*") -UPTODATE_FILES=$(patsubst %/Dockerfile,%/$(UPTODATE),$(DOCKERFILES)) -DOCKER_IMAGE_DIRS=$(patsubst %/Dockerfile,%,$(DOCKERFILES)) -IMAGE_NAMES=$(foreach dir,$(DOCKER_IMAGE_DIRS),$(patsubst %,$(IMAGE_PREFIX)/%,$(shell basename $(dir)))) - # Python-specific stuff TOX := $(shell command -v tox 2> /dev/null) -PIP := $(shell command -v pip 2> /dev/null) +PIP := $(shell command -v pip3 2> /dev/null) FLAKE8 := $(shell command -v flake8 2> /dev/null) .ensure-tox: .ensure-pip @@ -49,19 +27,14 @@ ifndef FLAKE8 endif touch .ensure-pip -images: - $(info $(IMAGE_NAMES)) - -all: $(UPTODATE_FILES) test lint coverage +all: test lint coverage deps: setup.py .ensure-tox tox.ini $(VIRTUALENV_BIN)/flake8 $(VIRTUALENV_BIN)/py.test: $(DEPS_UPTODATE) -gfdatasource/$(UPTODATE): gfdatasource/* - lint: .ensure-flake8 - $(FLAKE8) gfdatasource/gfdatasource grafanalib + $(FLAKE8) grafanalib test: .ensure-tox $(TOX) --skip-missing-interpreters @@ -70,8 +43,6 @@ coverage: $(TOX) -e coverage clean: - $(SUDO) docker rmi $(IMAGE_NAMES) >/dev/null 2>&1 || true - rm -rf $(UPTODATE_FILES) rm -rf grafanalib.egg-info rm -f .ensure-pip .ensure-tox .ensure-flake8 find . -name '*.pyc' | xargs rm diff --git a/README.rst b/README.rst index 90b4ec86..5762d975 100644 --- a/README.rst +++ b/README.rst @@ -1,9 +1,11 @@ -========== -grafanalib -========== +=============================== +Getting Started with grafanalib +=============================== -.. image:: https://circleci.com/gh/weaveworks/grafanalib.svg?style=shield - :target: https://circleci.com/gh/weaveworks/grafanalib +.. image:: https://readthedocs.org/projects/grafanalib/badge/?version=main + :alt: Documentation Status + :scale: 100% + :target: https://grafanalib.readthedocs.io/en/main Do you like `Grafana `_ but wish you could version your dashboard configuration? Do you find yourself repeating common patterns? If @@ -11,111 +13,28 @@ so, grafanalib is for you. grafanalib lets you generate Grafana dashboards from simple Python scripts. -Writing dashboards -================== - -The following will configure a dashboard with a single row, with one QPS graph -broken down by status code and another latency graph showing median and 99th -percentile latency: - -.. 
code-block:: python - - from grafanalib.core import * - - - dashboard = Dashboard( - title="Frontend Stats", - rows=[ - Row(panels=[ - Graph( - title="Frontend QPS", - dataSource='My Prometheus', - targets=[ - Target( - expr='sum(irate(nginx_http_requests_total{job="default/frontend",status=~"1.."}[1m]))', - legendFormat="1xx", - refId='A', - ), - Target( - expr='sum(irate(nginx_http_requests_total{job="default/frontend",status=~"2.."}[1m]))', - legendFormat="2xx", - refId='B', - ), - Target( - expr='sum(irate(nginx_http_requests_total{job="default/frontend",status=~"3.."}[1m]))', - legendFormat="3xx", - refId='C', - ), - Target( - expr='sum(irate(nginx_http_requests_total{job="default/frontend",status=~"4.."}[1m]))', - legendFormat="4xx", - refId='D', - ), - Target( - expr='sum(irate(nginx_http_requests_total{job="default/frontend",status=~"5.."}[1m]))', - legendFormat="5xx", - refId='E', - ), - ], - yAxes=[ - YAxis(format=OPS_FORMAT), - YAxis(format=SHORT_FORMAT), - ], - alert=Alert( - name="Too many 500s on Nginx", - message="More than 5 QPS of 500s on Nginx for 5 minutes", - alertConditions=[ - AlertCondition( - Target( - expr='sum(irate(nginx_http_requests_total{job="default/frontend",status=~"5.."}[1m]))', - legendFormat="5xx", - refId='A', - ), - timeRange=TimeRange("5m", "now"), - evaluator=GreaterThan(5), - operator=OP_AND, - reducerType=RTYPE_SUM, - ), - ], - ) - ), - Graph( - title="Frontend latency", - dataSource='My Prometheus', - targets=[ - Target( - expr='histogram_quantile(0.5, sum(irate(nginx_http_request_duration_seconds_bucket{job="default/frontend"}[1m])) by (le))', - legendFormat="0.5 quantile", - refId='A', - ), - Target( - expr='histogram_quantile(0.99, sum(irate(nginx_http_request_duration_seconds_bucket{job="default/frontend"}[1m])) by (le))', - legendFormat="0.99 quantile", - refId='B', - ), - ], - yAxes=single_y_axis(format=SECONDS_FORMAT), - ), - ]), - ], - ).auto_panel_ids() - -There is a fair bit of repetition here, but once you figure out what works for -your needs, you can factor that out. -See `our Weave-specific customizations `_ for inspiration. - -Generating dashboards -===================== - -If you save the above as ``frontend.dashboard.py`` (the suffix must be -``.dashboard.py``), you can then generate the JSON dashboard with: +How it works +============ -.. code-block:: console +Take a look at `the examples directory +`_, +e.g. `this dashboard +`_ +will configure a dashboard with a single row, with one QPS graph broken down +by status code and another latency graph showing median and 99th percentile +latency. - $ generate-dashboard -o frontend.json frontend.dashboard.py +In the code is a fair bit of repetition here, but once you figure out what +works for your needs, you can factor that out. +See `our Weave-specific customizations +`_ +for inspiration. -Installation -============ +You can read the entire grafanlib documentation on `readthedocs.io +`_. + +Getting started +=============== grafanalib is just a Python package, so: @@ -123,13 +42,22 @@ grafanalib is just a Python package, so: $ pip install grafanalib + +Generate the JSON dashboard like so: + +.. code-block:: console + + $ curl -o example.dashboard.py https://raw.githubusercontent.com/weaveworks/grafanalib/main/grafanalib/tests/examples/example.dashboard.py + $ generate-dashboard -o frontend.json example.dashboard.py + + Support ======= This library is in its very early stages. We'll probably make changes that break backwards compatibility, although we'll try hard not to. 
-grafanalib works with Python 2.7, 3.4, 3.5, and 3.6. +grafanalib works with Python 3.6 through 3.10. Developing ========== @@ -141,28 +69,34 @@ If you're working on the project, and need to build from source, it's done as fo $ . ./.env/bin/activate $ pip install -e . -`gfdatasource` -============== +Configuring Grafana Datasources +=============================== -This module also provides a script and docker image which can configure grafana -with new sources, or enable app plugins. +This repo used to contain a program ``gfdatasource`` for configuring +Grafana data sources, but it has been retired since Grafana now has a +built-in way to do it. See https://grafana.com/docs/administration/provisioning/#datasources -The script answers the `--help` with full usage information, but basic -invocation looks like this: +Community +========= -.. code-block:: console +We currently don't follow a roadmap for ``grafanalib`` and both `maintainers +` have recently +become somewhat occupied otherwise. + +We'd like you to join the ``grafanalib`` community! If you would like to +help out maintaining ``grafanalib`` that would be great. It's a fairly laid-back +and straight-forward project. Please talk to us on Slack (see the links below). - $ --grafana-url http://grafana. datasource --data-source-url http://datasource - $ --grafana-url http://grafana. app --id my-plugin +We follow the `CNCF Code of Conduct `_. Getting Help -============ +------------ If you have any questions about, feedback for or problems with ``grafanalib``: -- Invite yourself to the `#weave-community `_ slack channel. -- Ask a question on the `#weave-community `_ slack channel. -- Send an email to `weave-users@weave.works `_. -- `File an issue `_. +- Read the documentation at https://grafanalib.readthedocs.io +- Invite yourself to the `Weave Users Slack `_. +- Ask a question on the `#grafanalib `_ slack channel. +- `File an issue `_. Your feedback is always welcome! diff --git a/circle.yml b/circle.yml deleted file mode 100644 index 6051b017..00000000 --- a/circle.yml +++ /dev/null @@ -1,27 +0,0 @@ -machine: - services: - - docker - post: - - pyenv global 2.7.12 3.4.4 3.5.2 3.6.1 - environment: - PATH: $HOME/bin:$PATH - SRCDIR: /home/ubuntu/src/github.com/weaveworks/grafanalib - -dependencies: - override: - - pip install tox flake8 - - make deps - -test: - override: - - "mkdir -p $(dirname $SRCDIR) && cp -r $(pwd)/ $SRCDIR" - - "mkdir -p $CIRCLE_TEST_REPORTS/py.test/" - - cd $SRCDIR; make all - - mv $SRCDIR/junit-*.xml $CIRCLE_TEST_REPORTS/py.test/ - -deployment: - push: - branch: master - commands: - - docker login -e '.' -u "$QUAY_USER" -p "$QUAY_PASSWORD" quay.io - - docker push quay.io/weaveworks/gfdatasource:$(./tools/image-tag) diff --git a/docs/CHANGELOG.rst b/docs/CHANGELOG.rst new file mode 120000 index 00000000..e22698ba --- /dev/null +++ b/docs/CHANGELOG.rst @@ -0,0 +1 @@ +../CHANGELOG.rst \ No newline at end of file diff --git a/docs/CODE_OF_CONDUCT.rst b/docs/CODE_OF_CONDUCT.rst index ecb796f6..528dd2e3 100644 --- a/docs/CODE_OF_CONDUCT.rst +++ b/docs/CODE_OF_CONDUCT.rst @@ -6,7 +6,6 @@ Weaveworks follows the `CNCF Community Code of Conduct v1.0`_. Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Weaveworks project maintainer, -or -Alexis Richardson . +or `Alexis Richardson `. .. 
_`CNCF Community Code of Conduct v1.0`: https://github.com/cncf/foundation/blob/0ce4694e5103c0c24ca90c189da81e5408a46632/code-of-conduct.md diff --git a/docs/CONTRIBUTING.rst b/docs/CONTRIBUTING.rst index 5d53e0a4..78c68c33 100644 --- a/docs/CONTRIBUTING.rst +++ b/docs/CONTRIBUTING.rst @@ -11,7 +11,7 @@ If something comes up during a code review or on a ticket that you think should Code of conduct =============== -We have a `code of conduct`_, and we enforce it. Please take a look! +We have a :doc:`code of conduct <../CODE_OF_CONDUCT>`, and we enforce it. Please take a look! Coding guidelines ================= @@ -33,6 +33,9 @@ Conventions * Local variables are ``snake_cased`` * We're kind of fussy about indentation: 4 spaces everywhere, follow the examples in `core.py`_ if you're uncertain +* Triple Double quotes `"""` for docstrings +* Double quotes "" for human readable message or when string used for interpolation +* Single quotes '' for symbol like strings Testing ------- @@ -42,6 +45,10 @@ Lots of grafanalib is just simple data structures, so we aren't fastidious about However, tests are strongly encouraged for anything with non-trivial logic. Please try to use `hypothesis`_ for your tests. +.. code-block:: console + + $ make all + Gotchas ------- @@ -55,7 +62,7 @@ Submitting a PR * We are very grateful for all PRs, and deeply appreciate the work and effort involved! * We try to review PRs as quickly as possible, but it might take a couple of weeks to get around to reviewing your PR—sorry, we know that sucks -* Please add an entry to the `CHANGELOG`_ in your PR +* Please add an entry to the :doc:`CHANGELOG <../CHANGELOG>` in your PR * It helps a lot if the PR description provides some context on what you are trying to do and why you think it's a good idea * The smaller the PR, the more quickly we'll be able to review it @@ -71,5 +78,4 @@ Filing a bug .. _`CHANGELOG`: ../CHANGELOG.rst .. _`attr.Factory`: http://www.attrs.org/en/stable/api.html#attr.Factory .. _`hypothesis`: http://hypothesis.works/ -.. _`core.py`: ../grafanalib/core.py -.. _`code of conduct`: ./CODE_OF_CONDUCT.rst +.. _`core.py`: https://github.com/weaveworks/grafanalib/blob/main/grafanalib/core.py diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000..d912a3f6 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= -W +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/api/grafanalib.rst b/docs/api/grafanalib.rst new file mode 100644 index 00000000..7ac152a6 --- /dev/null +++ b/docs/api/grafanalib.rst @@ -0,0 +1,93 @@ +grafanalib package +================== + +Submodules +---------- + +grafanalib.cloudwatch module +---------------------------- + +.. automodule:: grafanalib.cloudwatch + :members: + :undoc-members: + :show-inheritance: + +grafanalib.core module +---------------------- + +.. 
automodule:: grafanalib.core + :members: + :undoc-members: + :show-inheritance: + +grafanalib.elasticsearch module +------------------------------- + +.. automodule:: grafanalib.elasticsearch + :members: + :undoc-members: + :show-inheritance: + +grafanalib.formatunits module +----------------------------- + +.. automodule:: grafanalib.formatunits + :members: + :undoc-members: + :show-inheritance: + +grafanalib.influxdb module +-------------------------- + +.. automodule:: grafanalib.influxdb + :members: + :undoc-members: + :show-inheritance: + +grafanalib.opentsdb module +-------------------------- + +.. automodule:: grafanalib.opentsdb + :members: + :undoc-members: + :show-inheritance: + +grafanalib.prometheus module +---------------------------- + +.. automodule:: grafanalib.prometheus + :members: + :undoc-members: + :show-inheritance: + +grafanalib.validators module +---------------------------- + +.. automodule:: grafanalib.validators + :members: + :undoc-members: + :show-inheritance: + +grafanalib.weave module +----------------------- + +.. automodule:: grafanalib.weave + :members: + :undoc-members: + :show-inheritance: + +grafanalib.zabbix module +------------------------ + +.. automodule:: grafanalib.zabbix + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: grafanalib + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/modules.rst b/docs/api/modules.rst new file mode 100644 index 00000000..be75b7a3 --- /dev/null +++ b/docs/api/modules.rst @@ -0,0 +1,7 @@ +grafanalib +========== + +.. toctree:: + :maxdepth: 4 + + grafanalib diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 00000000..f530f9c8 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,54 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# + +import os +import sys +sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- + +project = 'grafanalib' +copyright = '2021, grafanalib community' +author = 'grafanalib community' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. 
They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] diff --git a/docs/example.dashboard.py b/docs/example.dashboard.py deleted file mode 100644 index 21421565..00000000 --- a/docs/example.dashboard.py +++ /dev/null @@ -1,79 +0,0 @@ -from grafanalib.core import * - - -dashboard = Dashboard( - title="Frontend Stats", - rows=[ - Row(panels=[ - Graph( - title="Frontend QPS", - dataSource='My Prometheus', - targets=[ - Target( - expr='sum(irate(nginx_http_requests_total{job="default/frontend",status=~"1.."}[1m]))', - legendFormat="1xx", - refId='A', - ), - Target( - expr='sum(irate(nginx_http_requests_total{job="default/frontend",status=~"2.."}[1m]))', - legendFormat="2xx", - refId='B', - ), - Target( - expr='sum(irate(nginx_http_requests_total{job="default/frontend",status=~"3.."}[1m]))', - legendFormat="3xx", - refId='C', - ), - Target( - expr='sum(irate(nginx_http_requests_total{job="default/frontend",status=~"4.."}[1m]))', - legendFormat="4xx", - refId='D', - ), - Target( - expr='sum(irate(nginx_http_requests_total{job="default/frontend",status=~"5.."}[1m]))', - legendFormat="5xx", - refId='E', - ), - ], - yAxes=[ - YAxis(format=OPS_FORMAT), - YAxis(format=SHORT_FORMAT), - ], - alert=Alert( - name="Too many 500s on Nginx", - message="More than 5 QPS of 500s on Nginx for 5 minutes", - alertConditions=[ - AlertCondition( - Target( - expr='sum(irate(nginx_http_requests_total{job="default/frontend",status=~"5.."}[1m]))', - legendFormat="5xx", - refId='A', - ), - timeRange=TimeRange("5m", "now"), - evaluator=GreaterThan(5), - operator=OP_AND, - reducerType=RTYPE_SUM, - ), - ], - ) - ), - Graph( - title="Frontend latency", - dataSource='My Prometheus', - targets=[ - Target( - expr='histogram_quantile(0.5, sum(irate(nginx_http_request_duration_seconds_bucket{job="default/frontend"}[1m])) by (le))', - legendFormat="0.5 quantile", - refId='A', - ), - Target( - expr='histogram_quantile(0.99, sum(irate(nginx_http_request_duration_seconds_bucket{job="default/frontend"}[1m])) by (le))', - legendFormat="0.99 quantile", - refId='B', - ), - ], - yAxes=single_y_axis(format=SECONDS_FORMAT), - ), - ]), - ], -).auto_panel_ids() diff --git a/docs/getting-started.rst b/docs/getting-started.rst new file mode 100644 index 00000000..08e0dd47 --- /dev/null +++ b/docs/getting-started.rst @@ -0,0 +1,208 @@ +=============================== +Getting Started with grafanalib +=============================== + +Do you like `Grafana `_ but wish you could version your +dashboard configuration? Do you find yourself repeating common patterns? If +so, grafanalib is for you. + +grafanalib lets you generate Grafana dashboards from simple Python scripts. + +Grafana migrates dashboards to the latest Grafana schema version on import, +meaning that dashboards created with grafanalib are supported by +all versions of Grafana. You may find that some of the latest features are +missing from grafanalib, please refer to the `module documentation +`_ for information +about supported features. If you find a missing feature please raise an issue +or submit a PR to the GitHub `repository `_ + +Writing dashboards +================== + +The following will configure a dashboard with a couple of example panels that +use the random walk and Prometheus datasources. + +.. 
literalinclude:: ../grafanalib/tests/examples/example.dashboard.py
+   :language: python
+
+There is a fair bit of repetition here, but once you figure out what works for
+your needs, you can factor that out.
+See `our Weave-specific customizations
+`_
+for inspiration.
+
+Generating dashboards
+=====================
+
+If you save the above as ``example.dashboard.py`` (the suffix must be
+``.dashboard.py``), you can then generate the JSON dashboard with:
+
+.. code-block:: console
+
+   $ generate-dashboard -o frontend.json example.dashboard.py
+
+Uploading dashboards from code
+==============================
+
+Sometimes you may need to generate and upload dashboards directly from Python
+code. The following example provides minimal code boilerplate for it:
+
+.. literalinclude:: ../grafanalib/tests/examples/example.upload-dashboard.py
+   :language: python
+
+Alternatively, Grafana supports file based provisioning, where dashboard files
+are periodically loaded into the Grafana database. Tools like Ansible can
+assist with the deployment.
+
+Writing Alerts
+==============
+
+Between Grafana versions there have been significant changes in how alerts
+are managed. Below are some examples of how to configure alerting in
+Grafana v8 and Grafana v9.
+
+Alerts in Grafana v8
+--------------------
+
+The following will configure a couple of alerts inside a group.
+
+.. literalinclude:: ../grafanalib/tests/examples/example.alertsv8.alertgroup.py
+   :language: python
+
+Although this example has a fair amount of boilerplate, when creating large numbers
+of similar alerts it can save lots of time to programmatically fill these fields.
+
+Each ``AlertGroup`` represents a folder within Grafana's alerts tab. This consists
+of one or more ``AlertRulev8``, which contains one or more triggers. Triggers define
+what will cause the alert to fire.
+
+A trigger is made up of a ``Target`` (a Grafana query on a datasource) and an
+``AlertCondition`` (a condition this query must satisfy in order to alert).
+
+Finally, there are additional settings like:
+
+* How the alert will behave when data sources have problems (``noDataAlertState`` and ``errorAlertState``)
+
+* How frequently the trigger is evaluated (``evaluateInterval``)
+
+* How long the AlertCondition needs to be met before the alert fires (``evaluateFor``)
+
+* Annotations and labels, which help provide contextual information and direct where
+  your alerts will go
+
+Alerts in Grafana v9
+--------------------
+
+The following will configure a couple of alerts inside a group for Grafana v9.x+.
+
+.. literalinclude:: ../grafanalib/tests/examples/example.alertsv9.alertgroup.py
+   :language: python
+
+Although this example has a fair amount of boilerplate, when creating large numbers
+of similar alerts it can save lots of time to programmatically fill these fields.
+
+Each ``AlertGroup`` represents a folder within Grafana's alerts tab. This consists
+of one or more ``AlertRulev9``, which contains a list of triggers that define what
+will cause the alert to fire.
+
+A trigger can either be a ``Target`` (a Grafana query on a datasource) or an
+``AlertExpression`` (an expression performed on one of the triggers).
+
+An ``AlertExpression`` can be one of four types:
+
+* Classic - Contains a list of ``AlertCondition``'s that are evaluated
+* Reduce - Reduce the queried data
+* Resample - Resample the queried data
+* Math - Expression with the condition for the rule
+
+Finally, there are additional settings like:
+
+* How the alert will behave when data sources have problems (``noDataAlertState`` and ``errorAlertState``)
+
+* How frequently each rule in the Alert Group is evaluated (``evaluateInterval``)
+
+* How long the AlertCondition needs to be met before the alert fires (``evaluateFor``)
+
+* Annotations and labels, which help provide contextual information and direct where
+  your alerts will go
+
+
+Generating Alerts
+=================
+
+If you save either of the above examples for Grafana v8 or v9 as ``example.alertgroup.py``
+(the suffix must be ``.alertgroup.py``), you can then generate the JSON alert with:
+
+.. code-block:: console
+
+   $ generate-alertgroup -o alerts.json example.alertgroup.py
+
+Uploading alerts from code
+==========================
+
+As Grafana does not currently have a user interface for importing alertgroup JSON,
+you must either upload the alerts via Grafana's REST API or use file based provisioning.
+
+Uploading alerts from code using REST API
+-----------------------------------------
+
+The following example provides minimal code boilerplate for it:
+
+.. literalinclude:: ../grafanalib/tests/examples/example.upload-alerts.py
+   :language: python
+
+Uploading alerts from code using File Based Provisioning
+--------------------------------------------------------
+
+The alternative to using Grafana's REST API is to use its file based provisioning for
+alerting.
+
+The following example uses the ``AlertFileBasedProvisioning`` class to provision a list
+of alert groups:
+
+.. literalinclude:: ../grafanalib/tests/examples/example.alertsv9.alertfilebasedprovisioning.py
+   :language: python
+
+If you save the above example as ``example.alertfilebasedprovisioning.py``
+(the suffix must be ``.alertfilebasedprovisioning.py``), you can then generate the JSON alert with:
+
+.. code-block:: console
+
+   $ generate-alertgroup -o alerts.json example.alertfilebasedprovisioning.py
+
+Then place the file in the ``provisioning/alerting`` directory and start Grafana.
+Tools like Ansible can assist with the deployment of the alert file.
+
+Installation
+============
+
+grafanalib is just a Python package, so:
+
+.. code-block:: console
+
+   $ pip install grafanalib
+
+Support
+=======
+
+This library is in its very early stages. We'll probably make changes that
+break backwards compatibility, although we'll try hard not to.
+
+grafanalib works with Python 3.7, 3.8, 3.9, 3.10 and 3.11.
+
+Developing
+==========
+If you're working on the project, and need to build from source, it's done as follows:
+
+.. code-block:: console
+
+   $ virtualenv .env
+   $ . ./.env/bin/activate
+   $ pip install -e .
+
+Configuring Grafana Datasources
+===============================
+
+This repo used to contain a program ``gfdatasource`` for configuring
+Grafana data sources, but it has been retired since Grafana now has a
+built-in way to do it. See https://grafana.com/docs/administration/provisioning/#datasources
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 00000000..81e6c49c
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,30 @@
+.. grafanalib documentation master file, created by
+   sphinx-quickstart on Mon Feb 17 14:29:44 2020.
+ You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to grafanalib's documentation! +====================================== + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + getting-started + + api/grafanalib + api/modules + + CONTRIBUTING + CODE_OF_CONDUCT + releasing + + CHANGELOG + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/releasing.rst b/docs/releasing.rst index 6298f168..21af9783 100644 --- a/docs/releasing.rst +++ b/docs/releasing.rst @@ -2,14 +2,39 @@ Release process =============== +Pre-release +----------- + * Pick a new version number (e.g. ``X.Y.Z``) -* Update `CHANGELOG <../CHANGELOG.rst>`_ with that number -* Update `setup.py <../setup.py>`_ with that number -* Tag the repo with ``vX.Y.Z`` -* Upload to PyPI: +* Update `CHANGELOG `_ with that number +* Update `setup.py `_ with that number + +Smoke-testing +------------- + +* Run + + .. code-block:: console + + $ python setup.py install --user + +* Check ``~/.local/bin/generate-dashboard`` for the update version. +* Try the example on `README `_. + +Releasing +--------- + +* Head to ``_ and create the release there. +* Wait for GitHub Actions to complete the build and release. +* Confirm on ``_ that the release made it there. + +Follow-up +--------- + +* Run .. code-block:: console - $ rm -rf dist - $ python setup.py sdist bdist_wheel - $ twine upload dist/* + $ pip intall grafanalib -U + +* Check if the upgrade worked and the test above still passes. diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 00000000..a3f75dea --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,2 @@ +sphinx == 6.1.3 +sphinx_rtd_theme == 1.2.2 \ No newline at end of file diff --git a/grafanalib/_gen.py b/grafanalib/_gen.py index fdfff743..4094edf1 100644 --- a/grafanalib/_gen.py +++ b/grafanalib/_gen.py @@ -1,33 +1,17 @@ """Generate JSON Grafana dashboards.""" import argparse -import imp import json import os import sys DASHBOARD_SUFFIX = '.dashboard.py' +ALERTGROUP_SUFFIX = '.alertgroup.py' - -class DashboardError(Exception): - """Raised when there is something wrong with a dashboard.""" - - -def load_dashboard(path): - """Load a ``Dashboard`` from a Python definition. - - :param str path: Path to a *.dashboard.py file that defines a variable, - ``dashboard``. - :return: A ``Dashboard`` - """ - module = imp.load_source("dashboard", path) - marker = object() - dashboard = getattr(module, 'dashboard', marker) - if dashboard is marker: - raise DashboardError( - "Dashboard definition {} does not define 'dashboard'".format(path)) - return dashboard +""" +Common generation functionality +""" class DashboardEncoder(json.JSONEncoder): @@ -40,6 +24,14 @@ def default(self, obj): return json.JSONEncoder.default(self, obj) +class DashboardError(Exception): + """Raised when there is something wrong with a dashboard.""" + + +class AlertGroupError(Exception): + """Raised when there is something wrong with an alertgroup.""" + + def write_dashboard(dashboard, stream): json.dump( dashboard.to_json_data(), stream, sort_keys=True, indent=2, @@ -47,18 +39,146 @@ def write_dashboard(dashboard, stream): stream.write('\n') +write_alertgroup = write_dashboard + + +class DefinitionError(Exception): + """Raised when there is a problem loading a Grafanalib type from a python definition.""" + + +def loader(path): + """Load a grafanalib type from a Python definition. 
+ + :param str path: Path to a *..py file that defines a variable called . + """ + gtype = path.split(".")[-2] + + if sys.version_info[0] == 3 and sys.version_info[1] >= 5: + import importlib.util + spec = importlib.util.spec_from_file_location(gtype, path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + elif sys.version_info[0] == 3 and (sys.version_info[1] >= 3 or sys.version_info[1] <= 4): + from importlib.machinery import SourceFileLoader + module = SourceFileLoader(gtype, path).load_module() + elif sys.version_info[0] == 2: + import imp + module = imp.load_source(gtype, path) + else: + import importlib + module = importlib.load_source(gtype, path) + + marker = object() + grafanalibtype = getattr(module, gtype, marker) + if grafanalibtype is marker: + raise DefinitionError( + "Definition {} does not define a variable '{}'".format(path, gtype)) + return grafanalibtype + + +def run_script(f): + sys.exit(f(sys.argv[1:])) + + +""" +AlertGroup generation +""" + + +def print_alertgroup(dashboard): + write_dashboard(dashboard, stream=sys.stdout) + + +def write_alertgroups(paths): + for path in paths: + assert path.endswith(ALERTGROUP_SUFFIX) + dashboard = loader(path) + with open(get_alertgroup_json_path(path), 'w') as json_file: + write_dashboard(dashboard, json_file) + + +def get_alertgroup_json_path(path): + assert path.endswith(ALERTGROUP_SUFFIX) + return '{}.json'.format(path[:-len(ALERTGROUP_SUFFIX)]) + + +def alertgroup_path(path): + abspath = os.path.abspath(path) + if not abspath.endswith(ALERTGROUP_SUFFIX): + raise argparse.ArgumentTypeError( + 'AlertGroup file {} does not end with {}'.format( + path, ALERTGROUP_SUFFIX)) + return abspath + + +def generate_alertgroups(args): + """Script for generating multiple alertgroups at a time""" + parser = argparse.ArgumentParser(prog='generate-alertgroups') + parser.add_argument( + 'alertgroups', metavar='ALERT', type=os.path.abspath, + nargs='+', help='Path to alertgroup definition', + ) + opts = parser.parse_args(args) + try: + write_alertgroups(opts.alertgroups) + except AlertGroupError as e: + sys.stderr.write('ERROR: {}\n'.format(e)) + return 1 + return 0 + + +def generate_alertgroup(args): + parser = argparse.ArgumentParser(prog='generate-alertgroup') + parser.add_argument( + '--output', '-o', type=os.path.abspath, + help='Where to write the alertgroup JSON' + ) + parser.add_argument( + 'alertgroup', metavar='ALERT', type=os.path.abspath, + help='Path to alertgroup definition', + ) + opts = parser.parse_args(args) + try: + alertgroup = loader(opts.alertgroup) + if not opts.output: + print_alertgroup(alertgroup) + else: + with open(opts.output, 'w') as output: + write_alertgroup(alertgroup, output) + except AlertGroupError as e: + sys.stderr.write('ERROR: {}\n'.format(e)) + return 1 + return 0 + + +def generate_alertgroups_script(): + """Entry point for generate-alertgroups.""" + run_script(generate_alertgroups) + + +def generate_alertgroup_script(): + """Entry point for generate-alertgroup.""" + run_script(generate_alertgroup) + + +""" +Dashboard generation +""" + + def print_dashboard(dashboard): write_dashboard(dashboard, stream=sys.stdout) def write_dashboards(paths): for path in paths: - dashboard = load_dashboard(path) - with open(get_json_path(path), 'w') as json_file: + assert path.endswith(DASHBOARD_SUFFIX) + dashboard = loader(path) + with open(get_dashboard_json_path(path), 'w') as json_file: write_dashboard(dashboard, json_file) -def get_json_path(path): +def get_dashboard_json_path(path): 
assert path.endswith(DASHBOARD_SUFFIX) return '{}.json'.format(path[:-len(DASHBOARD_SUFFIX)]) @@ -100,7 +220,7 @@ def generate_dashboard(args): ) opts = parser.parse_args(args) try: - dashboard = load_dashboard(opts.dashboard) + dashboard = loader(opts.dashboard) if not opts.output: print_dashboard(dashboard) else: @@ -112,15 +232,11 @@ def generate_dashboard(args): return 0 -def run_script(f): - sys.exit(f(sys.argv[1:])) - - def generate_dashboards_script(): """Entry point for generate-dashboards.""" run_script(generate_dashboards) def generate_dashboard_script(): - """Entry point for generate-dasboard.""" + """Entry point for generate-dashboard.""" run_script(generate_dashboard) diff --git a/grafanalib/azuremonitor.py b/grafanalib/azuremonitor.py new file mode 100644 index 00000000..7cb324e8 --- /dev/null +++ b/grafanalib/azuremonitor.py @@ -0,0 +1,120 @@ +"""Helpers to create Azure Monitor specific Grafana queries.""" + +import attr +from attr.validators import instance_of + + +@attr.s +class AzureMonitorMetricsTarget(object): + """ + Generates Azure Monitor Metrics target JSON structure. + + Grafana docs on using Azure Monitor: + https://grafana.com/docs/grafana/latest/datasources/azuremonitor/#querying-azure-monitor-metrics + + :param aggregation: Metrics Aggregation (Total, None, Minimum, Maximum, Average, Count) + :param dimensionFilters: Dimension Filters + :param metricsDefinition: Metrics Definition https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported + :param metricNamespace: Metrics Namespace + :param resourceGroup: Resource Group the resource resides in + :param timeGrain: Time Granularity + :param queryType: Type of Query (Azure Monitor in this case) + :param subscription: Azure Subscription ID to scope to + :param refId: Reference ID for the target + """ + + aggregation = attr.ib(default="Total") + dimensionFilters = attr.ib(factory=list, validator=instance_of(list)) + metricDefinition = attr.ib(default="") + metricName = attr.ib(default="") + metricNamespace = attr.ib(default="") + resourceGroup = attr.ib(default="") + resourceName = attr.ib(default="") + timeGrain = attr.ib(default="auto") + queryType = attr.ib(default="Azure Monitor") + subscription = attr.ib(default="") + refId = attr.ib(default="") + alias = attr.ib(default="") + + def to_json_data(self): + return { + "azureMonitor": { + "aggregation": self.aggregation, + "alias": self.alias, + "dimensionFilters": self.dimensionFilters, + "metricDefinition": self.metricDefinition, + "metricName": self.metricName, + "metricNamespace": self.metricNamespace, + "resourceGroup": self.resourceGroup, + "resourceName": self.resourceName, + "timeGrain": self.timeGrain, + }, + "queryType": self.queryType, + "refId": self.refId, + "subscription": self.subscription, + } + + +@attr.s +class AzureLogsTarget(object): + """ + Generates Azure Monitor Logs target JSON structure. + + Grafana docs on using Azure Logs: + https://grafana.com/docs/grafana/latest/datasources/azuremonitor/#querying-azure-monitor-logs + + :param query: Query to execute + :param resource: Identification string for resource e.g. 
/subscriptions/1234-abcd/resourceGroups/myResourceGroup/providers/Microsoft.DataFactory/factories/myDataFactory + :param resultFormat: Output Format of the logs + :param queryType: Type of Query (Azure Log Analytics in this case) + :param subscription: Azure Subscription ID to scope to + :param refId: Reference ID for the target + """ + + query = attr.ib(default="") + resource = attr.ib(default="") + resultFormat = attr.ib(default="table") + queryType = attr.ib(default="Azure Log Analytics") + subscription = attr.ib(default="") + refId = attr.ib(default="") + + def to_json_data(self): + return { + "azureLogAnalytics": { + "query": self.query, + "resource": self.resource, + "resultFormat": self.resultFormat, + }, + "queryType": self.queryType, + "refId": self.refId, + "subscription": self.subscription, + } + + +@attr.s +class AzureResourceGraphTarget(object): + """ + Generates Azure Resource Graph target JSON structure. + + Grafana docs on using Azure Resource Graph: + https://grafana.com/docs/grafana/latest/datasources/azuremonitor/#querying-azure-resource-graph + + :param query: Query to execute + :param queryType: Type of Query (Azure Resource Graph in this case) + :param subscription: Azure Subscription ID to scope to + :param refId: Reference ID for the target + """ + + query = attr.ib(default="") + resource = attr.ib(default="") + queryType = attr.ib(default="Azure Resource Graph") + subscription = attr.ib(default="") + refId = attr.ib(default="") + + def to_json_data(self): + return { + "azureResourceGraph": {"query": self.query}, + "queryType": self.queryType, + "refId": self.refId, + "subscription": self.subscription, + } diff --git a/grafanalib/cloudwatch.py b/grafanalib/cloudwatch.py new file mode 100644 index 00000000..a9a22248 --- /dev/null +++ b/grafanalib/cloudwatch.py @@ -0,0 +1,114 @@ +"""Helpers to create Cloudwatch-specific Grafana queries.""" + +import attr + +from attr.validators import instance_of +from grafanalib.core import Target + + +@attr.s +class CloudwatchMetricsTarget(Target): + """ + Generates Cloudwatch target JSON structure. + + Grafana docs on using Cloudwatch: + https://grafana.com/docs/grafana/latest/datasources/cloudwatch/ + + AWS docs on Cloudwatch metrics: + https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html + + :param alias: legend alias + :param dimensions: Cloudwatch dimensions dict + :param expression: Cloudwatch Metric math expressions + :param id: unique id + :param matchExact: Only show metrics that exactly match all defined dimension names. 
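For orientation, a minimal sketch (not part of this patch) of how the Azure Monitor targets added above in grafanalib/azuremonitor.py might be used; the subscription ID, resource identifiers and queries are placeholders:

from grafanalib.azuremonitor import AzureLogsTarget, AzureMonitorMetricsTarget

# Metrics query against a virtual machine (all identifiers are placeholders).
vm_cpu = AzureMonitorMetricsTarget(
    aggregation="Average",
    metricDefinition="Microsoft.Compute/virtualMachines",
    metricName="Percentage CPU",
    metricNamespace="microsoft.compute/virtualmachines",
    resourceGroup="my-resource-group",
    resourceName="my-vm",
    subscription="00000000-0000-0000-0000-000000000000",
    refId="A",
)

# Log Analytics query scoped to a single resource (resource URI is a placeholder).
app_errors = AzureLogsTarget(
    query="AppTraces | summarize count() by bin(TimeGenerated, 5m)",
    resource="/subscriptions/.../resourceGroups/my-rg/providers/Microsoft.Insights/components/my-app",
    subscription="00000000-0000-0000-0000-000000000000",
    refId="B",
)

Both targets can then be passed in a panel's targets list like any other grafanalib target.
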
+ :param metricName: Cloudwatch metric name + :param namespace: Cloudwatch namespace + :param period: Cloudwatch data period + :param refId: target reference id + :param region: Cloudwatch region + :param statistics: Cloudwatch mathematic statistics (to be deprecated, prefer `statistic` instead) + :param statistic: Cloudwatch mathematic statistic + :param hide: controls if given metric is displayed on visualization + :param datasource: Grafana datasource name + """ + alias = attr.ib(default="") + dimensions = attr.ib(factory=dict, validator=instance_of(dict)) + expression = attr.ib(default="") + id = attr.ib(default="") + matchExact = attr.ib(default=True, validator=instance_of(bool)) + metricName = attr.ib(default="") + namespace = attr.ib(default="") + period = attr.ib(default="") + refId = attr.ib(default="") + region = attr.ib(default="default") + statistics = attr.ib(default=["Average"], validator=instance_of(list)) + statistic = attr.ib(default="Average") + hide = attr.ib(default=False, validator=instance_of(bool)) + datasource = attr.ib(default=None) + + def to_json_data(self): + + return { + "alias": self.alias, + "dimensions": self.dimensions, + "expression": self.expression, + "id": self.id, + "matchExact": self.matchExact, + "metricName": self.metricName, + "namespace": self.namespace, + "period": self.period, + "refId": self.refId, + "region": self.region, + "statistics": self.statistics, + "statistic": self.statistic, + "hide": self.hide, + "datasource": self.datasource, + } + + +@attr.s +class CloudwatchLogsInsightsTarget(Target): + """ + Generates Cloudwatch Logs Insights target JSON structure. + + Grafana docs on using Cloudwatch: + https://grafana.com/docs/grafana/latest/datasources/cloudwatch/ + + AWS docs on Cloudwatch Logs Insights: + https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html + + :param expression: Cloudwatch Logs Insights expressions + :param id: unique id + :param logGroupNames: List of Cloudwatch log groups to query + :param namespace: Cloudwatch namespace + :param refId: target reference id + :param region: Cloudwatch region + :param statsGroups: Cloudwatch statsGroups + :param hide: controls if given metric is displayed on visualization + :param datasource: Grafana datasource name + """ + expression = attr.ib(default="") + id = attr.ib(default="") + logGroupNames = attr.ib(factory=list, validator=instance_of(list)) + namespace = attr.ib(default="") + refId = attr.ib(default="") + region = attr.ib(default="default") + statsGroups = attr.ib(factory=list, validator=instance_of(list)) + hide = attr.ib(default=False, validator=instance_of(bool)) + datasource = attr.ib(default=None) + + def to_json_data(self): + + return { + "expression": self.expression, + "id": self.id, + "logGroupNames": self.logGroupNames, + "namespace": self.namespace, + "queryMode": "Logs", + "refId": self.refId, + "region": self.region, + "statsGroups": self.statsGroups, + "hide": self.hide, + "datasource": self.datasource, + } diff --git a/grafanalib/core.py b/grafanalib/core.py index c6a455bd..e14f0a82 100644 --- a/grafanalib/core.py +++ b/grafanalib/core.py @@ -1,3 +1,4 @@ + """Low-level functions for building Grafana dashboards. The functions in this module don't enforce Weaveworks policy, and only mildly @@ -5,13 +6,14 @@ arbitrary Grafana JSON. 
""" -import attr -from attr.validators import instance_of, in_ import itertools import math -from numbers import Number import string import warnings +from numbers import Number + +import attr +from attr.validators import in_, instance_of @attr.s @@ -40,7 +42,7 @@ class Pixels(object): num = attr.ib(validator=instance_of(int)) def to_json_data(self): - return '{}px'.format(self.num) + return "{}px".format(self.num) @attr.s @@ -48,7 +50,7 @@ class Percent(object): num = attr.ib(default=100, validator=instance_of(Number)) def to_json_data(self): - return '{}%'.format(self.num) + return "{}%".format(self.num) GREY1 = RGBA(216, 200, 27, 0.27) @@ -59,6 +61,7 @@ def to_json_data(self): ORANGE = RGBA(237, 129, 40, 0.89) RED = RGBA(245, 54, 54, 0.9) BLANK = RGBA(0, 0, 0, 0.0) +WHITE = RGB(255, 255, 255) INDIVIDUAL = 'individual' CUMULATIVE = 'cumulative' @@ -71,15 +74,37 @@ def to_json_data(self): ABSOLUTE_TYPE = 'absolute' DASHBOARD_TYPE = 'dashboard' +ROW_TYPE = 'row' GRAPH_TYPE = 'graph' +DISCRETE_TYPE = 'natel-discrete-panel' +EPICT_TYPE = 'larona-epict-panel' +STAT_TYPE = 'stat' SINGLESTAT_TYPE = 'singlestat' +STATE_TIMELINE_TYPE = 'state-timeline' TABLE_TYPE = 'table' TEXT_TYPE = 'text' -ALERTLIST_TYPE = "alertlist" +ALERTLIST_TYPE = 'alertlist' +BARGAUGE_TYPE = 'bargauge' +GAUGE_TYPE = 'gauge' +DASHBOARDLIST_TYPE = 'dashlist' +LOGS_TYPE = 'logs' +HEATMAP_TYPE = 'heatmap' +STATUSMAP_TYPE = 'flant-statusmap-panel' +SVG_TYPE = 'marcuscalidus-svg-panel' +PIE_CHART_TYPE = 'grafana-piechart-panel' +PIE_CHART_V2_TYPE = 'piechart' +TIMESERIES_TYPE = 'timeseries' +WORLD_MAP_TYPE = 'grafana-worldmap-panel' +NEWS_TYPE = 'news' +HISTOGRAM_TYPE = 'histogram' +AE3E_PLOTLY_TYPE = 'ae3e-plotly-panel' +BAR_CHART_TYPE = 'barchart' AJAX_TYPE = 'ryantxu-ajax-panel' DEFAULT_FILL = 1 DEFAULT_REFRESH = '1m' +DEFAULT_ALERT_EVALUATE_INTERVAL = '1m' +DEFAULT_ALERT_EVALUATE_FOR = '5m' DEFAULT_ROW_HEIGHT = Pixels(250) DEFAULT_LINE_WIDTH = 2 DEFAULT_POINT_RADIUS = 5 @@ -95,131 +120,137 @@ def to_json_data(self): SCHEMA_VERSION = 12 -# Y Axis formats -# more here: https://github.com/grafana/grafana/blob/master/packages/grafana-ui/src/utils/valueFormats/categories.ts # noqa: E501 - -# MISC -NO_FORMAT = "none" -SHORT_FORMAT = "short" -PERCENT_FORMAT = "percent" # 1%, 80%, etc -PERCENT_UNIT_FORMAT = "percentunit" # 0.01, 0.8 - -# Date -DATE_TIME_ISO_FORMAT = "dateTimeAsIso" # YYYY-MM-DD HH:mm:ss -DATE_TIME_US_FORMAT = "dateTimeAsUS" # DD/MM/YYYY h:mm:ss a -DATE_TIME_NOW = "dateTimeFromNow" - -# Time -HERTZ_FORMAT = "hertz" -NANOSECONDS_FORMAT = "ns" -MICROSECONDS_FORMAT = "µs" # on a Mac: opt + m -MILLISECONDS_FORMAT = "ms" -SECONDS_FORMAT = "s" -MINUTES_FORMAT = "m" -HOURS_FORMAT = "h" -DAYS_FORMAT = "d" -DURATION_MS_FORMAT = "dtdurationms" # duration in milliseconds -DURATION_FORMAT = "dtdurations" # duration in seconds -TIMETICKS_FORMAT = "timeticks" -CLOCK_MS_FORMAT = "clockms" -CLOCK_S_FORMAT = "clocks" - -# Throughput -OPS_FORMAT = "ops" # ops per second -REQ_PER_SEC_FORMAT = "reqps" -READS_PER_SEC_FORMAT = "rps" -WRITES_PER_SEC_FORMAT = "wps" -IOPS_FORMAT = "iops" -OPM_FORMAT = "opm" -READS_PER_MIN = "rpm" -WRITES_PER_MIN = "wpm" - - -# Data -BITS_FORMAT = "bits" -BYTES_FORMAT = "bytes" -KIBIBYTES_FORMAT = "kbytes" # 2^10 -MEBIBYTES_FORMAT = "mbytes" # 2^20 -GIBIBYTES_FORMAT = "gbytes" # 2^30 - -KILOBYTES_FORMAT = "deckbytes" # 10^3 -MEGABYTES_FORMAT = "decmbytes" # 10^6 -GIGABYTES_FORMAT = "decgbytes" # 10^9 - -# Data rate +# (DEPRECATED: use formatunits.py) Y Axis formats +DURATION_FORMAT = 'dtdurations' 
+NO_FORMAT = 'none' +OPS_FORMAT = 'ops' +PERCENT_UNIT_FORMAT = 'percentunit' +DAYS_FORMAT = 'd' +HOURS_FORMAT = 'h' +MINUTES_FORMAT = 'm' +SECONDS_FORMAT = 's' +MILLISECONDS_FORMAT = 'ms' +SHORT_FORMAT = 'short' +BYTES_FORMAT = 'bytes' +BITS_PER_SEC_FORMAT = 'bps' +BYTES_PER_SEC_FORMAT = 'Bps' +NONE_FORMAT = 'none' +JOULE_FORMAT = 'joule' +WATTHOUR_FORMAT = 'watth' +WATT_FORMAT = 'watt' +KWATT_FORMAT = 'kwatt' +KWATTHOUR_FORMAT = 'kwatth' +VOLT_FORMAT = 'volt' +BAR_FORMAT = 'pressurebar' +PSI_FORMAT = 'pressurepsi' +CELSIUS_FORMAT = 'celsius' +KELVIN_FORMAT = 'kelvin' +GRAM_FORMAT = 'massg' +EUR_FORMAT = 'currencyEUR' +USD_FORMAT = 'currencyUSD' +METER_FORMAT = 'lengthm' +SQUARE_METER_FORMAT = 'areaM2' +CUBIC_METER_FORMAT = 'm3' +LITRE_FORMAT = 'litre' +PERCENT_FORMAT = 'percent' +VOLT_AMPERE_FORMAT = 'voltamp' PACKETS_PER_SEC_FORMAT = "pps" -BITS_PER_SEC_FORMAT = "bps" -BYTES_PER_SEC_FORMAT = "Bps" -KILOBYTES_PER_SEC_FORMAT = "KBs" -KILOBITS_PER_SEC_FORMAT = "Kbits" -MEGABYTES_PER_SEC_FORMAT = "MBs" -MEGABITES_PER_SEC_FORMAT = "Mbits" -GIGABYTES_PER_SEC_FORMAT = "GBs" -GIGABITS_PER_SEC_FORMAT = "Gbits" # Alert rule state -STATE_NO_DATA = "no_data" -STATE_ALERTING = "alerting" -STATE_KEEP_LAST_STATE = "keep_state" +STATE_NO_DATA = 'no_data' +STATE_ALERTING = 'alerting' +STATE_KEEP_LAST_STATE = 'keep_state' +STATE_OK = 'ok' # Evaluator -EVAL_GT = "gt" -EVAL_LT = "lt" -EVAL_WITHIN_RANGE = "within_range" -EVAL_OUTSIDE_RANGE = "outside_range" -EVAL_NO_VALUE = "no_value" - -# Reducer Type avg/min/max/sum/count/last/median -RTYPE_AVG = "avg" -RTYPE_MIN = "min" -RTYPE_MAX = "max" -RTYPE_SUM = "sum" -RTYPE_COUNT = "count" -RTYPE_LAST = "last" -RTYPE_MEDIAN = "median" +EVAL_GT = 'gt' +EVAL_LT = 'lt' +EVAL_WITHIN_RANGE = 'within_range' +EVAL_OUTSIDE_RANGE = 'outside_range' +EVAL_NO_VALUE = 'no_value' + +# Reducer Type +# avg/min/max/sum/count/last/median/diff/percent_diff/count_non_null +RTYPE_AVG = 'avg' +RTYPE_MIN = 'min' +RTYPE_MAX = 'max' +RTYPE_SUM = 'sum' +RTYPE_COUNT = 'count' +RTYPE_LAST = 'last' +RTYPE_MEDIAN = 'median' +RTYPE_DIFF = 'diff' +RTYPE_PERCENT_DIFF = 'percent_diff' +RTYPE_COUNT_NON_NULL = 'count_non_null' # Condition Type -CTYPE_QUERY = "query" +CTYPE_QUERY = 'query' # Operator -OP_AND = "and" -OP_OR = "or" +OP_AND = 'and' +OP_OR = 'or' + +# Alert Expression Types +# classic/reduce/resample/math +EXP_TYPE_CLASSIC = 'classic_conditions' +EXP_TYPE_REDUCE = 'reduce' +EXP_TYPE_RESAMPLE = 'resample' +EXP_TYPE_MATH = 'math' + +# Alert Expression Reducer Function +EXP_REDUCER_FUNC_MIN = 'min' +EXP_REDUCER_FUNC_MAX = 'max' +EXP_REDUCER_FUNC_MEAN = 'mean' +EXP_REDUCER_FUNC_SUM = 'sum' +EXP_REDUCER_FUNC_COUNT = 'count' +EXP_REDUCER_FUNC_LAST = 'last' + +# Alert Expression Reducer Mode +EXP_REDUCER_MODE_STRICT = 'strict' +EXP_REDUCER_FUNC_DROP_NN = 'dropNN' +EXP_REDUCER_FUNC_REPLACE_NN = 'replaceNN' # Text panel modes -TEXT_MODE_MARKDOWN = "markdown" -TEXT_MODE_HTML = "html" -TEXT_MODE_TEXT = "text" +TEXT_MODE_MARKDOWN = 'markdown' +TEXT_MODE_HTML = 'html' +TEXT_MODE_TEXT = 'text' # Datasource plugins -PLUGIN_ID_GRAPHITE = "graphite" -PLUGIN_ID_PROMETHEUS = "prometheus" -PLUGIN_ID_INFLUXDB = "influxdb" -PLUGIN_ID_OPENTSDB = "opentsdb" -PLUGIN_ID_ELASTICSEARCH = "elasticsearch" -PLUGIN_ID_CLOUDWATCH = "cloudwatch" +PLUGIN_ID_GRAPHITE = 'graphite' +PLUGIN_ID_PROMETHEUS = 'prometheus' +PLUGIN_ID_INFLUXDB = 'influxdb' +PLUGIN_ID_OPENTSDB = 'opentsdb' +PLUGIN_ID_ELASTICSEARCH = 'elasticsearch' +PLUGIN_ID_CLOUDWATCH = 'cloudwatch' # Target formats -TIME_SERIES_TARGET_FORMAT = "time_series" 
-TABLE_TARGET_FORMAT = "table" +TIME_SERIES_TARGET_FORMAT = 'time_series' +TABLE_TARGET_FORMAT = 'table' # Table Transforms -AGGREGATIONS_TRANSFORM = "timeseries_aggregations" -ANNOTATIONS_TRANSFORM = "annotations" -COLUMNS_TRANSFORM = "timeseries_to_columns" -JSON_TRANSFORM = "json" -ROWS_TRANSFORM = "timeseries_to_rows" -TABLE_TRANSFORM = "table" +AGGREGATIONS_TRANSFORM = 'timeseries_aggregations' +ANNOTATIONS_TRANSFORM = 'annotations' +COLUMNS_TRANSFORM = 'timeseries_to_columns' +JSON_TRANSFORM = 'json' +ROWS_TRANSFORM = 'timeseries_to_rows' +TABLE_TRANSFORM = 'table' # AlertList show selections -ALERTLIST_SHOW_CURRENT = "current" -ALERTLIST_SHOW_CHANGES = "changes" +ALERTLIST_SHOW_CURRENT = 'current' +ALERTLIST_SHOW_CHANGES = 'changes' # AlertList state filter options -ALERTLIST_STATE_OK = "ok" -ALERTLIST_STATE_PAUSED = "paused" -ALERTLIST_STATE_NO_DATA = "no_data" -ALERTLIST_STATE_EXECUTION_ERROR = "execution_error" -ALERTLIST_STATE_ALERTING = "alerting" +ALERTLIST_STATE_OK = 'ok' +ALERTLIST_STATE_PAUSED = 'paused' +ALERTLIST_STATE_NO_DATA = 'no_data' +ALERTLIST_STATE_EXECUTION_ERROR = 'execution_error' +ALERTLIST_STATE_ALERTING = 'alerting' +ALERTLIST_STATE_PENDING = 'pending' + +# Alert Rule state filter options (Grafana 8.x) +ALERTRULE_STATE_DATA_OK = 'OK' +ALERTRULE_STATE_DATA_NODATA = 'No Data' +ALERTRULE_STATE_DATA_ALERTING = 'Alerting' +ALERTRULE_STATE_DATA_ERROR = 'Error' # Display Sort Order SORT_ASC = 1 @@ -233,6 +264,42 @@ def to_json_data(self): SHOW = 0 HIDE_LABEL = 1 HIDE_VARIABLE = 2 +SORT_DISABLED = 0 +SORT_ALPHA_ASC = 1 +SORT_ALPHA_DESC = 2 +SORT_NUMERIC_ASC = 3 +SORT_NUMERIC_DESC = 4 +SORT_ALPHA_IGNORE_CASE_ASC = 5 +SORT_ALPHA_IGNORE_CASE_DESC = 6 + +GAUGE_CALC_LAST = 'last' +GAUGE_CALC_FIRST = 'first' +GAUGE_CALC_MIN = 'min' +GAUGE_CALC_MAX = 'max' +GAUGE_CALC_MEAN = 'mean' +GAUGE_CALC_TOTAL = 'total' +GAUGE_CALC_COUNT = 'count' +GAUGE_CALC_RANGE = 'range' +GAUGE_CALC_DELTA = 'delta' +GAUGE_CALC_STEP = 'step' +GAUGE_CALC_DIFFERENCE = 'difference' +GAUGE_CALC_LOGMIN = 'logmin' +GAUGE_CALC_CHANGE_COUNT = 'changeCount' +GAUGE_CALC_DISTINCT_COUNT = 'distinctCount' + +ORIENTATION_HORIZONTAL = 'horizontal' +ORIENTATION_VERTICAL = 'vertical' + +GAUGE_DISPLAY_MODE_BASIC = 'basic' +GAUGE_DISPLAY_MODE_LCD = 'lcd' +GAUGE_DISPLAY_MODE_GRADIENT = 'gradient' + +GRAPH_TOOLTIP_MODE_NOT_SHARED = 0 +GRAPH_TOOLTIP_MODE_SHARED_CROSSHAIR = 1 +GRAPH_TOOLTIP_MODE_SHARED_TOOLTIP = 2 # Shared crosshair AND tooltip + +DEFAULT_AUTO_COUNT = 30 +DEFAULT_MIN_AUTO_INTERVAL = '10s' @attr.s @@ -251,23 +318,153 @@ def to_json_data(self): MAPPING_TYPE_VALUE_TO_TEXT = 1 MAPPING_TYPE_RANGE_TO_TEXT = 2 -MAPPING_VALUE_TO_TEXT = Mapping("value to text", MAPPING_TYPE_VALUE_TO_TEXT) -MAPPING_RANGE_TO_TEXT = Mapping("range to text", MAPPING_TYPE_RANGE_TO_TEXT) +MAPPING_VALUE_TO_TEXT = Mapping('value to text', MAPPING_TYPE_VALUE_TO_TEXT) +MAPPING_RANGE_TO_TEXT = Mapping('range to text', MAPPING_TYPE_RANGE_TO_TEXT) # Value types min/max/avg/current/total/name/first/delta/range -VTYPE_MIN = "min" -VTYPE_MAX = "max" -VTYPE_AVG = "avg" -VTYPE_CURR = "current" -VTYPE_TOTAL = "total" -VTYPE_NAME = "name" -VTYPE_FIRST = "first" -VTYPE_DELTA = "delta" -VTYPE_RANGE = "range" +VTYPE_MIN = 'min' +VTYPE_MAX = 'max' +VTYPE_AVG = 'avg' +VTYPE_CURR = 'current' +VTYPE_TOTAL = 'total' +VTYPE_NAME = 'name' +VTYPE_FIRST = 'first' +VTYPE_DELTA = 'delta' +VTYPE_RANGE = 'range' VTYPE_DEFAULT = VTYPE_AVG +@attr.s +class ePictBox(object): + """ + ePict Box. 
+ + :param angle: Rotation angle of box + :param backgroundColor: Dito + :param blinkHigh: Blink if below threshold + :param blinkLow: Blink if above threshold + :param color: Text color + :param colorHigh: High value color + :param colorLow: Low value color + :param colorMedium: In between value color + :param colorSymbol: Whether to enable background color for symbol + :param customSymbol: URL to custom symbol (will set symbol to "custom" if set) + :param decimal: Number of decimals + :param fontSize: Dito + :param hasBackground: Whether to enable background color for text + :param hasOrb: Whether an orb should be displayed + :param hasSymbol: Whether a (custom) symbol should be displayed + :param isUsingThresholds: Whether to enable thresholds. + :param orbHideText: Whether to hide text next to orb + :param orbLocation: Orb location (choose from 'Left', 'Right', 'Top' or 'Bottom') + :param orbSize: Dito + :param prefix: Value prefix to be displayed (e.g. °C) + :param prefixSize: Dito + :param selected: Dont know + :param serie: Which series to use data from + :param suffix: Value suffix to be displayed + :param suffixSize: Dito + :param symbol: Automatically placed by the plugin format: `data:image/svg+xml;base64,`, check manually. + :param symbolDefHeight: Dont know + :param symbolDefWidth: Dont know + :param symbolHeight: Dito + :param symbolHideText: Whether to hide value text next to symbol + :param symbolWidth: Dito + :param text: Dont know + :param thresholds: Coloring thresholds: Enter 2 + comma-separated numbers. 20,60 will produce: value <= 20 -> green; + value between 20 and 60 -> yellow; value >= 60 -> red. If set, it will also set + isUsingThresholds to True + :param url: URL to open when clicked on + :param xpos: X in (0, X size of image) + :param ypos: Y in (0, Y size of image) + """ + + angle = attr.ib(default=0, validator=instance_of(int)) + backgroundColor = attr.ib(default="#000", validator=instance_of((RGBA, RGB, str))) + blinkHigh = attr.ib(default=False, validator=instance_of(bool)) + blinkLow = attr.ib(default=False, validator=instance_of(bool)) + color = attr.ib(default="#000", validator=instance_of((RGBA, RGB, str))) + colorHigh = attr.ib(default="#000", validator=instance_of((RGBA, RGB, str))) + colorLow = attr.ib(default="#000", validator=instance_of((RGBA, RGB, str))) + colorMedium = attr.ib(default="#000", validator=instance_of((RGBA, RGB, str))) + colorSymbol = attr.ib(default=False, validator=instance_of(bool)) + customSymbol = attr.ib(default="", validator=instance_of(str)) + decimal = attr.ib(default=0, validator=instance_of(int)) + fontSize = attr.ib(default=12, validator=instance_of(int)) + hasBackground = attr.ib(default=False, validator=instance_of(bool)) + hasOrb = attr.ib(default=False, validator=instance_of(bool)) + hasSymbol = attr.ib(default=False, validator=instance_of(bool)) + isUsingThresholds = attr.ib(default=False, validator=instance_of(bool)) + orbHideText = attr.ib(default=False, validator=instance_of(bool)) + orbLocation = attr.ib( + default="Left", + validator=in_(['Left', 'Right', 'Top', 'Bottom']) + ) + orbSize = attr.ib(default=13, validator=instance_of(int)) + prefix = attr.ib(default="", validator=instance_of(str)) + prefixSize = attr.ib(default=10, validator=instance_of(int)) + selected = attr.ib(default=False, validator=instance_of(bool)) + serie = attr.ib(default="", validator=instance_of(str)) + suffix = attr.ib(default="", validator=instance_of(str)) + suffixSize = attr.ib(default=10, validator=instance_of(int)) + symbol = 
attr.ib(default="", validator=instance_of(str)) + symbolDefHeight = attr.ib(default=32, validator=instance_of(int)) + symbolDefWidth = attr.ib(default=32, validator=instance_of(int)) + symbolHeight = attr.ib(default=32, validator=instance_of(int)) + symbolHideText = attr.ib(default=False, validator=instance_of(bool)) + symbolWidth = attr.ib(default=32, validator=instance_of(int)) + text = attr.ib(default="N/A", validator=instance_of(str)) + thresholds = attr.ib(default="", validator=instance_of(str)) + url = attr.ib(default="", validator=instance_of(str)) + xpos = attr.ib(default=0, validator=instance_of(int)) + ypos = attr.ib(default=0, validator=instance_of(int)) + + def to_json_data(self): + self.symbol = "custom" if self.customSymbol else self.symbol + self.isUsingThresholds = bool(self.thresholds) + + return { + "angle": self.angle, + "backgroundColor": self.backgroundColor, + "blinkHigh": self.blinkHigh, + "blinkLow": self.blinkLow, + "color": self.color, + "colorHigh": self.colorHigh, + "colorLow": self.colorLow, + "colorMedium": self.colorMedium, + "colorSymbol": self.colorSymbol, + "customSymbol": self.customSymbol, + "decimal": self.decimal, + "fontSize": self.fontSize, + "hasBackground": self.hasBackground, + "hasOrb": self.hasOrb, + "hasSymbol": self.hasSymbol, + "isUsingThresholds": self.isUsingThresholds, + "orbHideText": self.orbHideText, + "orbLocation": self.orbLocation, + "orbSize": self.orbSize, + "prefix": self.prefix, + "prefixSize": self.prefixSize, + "selected": self.selected, + "serie": self.serie, + "suffix": self.suffix, + "suffixSize": self.suffixSize, + "symbol": self.symbol, + "symbolDefHeight": self.symbolDefHeight, + "symbolDefWidth": self.symbolDefWidth, + "symbolHeight": self.symbolHeight, + "symbolHideText": self.symbolHideText, + "symbolWidth": self.symbolWidth, + "text": self.text, + "thresholds": self.thresholds, + "url": self.url, + "xpos": self.xpos, + "ypos": self.ypos, + } + + @attr.s class Grid(object): @@ -330,6 +527,42 @@ def to_json_data(self): } +def is_valid_max_per_row(instance, attribute, value): + if ((value is not None) and not isinstance(value, int)): + raise ValueError("{attr} should either be None or an integer".format( + attr=attribute)) + + +@attr.s +class Repeat(object): + """ + Panel repetition settings. 
+ + :param direction: The direction into which to repeat ('h' or 'v') + :param variable: The name of the variable over whose values to repeat + :param maxPerRow: The maximum number of panels per row in horizontal repetition + """ + + direction = attr.ib(default=None) + variable = attr.ib(default=None) + maxPerRow = attr.ib(default=None, validator=is_valid_max_per_row) + + def to_json_data(self): + return { + 'direction': self.direction, + 'variable': self.variable, + 'maxPerRow': self.maxPerRow, + } + + +def is_valid_target(instance, attribute, value): + """ + Check if a given attribute is a valid Target + """ + if value.refId == "": + raise ValueError(f"{attribute.name} should have non-empty 'refId' attribute") + + @attr.s class Target(object): """ @@ -340,6 +573,7 @@ class Target(object): expr = attr.ib(default="") format = attr.ib(default=TIME_SERIES_TARGET_FORMAT) + hide = attr.ib(default=False, validator=instance_of(bool)) legendFormat = attr.ib(default="") interval = attr.ib(default="", validator=instance_of(str)) intervalFactor = attr.ib(default=2) @@ -354,8 +588,10 @@ class Target(object): def to_json_data(self): return { 'expr': self.expr, + 'query': self.expr, 'target': self.target, 'format': self.format, + 'hide': self.hide, 'interval': self.interval, 'intervalFactor': self.intervalFactor, 'legendFormat': self.legendFormat, @@ -364,7 +600,6 @@ def to_json_data(self): 'step': self.step, 'instant': self.instant, 'datasource': self.datasource, - 'hide': self.hide, } @@ -403,6 +638,41 @@ def to_json_data(self): } +@attr.s +class SqlTarget(Target): + """ + Metric target to support SQL queries + """ + + rawSql = attr.ib(default="") + rawQuery = attr.ib(default=True) + srcFilePath = attr.ib(default="", validator=instance_of(str)) + sqlParams = attr.ib(factory=dict, validator=instance_of(dict)) + + def __attrs_post_init__(self): + """Override rawSql if a path to a source file is provided, + if it is a parameterized query, fill in the parameters. + srcFilePath: this will containt the path to the source file + sqlParams: this will contain the sql parameters to use in the read query + """ + if self.srcFilePath: + with open(self.srcFilePath, "r") as f: + self.rawSql = f.read() + if self.sqlParams is not None: + self.rawSql = self.rawSql.format(**self.sqlParams) + + def to_json_data(self): + """Override the Target to_json_data to add additional fields. + rawSql: this will contain the actual SQL queries + rawQuery: this is set to True by default as in case of False + the rawSql would be unused + """ + super_json = super(SqlTarget, self).to_json_data() + super_json["rawSql"] = self.rawSql + super_json["rawQuery"] = self.rawQuery + return super_json + + @attr.s class Tooltip(object): @@ -421,7 +691,7 @@ def to_json_data(self): def is_valid_xaxis_mode(instance, attribute, value): - XAXIS_MODES = ("time", "series") + XAXIS_MODES = ('time', 'series') if value not in XAXIS_MODES: raise ValueError("{attr} should be one of {choice}".format( attr=attribute, choice=XAXIS_MODES)) @@ -429,14 +699,25 @@ def is_valid_xaxis_mode(instance, attribute, value): @attr.s class XAxis(object): + """ + X Axis + + :param mode: Mode of axis can be time, series or histogram + :param name: X axis name + :param value: list of values eg. 
["current"] or ["avg"] + :param show: show X axis + """ - mode = attr.ib(default="time", validator=is_valid_xaxis_mode) + mode = attr.ib(default='time', validator=is_valid_xaxis_mode) name = attr.ib(default=None) values = attr.ib(default=attr.Factory(list)) show = attr.ib(validator=instance_of(bool), default=True) def to_json_data(self): return { + 'mode': self.mode, + 'name': self.name, + 'values': self.values, 'show': self.show, } @@ -446,13 +727,22 @@ class YAxis(object): """A single Y axis. Grafana graphs have two Y axes: one on the left and one on the right. + + :param decimals: Defines how many decimals are displayed for Y value. (default auto) + :param format: The display unit for the Y value + :param label: The Y axis label. (default “") + :param logBase: The scale to use for the Y value, linear, or logarithmic. (default linear) + :param max: The maximum Y value + :param min: The minimum Y value + :param show: Show or hide the axis """ + decimals = attr.ib(default=None) format = attr.ib(default=None) label = attr.ib(default=None) logBase = attr.ib(default=1) max = attr.ib(default=None) - min = attr.ib(default=0) + min = attr.ib(default=None) show = attr.ib(default=True, validator=instance_of(bool)) def to_json_data(self): @@ -529,52 +819,34 @@ def _balance_panels(panels): auto_span = math.ceil( (TOTAL_SPAN - allotted_spans) / (len(no_span_set) or 1)) return [ - attr.assoc(panel, span=auto_span) if panel.span is None else panel + attr.evolve(panel, span=auto_span) if panel.span is None else panel for panel in panels ] @attr.s -class Row(object): - # TODO: jml would like to separate the balancing behaviour from this - # layer. - panels = attr.ib(default=attr.Factory(list), converter=_balance_panels) - collapse = attr.ib( - default=False, validator=instance_of(bool), - ) - editable = attr.ib( - default=True, validator=instance_of(bool), - ) - height = attr.ib( - default=attr.Factory(lambda: DEFAULT_ROW_HEIGHT), - validator=instance_of(Pixels), - ) - showTitle = attr.ib(default=None) - title = attr.ib(default=None) - repeat = attr.ib(default=None) - - def _iter_panels(self): - return iter(self.panels) +class GridPos(object): + """GridPos describes the panel size and position in grid coordinates. 
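An aside on the SqlTarget class added earlier in this file: queries can be supplied inline or loaded from a parameterised file. A rough sketch, assuming illustrative SQL, file path and parameter values:

from grafanalib.core import SqlTarget

# Inline SQL query.
signups = SqlTarget(
    rawSql="SELECT created_at AS time, count(*) FROM signups GROUP BY 1 ORDER BY 1",
    refId="A",
)

# Or read a parameterised query from a file; the file must exist when the
# dashboard is generated and may contain e.g. "... WHERE region = '{region}'".
# signups = SqlTarget(
#     srcFilePath="queries/signups.sql",
#     sqlParams={"region": "eu-west-1"},
#     refId="A",
# )
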
+ + :param h: height of the panel, grid height units each represents + 30 pixels + :param w: width of the panel 1-24 (the width of the dashboard + is divided into 24 columns) + :param x: x cordinate of the panel, in same unit as w + :param y: y cordinate of the panel, in same unit as h + """ - def _map_panels(self, f): - return attr.assoc(self, panels=list(map(f, self.panels))) + h = attr.ib() + w = attr.ib() + x = attr.ib() + y = attr.ib() def to_json_data(self): - showTitle = False - title = "New row" - if self.title is not None: - showTitle = True - title = self.title - if self.showTitle is not None: - showTitle = self.showTitle return { - 'collapse': self.collapse, - 'editable': self.editable, - 'height': self.height, - 'panels': self.panels, - 'showTitle': showTitle, - 'title': title, - 'repeat': self.repeat, + 'h': self.h, + 'w': self.w, + 'x': self.x, + 'y': self.y } @@ -588,6 +860,20 @@ def to_json_data(self): } +@attr.s +class DataLink(object): + title = attr.ib() + linkUrl = attr.ib(default="", validator=instance_of(str)) + isNewTab = attr.ib(default=False, validator=instance_of(bool)) + + def to_json_data(self): + return { + 'title': self.title, + 'url': self.linkUrl, + 'targetBlank': self.isNewTab, + } + + @attr.s class DataSourceInput(object): name = attr.ib() @@ -598,12 +884,12 @@ class DataSourceInput(object): def to_json_data(self): return { - "description": self.description, - "label": self.label, - "name": self.name, - "pluginId": self.pluginId, - "pluginName": self.pluginName, - "type": "datasource", + 'description': self.description, + 'label': self.label, + 'name': self.name, + 'pluginId': self.pluginId, + 'pluginName': self.pluginName, + 'type': 'datasource', } @@ -616,11 +902,11 @@ class ConstantInput(object): def to_json_data(self): return { - "description": self.description, - "label": self.label, - "name": self.name, - "type": "constant", - "value": self.value, + 'description': self.description, + 'label': self.label, + 'name': self.name, + 'type': 'constant', + 'value': self.value, } @@ -638,24 +924,24 @@ class DashboardLink(object): def to_json_data(self): title = self.dashboard if self.title is None else self.title return { - "dashUri": self.uri, - "dashboard": self.dashboard, - "keepTime": self.keepTime, - "title": title, - "type": self.type, - "url": self.uri, + 'dashUri': self.uri, + 'dashboard': self.dashboard, + 'keepTime': self.keepTime, + 'title': title, + 'type': self.type, + 'url': self.uri, } @attr.s class ExternalLink(object): - '''ExternalLink creates a top-level link attached to a dashboard. + """ExternalLink creates a top-level link attached to a dashboard. - :param url: the URL to link to - :param title: the text of the link - :param keepTime: if true, the URL params for the dashboard's - current time period are appended - ''' + :param url: the URL to link to + :param title: the text of the link + :param keepTime: if true, the URL params for the dashboard's + current time period are appended + """ uri = attr.ib() title = attr.ib() keepTime = attr.ib( @@ -665,10 +951,10 @@ class ExternalLink(object): def to_json_data(self): return { - "keepTime": self.keepTime, - "title": self.title, - "type": 'link', - "url": self.uri, + 'keepTime': self.keepTime, + 'title': self.title, + 'type': 'link', + 'url': self.uri, } @@ -677,24 +963,27 @@ class Template(object): """Template create a new 'variable' for the dashboard, defines the variable name, human name, query to fetch the values and the default value. 
- :param default: the default value for the variable - :param dataSource: where to fetch the values for the variable from - :param label: the variable's human label - :param name: the variable's name - :param query: the query users to fetch the valid values of the variable - :param refresh: Controls when to update values in the dropdown - :param allValue: specify a custom all value with regex, - globs or lucene syntax. - :param includeAll: Add a special All option whose value includes - all options. - :param regex: Regex to filter or capture specific parts of the names - return by your data source query. - :param multi: If enabled, the variable will support the selection of - multiple options at the same time. - :param type: The template type, can be one of: query (default), - interval, datasource, custom, constant, adhoc. - :param hide: Hide this variable in the dashboard, can be one of: - SHOW (default), HIDE_LABEL, HIDE_VARIABLE + :param default: the default value for the variable + :param dataSource: where to fetch the values for the variable from + :param label: the variable's human label + :param name: the variable's name + :param query: the query users to fetch the valid values of the variable + :param refresh: Controls when to update values in the dropdown + :param allValue: specify a custom all value with regex, + globs or lucene syntax. + :param includeAll: Add a special All option whose value includes + all options. + :param regex: Regex to filter or capture specific parts of the names + return by your data source query. + :param multi: If enabled, the variable will support the selection of + multiple options at the same time. + :param type: The template type, can be one of: query (default), + interval, datasource, custom, constant, adhoc. + :param hide: Hide this variable in the dashboard, can be one of: + SHOW (default), HIDE_LABEL, HIDE_VARIABLE + :param auto: Interval will be dynamically calculated by dividing time range by the count specified in auto_count. + :param autoCount: Number of intervals for dividing the time range. + :param autoMin: Smallest interval for auto interval generator. 
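The auto, autoCount and autoMin options documented above drive Grafana's auto-interval behaviour for interval variables. A small sketch with illustrative values:

from grafanalib.core import Template

interval = Template(
    name="interval",
    label="Interval",
    query="1m,5m,1h,6h,1d",
    type="interval",
    auto=True,       # add Grafana's "auto" option to the dropdown
    autoCount=30,    # divide the time range into up to 30 steps
    autoMin="10s",   # never go below a 10s interval
)
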
""" name = attr.ib() @@ -724,6 +1013,16 @@ class Template(object): validator=instance_of(int)) type = attr.ib(default='query') hide = attr.ib(default=SHOW) + sort = attr.ib(default=SORT_ALPHA_ASC) + auto = attr.ib( + default=False, + validator=instance_of(bool), + ) + autoCount = attr.ib( + default=DEFAULT_AUTO_COUNT, + validator=instance_of(int) + ) + autoMin = attr.ib(default=DEFAULT_MIN_AUTO_INTERVAL) def __attrs_post_init__(self): if self.type == 'custom': @@ -731,9 +1030,9 @@ def __attrs_post_init__(self): for value in self.query.split(','): is_default = value == self.default option = { - "selected": is_default, - "text": value, - "value": value, + 'selected': is_default, + 'text': value, + 'value': value, } if is_default: self._current = option @@ -745,6 +1044,7 @@ def __attrs_post_init__(self): break else: self._current = { + 'selected': False if self.default is None or not self.default else True, 'text': self.default, 'value': self.default, 'tags': [], @@ -764,11 +1064,14 @@ def to_json_data(self): 'query': self.query, 'refresh': self.refresh, 'regex': self.regex, - 'sort': 1, + 'sort': self.sort, 'type': self.type, 'useTags': self.useTags, 'tagsQuery': self.tagsQuery, 'tagValuesQuery': self.tagValuesQuery, + 'auto': self.auto, + 'auto_min': self.autoMin, + 'auto_count': self.autoCount } @@ -799,39 +1102,57 @@ def to_json_data(self): @attr.s class TimePicker(object): + """ + Time Picker + + :param refreshIntervals: dashboard auto-refresh interval options + :param timeOptions: dashboard time range options + :param nowDelay: exclude recent data that may be incomplete, as a + number + unit (s: second, m: minute, h: hour, etc) + :param hidden: hide the time picker from dashboard + """ refreshIntervals = attr.ib() timeOptions = attr.ib() + nowDelay = attr.ib( + default=None, + ) + hidden = attr.ib( + default=False, + validator=instance_of(bool), + ) def to_json_data(self): return { 'refresh_intervals': self.refreshIntervals, 'time_options': self.timeOptions, + 'nowDelay': self.nowDelay, + 'hidden': self.hidden } DEFAULT_TIME_PICKER = TimePicker( refreshIntervals=[ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" + '5s', + '10s', + '30s', + '1m', + '5m', + '15m', + '30m', + '1h', + '2h', + '1d' ], timeOptions=[ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" + '5m', + '15m', + '1h', + '6h', + '12h', + '24h', + '2d', + '7d', + '30d' ] ) @@ -843,8 +1164,8 @@ class Evaluator(object): def to_json_data(self): return { - "type": self.type, - "params": self.params, + 'type': self.type, + 'params': self.params, } @@ -892,69 +1213,519 @@ class AlertCondition(object): """ A condition on an alert. - :param Target target: Metric the alert condition is based on. + :param Target target: Metric the alert condition is based on. Not required at instantiation for Grafana 8.x alerts. :param Evaluator evaluator: How we decide whether we should alert on the metric. e.g. ``GreaterThan(5)`` means the metric must be greater than 5 to trigger the condition. See ``GreaterThan``, ``LowerThan``, ``WithinRange``, ``OutsideRange``, ``NoValue``. :param TimeRange timeRange: How long the condition must be true for before - we alert. + we alert. For Grafana 8.x alerts, this should be specified in the AlertRule instead. :param operator: One of ``OP_AND`` or ``OP_OR``. How this condition combines with other conditions. 
:param reducerType: RTYPE_* + Supported reducer types: + RTYPE_AVG = 'avg' + RTYPE_MIN = 'min' + RTYPE_MAX = 'max' + RTYPE_SUM = 'sum' + RTYPE_COUNT = 'count' + RTYPE_LAST = 'last' + RTYPE_MEDIAN = 'median' + RTYPE_DIFF = 'diff' + RTYPE_PERCENT_DIFF = 'percent_diff' + RTYPE_COUNT_NON_NULL = 'count_non_null' + :param useNewAlerts: Whether or not the alert condition is used as part of the Grafana 8.x alerts. + Defaults to False for compatibility with old Grafana alerts, but automatically overridden to true + when used inside ``AlertExpression`` or ``AlertRulev8`` :param type: CTYPE_* """ - target = attr.ib(validator=instance_of(Target)) - evaluator = attr.ib(validator=instance_of(Evaluator)) - timeRange = attr.ib(validator=instance_of(TimeRange)) - operator = attr.ib() - reducerType = attr.ib() - type = attr.ib(default=CTYPE_QUERY) + target = attr.ib(default=None, validator=attr.validators.optional(is_valid_target)) + evaluator = attr.ib(default=None, validator=instance_of(Evaluator)) + timeRange = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(TimeRange))) + operator = attr.ib(default=OP_AND) + reducerType = attr.ib(default=RTYPE_LAST) + useNewAlerts = attr.ib(default=False) + + type = attr.ib(default=CTYPE_QUERY, kw_only=True) + + def __get_query_params(self): + # Grafana 8.x alerts do not put the time range in the query params. + if self.useNewAlerts: + return [self.target.refId] + + return [self.target.refId, self.timeRange.from_time, self.timeRange.to_time] def to_json_data(self): - queryParams = [ - self.target.refId, self.timeRange.from_time, self.timeRange.to_time - ] - return { - "evaluator": self.evaluator, - "operator": { - "type": self.operator, + condition = { + 'evaluator': self.evaluator.to_json_data(), + 'operator': { + 'type': self.operator, }, - "query": { - "model": self.target, - "params": queryParams, + 'query': { + 'model': self.target.to_json_data(), + 'params': self.__get_query_params(), }, - "reducer": { - "params": [], - "type": self.reducerType, + 'reducer': { + 'params': [], + 'type': self.reducerType, }, - "type": self.type, + 'type': self.type, } + # Grafana 8.x alerts do not put the target inside the alert condition. + if self.useNewAlerts: + del condition['query']['model'] + + return condition + @attr.s -class Alert(object): +class AlertExpression(object): + """ + A alert expression to be evaluated in Grafana v9.x+ + + :param refId: Expression reference ID (A,B,C,D,...) + :param expression: Input reference ID (A,B,C,D,...) 
for expression to evaluate, or in the case of the Math type the expression to evaluate + :param conditions: list of AlertConditions + :param expressionType: Expression type EXP_TYPE_* + Supported expression types: + EXP_TYPE_CLASSIC + EXP_TYPE_REDUCE + EXP_TYPE_RESAMPLE + EXP_TYPE_MATH + :param hide: Hide alert boolean + :param intervalMs: Expression evaluation interval + :param maxDataPoints: Maximum number fo data points to be evaluated + + :param reduceFunction: Reducer function (Only used if expressionType=EXP_TYPE_REDUCE) + Supported reducer functions: + EXP_REDUCER_FUNC_MIN + EXP_REDUCER_FUNC_MAX + EXP_REDUCER_FUNC_MEAN + EXP_REDUCER_FUNC_SUM + EXP_REDUCER_FUNC_COUNT + EXP_REDUCER_FUNC_LAST + :param reduceMode: Reducer mode (Only used if expressionType=EXP_TYPE_REDUCE) + Supported reducer modes: + EXP_REDUCER_MODE_STRICT + EXP_REDUCER_FUNC_DROP_NN + EXP_REDUCER_FUNC_REPLACE_NN + :param reduceReplaceWith: When using mode EXP_REDUCER_FUNC_REPLACE_NN number that will replace non numeric values + + :param resampleWindow: Intervale to resample to eg. 10s, 1m, 30m, 1h + :param resampleDownsampler: 'mean', 'min', 'max', 'sum' + :param resampleUpsampler: + 'fillna' - Fill with NaN's + 'pad' - fill with the last known value + 'backfilling' - fill with the next know value + """ + + refId = attr.ib() + expression = attr.ib(validator=instance_of(str)) + conditions = attr.ib(default=attr.Factory(list), validator=attr.validators.deep_iterable( + member_validator=instance_of(AlertCondition), + iterable_validator=instance_of(list) + )) + expressionType = attr.ib( + default=EXP_TYPE_CLASSIC, + validator=in_([ + EXP_TYPE_CLASSIC, + EXP_TYPE_REDUCE, + EXP_TYPE_RESAMPLE, + EXP_TYPE_MATH + ]) + ) + hide = attr.ib(default=False, validator=instance_of(bool)) + intervalMs = attr.ib(default=1000, validator=instance_of(int)) + maxDataPoints = attr.ib(default=43200, validator=instance_of(int)) + + reduceFunction = attr.ib( + default=EXP_REDUCER_FUNC_MEAN, + validator=in_([ + EXP_REDUCER_FUNC_MIN, + EXP_REDUCER_FUNC_MAX, + EXP_REDUCER_FUNC_MEAN, + EXP_REDUCER_FUNC_SUM, + EXP_REDUCER_FUNC_COUNT, + EXP_REDUCER_FUNC_LAST + ]) + ) + reduceMode = attr.ib( + default=EXP_REDUCER_MODE_STRICT, + validator=in_([ + EXP_REDUCER_MODE_STRICT, + EXP_REDUCER_FUNC_DROP_NN, + EXP_REDUCER_FUNC_REPLACE_NN + ]) + ) + reduceReplaceWith = attr.ib(default=0) + + resampleWindow = attr.ib(default='10s', validator=instance_of(str)) + resampleDownsampler = attr.ib(default='mean') + resampleUpsampler = attr.ib(default='fillna') + def to_json_data(self): + + conditions = [] + + for condition in self.conditions: + # discard unused features of condition as of grafana 8.x + condition.useNewAlerts = True + if condition.target is None: + condition.target = Target(refId=self.expression) + conditions += [condition.to_json_data()] + + expression = { + 'refId': self.refId, + 'queryType': '', + 'relativeTimeRange': { + 'from': 0, + 'to': 0 + }, + 'datasourceUid': '-100', + 'model': { + 'conditions': conditions, + 'datasource': { + 'type': '__expr__', + 'uid': '-100' + }, + 'expression': self.expression, + 'hide': self.hide, + 'intervalMs': self.intervalMs, + 'maxDataPoints': self.maxDataPoints, + 'refId': self.refId, + 'type': self.expressionType, + 'reducer': self.reduceFunction, + 'settings': { + 'mode': self.reduceMode, + 'replaceWithValue': self.reduceReplaceWith + }, + 'downsampler': self.resampleDownsampler, + 'upsampler': self.resampleUpsampler, + 'window': self.resampleWindow + } + } + + return expression + + +@attr.s +class Alert(object): + 
""" + :param alertRuleTags: Key Value pairs to be sent with Alert notifications. + """ name = attr.ib() message = attr.ib() alertConditions = attr.ib() executionErrorState = attr.ib(default=STATE_ALERTING) - frequency = attr.ib(default="60s") + frequency = attr.ib(default='60s') handler = attr.ib(default=1) noDataState = attr.ib(default=STATE_NO_DATA) notifications = attr.ib(default=attr.Factory(list)) + gracePeriod = attr.ib(default='5m') + alertRuleTags = attr.ib( + default=attr.Factory(dict), + validator=attr.validators.deep_mapping( + key_validator=attr.validators.instance_of(str), + value_validator=attr.validators.instance_of(str), + mapping_validator=attr.validators.instance_of(dict), + ) + ) + + def to_json_data(self): + return { + 'conditions': self.alertConditions, + 'executionErrorState': self.executionErrorState, + 'frequency': self.frequency, + 'handler': self.handler, + 'message': self.message, + 'name': self.name, + 'noDataState': self.noDataState, + 'notifications': self.notifications, + 'for': self.gracePeriod, + 'alertRuleTags': self.alertRuleTags, + } + + +@attr.s +class AlertGroup(object): + """ + Create an alert group of Grafana 8.x alerts + :param name: Alert group name + :param rules: List of AlertRule + :param folder: Folder to hold alert (Grafana 9.x) + :param evaluateInterval: Interval at which the group of alerts is to be evaluated + """ + name = attr.ib() + rules = attr.ib(default=attr.Factory(list), validator=instance_of(list)) + folder = attr.ib(default='alert', validator=instance_of(str)) + evaluateInterval = attr.ib(default='1m', validator=instance_of(str)) + + def group_rules(self, rules): + grouped_rules = [] + for each in rules: + each.rule_group = self.name + grouped_rules.append(each.to_json_data()) + return grouped_rules + + def to_json_data(self): + return { + 'name': self.name, + 'interval': self.evaluateInterval, + 'rules': self.group_rules(self.rules), + 'folder': self.folder + } + + +def is_valid_triggers(instance, attribute, value): + """Validator for AlertRule triggers""" + for trigger in value: + if not isinstance(trigger, tuple): + raise ValueError(f"{attribute.name} must be a list of [(Target, AlertCondition)] tuples") + + if list(map(type, trigger)) != [Target, AlertCondition]: + raise ValueError(f"{attribute.name} must be a list of [(Target, AlertCondition)] tuples") + + is_valid_target(instance, "alert trigger target", trigger[0]) + + +def is_valid_triggersv9(instance, attribute, value): + """Validator for AlertRule triggers for Grafana v9""" + for trigger in value: + if not (isinstance(trigger, Target) or isinstance(trigger, AlertExpression)): + raise ValueError(f"{attribute.name} must either be a Target or AlertExpression") + + if isinstance(trigger, Target): + is_valid_target(instance, "alert trigger target", trigger) + + +@attr.s +class AlertRulev8(object): + """ + Create a Grafana 8.x Alert Rule + + :param title: The alert's title, must be unique per folder + :param triggers: A list of Target and AlertCondition tuples, [(Target, AlertCondition)]. + The Target specifies the query, and the AlertCondition specifies how this is used to alert. + Several targets and conditions can be defined, alerts can fire based on all conditions + being met by specifying OP_AND on all conditions, or on any single condition being met + by specifying OP_OR on all conditions. + :param annotations: Summary and annotations + :param labels: Custom Labels for the metric, used to handle notifications + + :param evaluateInterval: The frequency of evaluation. 
Must be a multiple of 10 seconds. For example, 30s, 1m + :param evaluateFor: The duration for which the condition must be true before an alert fires + :param noDataAlertState: Alert state if no data or all values are null + Must be one of the following: + [ALERTRULE_STATE_DATA_OK, ALERTRULE_STATE_DATA_ALERTING, ALERTRULE_STATE_DATA_NODATA ] + :param errorAlertState: Alert state if execution error or timeout + Must be one of the following: + [ALERTRULE_STATE_DATA_OK, ALERTRULE_STATE_DATA_ALERTING, ALERTRULE_STATE_DATA_ERROR ] + + :param timeRangeFrom: Time range interpolation data start from + :param timeRangeTo: Time range interpolation data finish at + :param uid: Alert UID should be unique + :param dashboard_uid: Dashboard UID that should be use for linking on alert message + :param panel_id: Panel ID that should should be use for linking on alert message + """ + + title = attr.ib() + triggers = attr.ib(validator=is_valid_triggers) + annotations = attr.ib(factory=dict, validator=instance_of(dict)) + labels = attr.ib(factory=dict, validator=instance_of(dict)) + + evaluateInterval = attr.ib(default=DEFAULT_ALERT_EVALUATE_INTERVAL, validator=instance_of(str)) + evaluateFor = attr.ib(default=DEFAULT_ALERT_EVALUATE_FOR, validator=instance_of(str)) + noDataAlertState = attr.ib( + default=ALERTRULE_STATE_DATA_ALERTING, + validator=in_([ + ALERTRULE_STATE_DATA_OK, + ALERTRULE_STATE_DATA_ALERTING, + ALERTRULE_STATE_DATA_NODATA + ]) + ) + errorAlertState = attr.ib( + default=ALERTRULE_STATE_DATA_ALERTING, + validator=in_([ + ALERTRULE_STATE_DATA_OK, + ALERTRULE_STATE_DATA_ALERTING, + ALERTRULE_STATE_DATA_ERROR + ]) + ) + timeRangeFrom = attr.ib(default=300, validator=instance_of(int)) + timeRangeTo = attr.ib(default=0, validator=instance_of(int)) + uid = attr.ib(default=None, validator=attr.validators.optional(instance_of(str))) + dashboard_uid = attr.ib(default="", validator=instance_of(str)) + panel_id = attr.ib(default=0, validator=instance_of(int)) + + rule_group = attr.ib(default="") + + def to_json_data(self): + data = [] + conditions = [] + + for target, condition in self.triggers: + data += [{ + "refId": target.refId, + "relativeTimeRange": { + "from": self.timeRangeFrom, + "to": self.timeRangeTo + }, + "datasourceUid": target.datasource, + "model": target.to_json_data(), + }] + + # discard unused features of condition as of grafana 8.x + condition.useNewAlerts = True + + condition.target = target + conditions += [condition.to_json_data()] + + data += [{ + "refId": "CONDITION", + "datasourceUid": "-100", + "model": { + "conditions": conditions, + "refId": "CONDITION", + "type": "classic_conditions" + } + }] + + return { + "for": self.evaluateFor, + "labels": self.labels, + "annotations": self.annotations, + "grafana_alert": { + "title": self.title, + "condition": "CONDITION", + "data": data, + "intervalSeconds": self.evaluateInterval, + "exec_err_state": self.errorAlertState, + "no_data_state": self.noDataAlertState, + "uid": self.uid, + "rule_group": self.rule_group, + } + } + + +@attr.s +class AlertRulev9(object): + """ + Create a Grafana 9.x+ Alert Rule + + :param title: The alert's title, must be unique per folder + :param triggers: A list of Targets and AlertConditions. + The Target specifies the query, and the AlertCondition specifies how this is used to alert. 
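A hedged sketch of how AlertRulev8 and AlertGroup (defined above) fit together; the PromQL expression, labels and datasource UID are placeholders:

from grafanalib.core import (
    AlertCondition, AlertGroup, AlertRulev8, GreaterThan, OP_AND, RTYPE_MAX, Target,
)

error_rate_rule = AlertRulev8(
    title="High error rate",
    triggers=[
        (
            Target(
                expr="sum(rate(http_requests_errors_total[5m]))",
                refId="A",
                datasource="prometheus-uid",  # placeholder datasource UID
            ),
            AlertCondition(
                evaluator=GreaterThan(5),
                operator=OP_AND,
                reducerType=RTYPE_MAX,
            ),
        ),
    ],
    annotations={"summary": "More than 5 errors/s for 5 minutes"},
    labels={"severity": "critical"},
    evaluateFor="5m",
)

alerts = AlertGroup(
    name="Service alerts",
    rules=[error_rate_rule],
    evaluateInterval="1m",
)

The resulting group can then be rendered to JSON with the generate-alertgroup entry point added in grafanalib/_gen.py.
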
+ :param annotations: Summary and annotations + Dictionary with one of the following key or custom key + ['runbook_url', 'summary', 'description', '__alertId__', '__dashboardUid__', '__panelId__'] + :param labels: Custom Labels for the metric, used to handle notifications + :param condition: Set one of the queries or expressions as the alert condition by refID (Grafana 9.x) + + :param evaluateFor: The duration for which the condition must be true before an alert fires + The Interval is set by the alert group + :param noDataAlertState: Alert state if no data or all values are null + Must be one of the following: + [ALERTRULE_STATE_DATA_OK, ALERTRULE_STATE_DATA_ALERTING, ALERTRULE_STATE_DATA_NODATA ] + :param errorAlertState: Alert state if execution error or timeout + Must be one of the following: + [ALERTRULE_STATE_DATA_OK, ALERTRULE_STATE_DATA_ALERTING, ALERTRULE_STATE_DATA_ERROR ] + + :param timeRangeFrom: Time range interpolation data start from + :param timeRangeTo: Time range interpolation data finish at + :param uid: Alert UID should be unique + :param dashboard_uid: Dashboard UID that should be use for linking on alert message + :param panel_id: Panel ID that should should be use for linking on alert message + """ + + title = attr.ib() + triggers = attr.ib(factory=list, validator=is_valid_triggersv9) + annotations = attr.ib(factory=dict, validator=instance_of(dict)) + labels = attr.ib(factory=dict, validator=instance_of(dict)) + + evaluateFor = attr.ib(default=DEFAULT_ALERT_EVALUATE_FOR, validator=instance_of(str)) + noDataAlertState = attr.ib( + default=ALERTRULE_STATE_DATA_ALERTING, + validator=in_([ + ALERTRULE_STATE_DATA_OK, + ALERTRULE_STATE_DATA_ALERTING, + ALERTRULE_STATE_DATA_NODATA + ]) + ) + errorAlertState = attr.ib( + default=ALERTRULE_STATE_DATA_ALERTING, + validator=in_([ + ALERTRULE_STATE_DATA_OK, + ALERTRULE_STATE_DATA_ALERTING, + ALERTRULE_STATE_DATA_ERROR + ]) + ) + condition = attr.ib(default='B') + timeRangeFrom = attr.ib(default=300, validator=instance_of(int)) + timeRangeTo = attr.ib(default=0, validator=instance_of(int)) + uid = attr.ib(default=None, validator=attr.validators.optional(instance_of(str))) + dashboard_uid = attr.ib(default="", validator=instance_of(str)) + panel_id = attr.ib(default=0, validator=instance_of(int)) + + def to_json_data(self): + data = [] + + for trigger in self.triggers: + if isinstance(trigger, Target): + target = trigger + data += [{ + "refId": target.refId, + "relativeTimeRange": { + "from": self.timeRangeFrom, + "to": self.timeRangeTo + }, + "datasourceUid": target.datasource, + "model": target.to_json_data(), + }] + else: + data += [trigger.to_json_data()] + + return { + "uid": self.uid, + "for": self.evaluateFor, + "labels": self.labels, + "annotations": self.annotations, + "grafana_alert": { + "title": self.title, + "condition": self.condition, + "data": data, + "no_data_state": self.noDataAlertState, + "exec_err_state": self.errorAlertState, + }, + } + + +@attr.s +class AlertFileBasedProvisioning(object): + """ + Used to generate JSON data valid for file based alert provisioning + + param alertGroup: List of AlertGroups + """ + + groups = attr.ib() + + def to_json_data(self): + return { + 'apiVersion': 1, + 'groups': self.groups, + } + + +@attr.s +class Notification(object): + + uid = attr.ib() def to_json_data(self): return { - "conditions": self.alertConditions, - "executionErrorState": self.executionErrorState, - "frequency": self.frequency, - "handler": self.handler, - "message": self.message, - "name": self.name, - 
"noDataState": self.noDataState, - "notifications": self.notifications, + 'uid': self.uid, } @@ -962,16 +1733,24 @@ def to_json_data(self): class Dashboard(object): title = attr.ib() - rows = attr.ib() annotations = attr.ib( default=attr.Factory(Annotations), validator=instance_of(Annotations), ) + description = attr.ib(default="", validator=instance_of(str)) editable = attr.ib( default=True, validator=instance_of(bool), ) gnetId = attr.ib(default=None) + + # Documented in Grafana 6.1.6, and obsoletes sharedCrosshair. Requires a + # newer schema than the current default of 12. + graphTooltip = attr.ib( + default=GRAPH_TOOLTIP_MODE_NOT_SHARED, + validator=instance_of(int), + ) + hideControls = attr.ib( default=False, validator=instance_of(bool), @@ -979,7 +1758,9 @@ class Dashboard(object): id = attr.ib(default=None) inputs = attr.ib(default=attr.Factory(list)) links = attr.ib(default=attr.Factory(list)) + panels = attr.ib(default=attr.Factory(list), validator=instance_of(list)) refresh = attr.ib(default=DEFAULT_REFRESH) + rows = attr.ib(default=attr.Factory(list), validator=instance_of(list)) schemaVersion = attr.ib(default=SCHEMA_VERSION) sharedCrosshair = attr.ib( default=True, @@ -1008,9 +1789,21 @@ def _iter_panels(self): for panel in row._iter_panels(): yield panel - def _map_panels(self, f): - return attr.assoc(self, rows=[r._map_panels(f) for r in self.rows]) - + for panel in self.panels: + if hasattr(panel, 'panels'): + yield panel + for row_panel in panel._iter_panels(): + yield row_panel + else: + yield panel + + def _map_panels(self, f): + return attr.evolve( + self, + rows=[r._map_panels(f) for r in self.rows], + panels=[p._map_panels(f) for p in self.panels] + ) + def auto_panel_ids(self): """Give unique IDs all the panels without IDs. @@ -1023,18 +1816,26 @@ def auto_panel_ids(self): auto_ids = (i for i in itertools.count(1) if i not in ids) def set_id(panel): - return panel if panel.id else attr.assoc(panel, id=next(auto_ids)) + return panel if panel.id else attr.evolve(panel, id=next(auto_ids)) return self._map_panels(set_id) def to_json_data(self): + if self.panels and self.rows: + print( + "Warning: You are using both panels and rows in this dashboard, please use one or the other. " + "Panels should be used in preference over rows, see example dashboard for help." + ) return { '__inputs': self.inputs, 'annotations': self.annotations, + 'description': self.description, 'editable': self.editable, 'gnetId': self.gnetId, + 'graphTooltip': self.graphTooltip, 'hideControls': self.hideControls, 'id': self.id, 'links': self.links, + 'panels': self.panels if not self.rows else [], 'refresh': self.refresh, 'rows': self.rows, 'schemaVersion': self.schemaVersion, @@ -1051,28 +1852,272 @@ def to_json_data(self): } +def _deep_update(base_dict, extra_dict): + if extra_dict is None: + return base_dict + + for k, v in extra_dict.items(): + if k in base_dict and hasattr(base_dict[k], "to_json_data"): + base_dict[k] = base_dict[k].to_json_data() + + if k in base_dict and isinstance(base_dict[k], dict): + _deep_update(base_dict[k], v) + else: + base_dict[k] = v + + @attr.s -class Graph(object): +class Panel(object): """ - Generates Graph panel json structure. 
+ Generic panel for shared defaults - :param dataSource: DataSource's name - :param minSpan: Minimum width for each panel + :param cacheTimeout: metric query result cache ttl + :param dataSource: Grafana datasource name + :param description: optional panel description + :param editable: defines if panel is editable via web interfaces + :param height: defines panel height + :param hideTimeOverride: hides time overrides + :param id: panel id + :param interval: defines time interval between metric queries + :param links: additional web links + :param maxDataPoints: maximum metric query results, + that will be used for rendering + :param minSpan: minimum span number :param repeat: Template's name to repeat Graph on + :param span: defines the number of spans that will be used for panel + :param targets: list of metric requests for chosen datasource + :param thresholds: single stat thresholds + :param thresholdType: type of threshold, absolute or percentage + :param timeFrom: time range that Override relative time + :param title: of the panel + :param transparent: defines if panel should be transparent + :param transformations: defines transformations applied to the table + :param extraJson: raw JSON additions or overrides added to the JSON output + of this panel, can be used for using unsupported features """ - title = attr.ib() - targets = attr.ib() - aliasColors = attr.ib(default=attr.Factory(dict)) - bars = attr.ib(default=False, validator=instance_of(bool)) dataSource = attr.ib(default=None) + targets = attr.ib(default=attr.Factory(list), validator=instance_of(list)) + title = attr.ib(default="") + cacheTimeout = attr.ib(default=None) decimals = attr.ib(default=1) description = attr.ib(default=None) editable = attr.ib(default=True, validator=instance_of(bool)) error = attr.ib(default=False, validator=instance_of(bool)) + height = attr.ib(default=None) + gridPos = attr.ib(default=None) + hideTimeOverride = attr.ib(default=False, validator=instance_of(bool)) + id = attr.ib(default=None) + interval = attr.ib(default=None) + links = attr.ib(default=attr.Factory(list)) + maxDataPoints = attr.ib(default=100) + minSpan = attr.ib(default=None) + repeat = attr.ib(default=attr.Factory(Repeat), validator=instance_of(Repeat)) + span = attr.ib(default=None) + thresholds = attr.ib(default=attr.Factory(list)) + thresholdType = attr.ib(default='absolute') + timeFrom = attr.ib(default=None) + timeShift = attr.ib(default=None) + transparent = attr.ib(default=False, validator=instance_of(bool)) + transformations = attr.ib(default=attr.Factory(list), validator=instance_of(list)) + extraJson = attr.ib(default=None, validator=attr.validators.optional(instance_of(dict))) + xAxis = attr.ib(default=attr.Factory(XAxis), validator=instance_of(XAxis)) + # XXX: This isn't a *good* default, rather it's the default Grafana uses. 
+ yAxes = attr.ib( + default=attr.Factory(YAxes), + converter=to_y_axes, + validator=instance_of(YAxes), + ) + alert = attr.ib(default=None) + + def _map_panels(self, f): + return f(self) + + def panel_json(self, overrides): + res = { + 'cacheTimeout': self.cacheTimeout, + 'datasource': self.dataSource, + 'decimals': self.decimals, + 'description': self.description, + 'editable': self.editable, + 'error': self.error, + 'fieldConfig': { + 'defaults': { + 'thresholds': { + 'mode': self.thresholdType, + 'steps': self.thresholds + }, + }, + }, + 'height': self.height, + 'gridPos': self.gridPos, + 'hideTimeOverride': self.hideTimeOverride, + 'id': self.id, + 'interval': self.interval, + 'links': self.links, + 'maxDataPoints': self.maxDataPoints, + 'minSpan': self.minSpan, + 'repeat': self.repeat.variable, + 'repeatDirection': self.repeat.direction, + 'maxPerRow': self.repeat.maxPerRow, + 'span': self.span, + 'targets': self.targets, + 'timeFrom': self.timeFrom, + 'timeShift': self.timeShift, + 'title': self.title, + 'transparent': self.transparent, + 'transformations': self.transformations, + } + _deep_update(res, overrides) + _deep_update(res, self.extraJson) + return res + + +@attr.s +class ePict(Panel): + """ + Generates ePict panel json structure. + https://grafana.com/grafana/plugins/larona-epict-panel/ + :param autoScale: Whether to auto scale image to panel size. + :param bgURL: Where to load the image from. + :param boxes: The info boxes to be placed on the image. + """ + + bgURL = attr.ib(default='', validator=instance_of(str)) + + autoScale = attr.ib(default=True, validator=instance_of(bool)) + boxes = attr.ib( + factory=list, + validator=attr.validators.deep_iterable( + member_validator=instance_of(ePictBox), + iterable_validator=instance_of(list), + ), + ) + + def to_json_data(self): + graph_object = { + 'type': EPICT_TYPE, + + 'options': { + 'autoScale': self.autoScale, + 'bgURL': self.bgURL, + 'boxes': self.boxes + } + } + return self.panel_json(graph_object) + + +@attr.s +class RowPanel(Panel): + """ + Generates Row panel json structure. + + :param title: title of the panel + :param collapsed: set True if row should be collapsed + :param panels: list of panels in the row, only to be used when collapsed=True + """ + panels = attr.ib(default=attr.Factory(list), validator=instance_of(list)) + collapsed = attr.ib(default=False, validator=instance_of(bool)) + + def _iter_panels(self): + return iter(self.panels) + + def _map_panels(self, f): + self = f(self) + return attr.evolve(self, panels=list(map(f, self.panels))) + + def to_json_data(self): + return self.panel_json( + { + 'collapsed': self.collapsed, + 'panels': self.panels, + 'type': ROW_TYPE + } + ) + + +@attr.s +class Row(object): + """ + Legacy support for old row, when not used with gridpos + """ + # TODO: jml would like to separate the balancing behaviour from this + # layer. 
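For dashboards that keep a row layout, the new RowPanel above can hold grid-positioned panels and be collapsed; a small sketch, assuming ``TimeSeries`` (defined later in this patch) and ``GridPos`` are importable from ``grafanalib.core``::

    # Sketch only; RowPanel and TimeSeries as defined in this patch, GridPos
    # assumed to be the grid-position helper from the same module.
    from grafanalib.core import GridPos, RowPanel, TimeSeries

    databases_row = RowPanel(
        title="Databases",
        collapsed=True,
        gridPos=GridPos(h=1, w=24, x=0, y=0),
        panels=[
            TimeSeries(
                title="Connections",
                dataSource="Prometheus",
                gridPos=GridPos(h=8, w=12, x=0, y=1),
            ),
        ],
    )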
+ try: + panels = attr.ib(default=attr.Factory(list), converter=_balance_panels) + except TypeError: + panels = attr.ib(default=attr.Factory(list), convert=_balance_panels) + collapse = attr.ib( + default=False, validator=instance_of(bool), + ) + editable = attr.ib( + default=True, validator=instance_of(bool), + ) + height = attr.ib( + default=attr.Factory(lambda: DEFAULT_ROW_HEIGHT), + validator=instance_of(Pixels), + ) + showTitle = attr.ib(default=None) + title = attr.ib(default="") + repeat = attr.ib(default=None) + + def _iter_panels(self): + return iter(self.panels) + + def _map_panels(self, f): + return attr.evolve(self, panels=list(map(f, self.panels))) + + def to_json_data(self): + showTitle = False + title = "New row" + if self.title is not None: + showTitle = True + title = self.title + if self.showTitle is not None: + showTitle = self.showTitle + return { + 'collapse': self.collapse, + 'editable': self.editable, + 'height': self.height, + 'panels': self.panels, + 'showTitle': showTitle, + 'title': title, + 'repeat': self.repeat, + } + + +@attr.s +class Graph(Panel): + """ + Generates Graph panel json structure. + + :param alert: List of AlertConditions + :param align: Select to align left and right Y-axes by value + :param alignLevel: Available when Align is selected. Value to use for alignment of left and right Y-axes + :param bars: Display values as a bar chart + :param dataLinks: List of data links hooked to datapoints on the graph + :param fill: Area fill, amount of color fill for a series. (default 1, 0 is none) + :param fillGradient: Degree of gradient on the area fill. (0 is no gradient, 10 is a steep gradient. Default is 0.) + :param lines: Display values as a line graph + :param points: Display points for values (default False) + :param pointRadius: Controls how large the points are + :param stack: Each series is stacked on top of another + :param percentage: Available when Stack is selected. 
Each series is drawn as a percentage of the total of all series + :param thresholds: List of GraphThresholds - Only valid when alert not defined + + """ + + alert = attr.ib(default=None) + alertThreshold = attr.ib(default=True, validator=instance_of(bool)) + aliasColors = attr.ib(default=attr.Factory(dict)) + align = attr.ib(default=False, validator=instance_of(bool)) + alignLevel = attr.ib(default=0, validator=instance_of(int)) + bars = attr.ib(default=False, validator=instance_of(bool)) + dataLinks = attr.ib(default=attr.Factory(list)) + error = attr.ib(default=False, validator=instance_of(bool)) fill = attr.ib(default=1, validator=instance_of(int)) + fillGradient = attr.ib(default=0, validator=instance_of(int)) grid = attr.ib(default=attr.Factory(Grid), validator=instance_of(Grid)) - id = attr.ib(default=None) isNew = attr.ib(default=True, validator=instance_of(bool)) legend = attr.ib( default=attr.Factory(Legend), @@ -1080,33 +2125,32 @@ class Graph(object): ) lines = attr.ib(default=True, validator=instance_of(bool)) lineWidth = attr.ib(default=DEFAULT_LINE_WIDTH) - links = attr.ib(default=attr.Factory(list)) - minSpan = attr.ib(default=None) nullPointMode = attr.ib(default=NULL_CONNECTED) percentage = attr.ib(default=False, validator=instance_of(bool)) pointRadius = attr.ib(default=DEFAULT_POINT_RADIUS) points = attr.ib(default=False, validator=instance_of(bool)) renderer = attr.ib(default=DEFAULT_RENDERER) - repeat = attr.ib(default=None) seriesOverrides = attr.ib(default=attr.Factory(list)) - span = attr.ib(default=None) stack = attr.ib(default=False, validator=instance_of(bool)) steppedLine = attr.ib(default=False, validator=instance_of(bool)) - timeFrom = attr.ib(default=None) - timeShift = attr.ib(default=None) tooltip = attr.ib( default=attr.Factory(Tooltip), validator=instance_of(Tooltip), ) - transparent = attr.ib(default=False, validator=instance_of(bool)) + thresholds = attr.ib(default=attr.Factory(list)) xAxis = attr.ib(default=attr.Factory(XAxis), validator=instance_of(XAxis)) - # XXX: This isn't a *good* default, rather it's the default Grafana uses. 
- yAxes = attr.ib( - default=attr.Factory(YAxes), - converter=to_y_axes, - validator=instance_of(YAxes), - ) - alert = attr.ib(default=None) + try: + yAxes = attr.ib( + default=attr.Factory(YAxes), + converter=to_y_axes, + validator=instance_of(YAxes), + ) + except TypeError: + yAxes = attr.ib( + default=attr.Factory(YAxes), + convert=to_y_axes, + validator=instance_of(YAxes), + ) def to_json_data(self): graphObject = { @@ -1119,43 +2163,46 @@ def to_json_data(self): 'error': self.error, 'fill': self.fill, 'grid': self.grid, - 'id': self.id, 'isNew': self.isNew, 'legend': self.legend, 'lines': self.lines, 'linewidth': self.lineWidth, - 'links': self.links, 'minSpan': self.minSpan, 'nullPointMode': self.nullPointMode, + 'options': { + 'dataLinks': self.dataLinks, + 'alertThreshold': self.alertThreshold, + }, 'percentage': self.percentage, 'pointradius': self.pointRadius, 'points': self.points, 'renderer': self.renderer, - 'repeat': self.repeat, 'seriesOverrides': self.seriesOverrides, - 'span': self.span, 'stack': self.stack, 'steppedLine': self.steppedLine, - 'targets': self.targets, - 'timeFrom': self.timeFrom, - 'timeShift': self.timeShift, - 'title': self.title, 'tooltip': self.tooltip, - 'transparent': self.transparent, + 'thresholds': self.thresholds, 'type': GRAPH_TYPE, 'xaxis': self.xAxis, 'yaxes': self.yAxes, + 'yaxis': { + 'align': self.align, + 'alignLevel': self.alignLevel + } } if self.alert: graphObject['alert'] = self.alert - return graphObject + graphObject['thresholds'] = [] + if self.thresholds and self.alert: + print("Warning: Graph threshold ignored as Alerts defined") + return self.panel_json(graphObject) def _iter_targets(self): for target in self.targets: yield target def _map_targets(self, f): - return attr.assoc(self, targets=[f(t) for t in self.targets]) + return attr.evolve(self, targets=[f(t) for t in self.targets]) def auto_ref_ids(self): """Give unique IDs all the panels without IDs. @@ -1165,46 +2212,191 @@ def auto_ref_ids(self): an ``refId`` property set will keep that property, all others will have auto-generated IDs provided for them. """ - ref_ids = set( - [target.refId for target in self._iter_targets() if target.refId]) + ref_ids = set([t.refId for t in self._iter_targets() if t.refId]) + double_candidate_refs = \ + [p[0] + p[1] for p + in itertools.product(string.ascii_uppercase, repeat=2)] candidate_ref_ids = itertools.chain( string.ascii_uppercase, - itertools.product(string.ascii_uppercase, repeat=2) + double_candidate_refs, ) + auto_ref_ids = (i for i in candidate_ref_ids if i not in ref_ids) - def set_refid(target): - return target if target.refId else attr.assoc(target, refId=next(auto_ref_ids)) # noqa: E501 + def set_refid(t): + return t if t.refId else attr.evolve(t, refId=next(auto_ref_ids)) + return self._map_targets(set_refid) @attr.s -class SparkLine(object): - fillColor = attr.ib( - default=attr.Factory(lambda: BLUE_RGBA), - validator=instance_of(RGBA), - ) - full = attr.ib(default=False, validator=instance_of(bool)) - lineColor = attr.ib( - default=attr.Factory(lambda: BLUE_RGB), - validator=instance_of(RGB), +class TimeSeries(Panel): + """Generates Time Series panel json structure added in Grafana v8 + + Grafana doc on time series: https://grafana.com/docs/grafana/latest/panels/visualizations/time-series/ + + :param axisPlacement: auto(Default), left. 
right, hidden + :param axisLabel: axis label string + :param barAlignment: bar alignment + -1 (left), 0 (centre, default), 1 + :param colorMode: Color mode + palette-classic (Default), + :param drawStyle: how to display your time series data + line (Default), bars, points + :param fillOpacity: fillOpacity + :param gradientMode: gradientMode + :param legendDisplayMode: refine how the legend appears in your visualization + list (Default), table, hidden + :param legendPlacement: bottom (Default), right + :param legendCalcs: which calculations should be displayed in the legend. Defaults to an empty list. + Possible values are: allIsNull, allIsZero, changeCount, count, delta, diff, diffperc, + distinctCount, firstNotNull, max, mean, min, logmin, range, step, total. For more information see + :param lineInterpolation: line interpolation + linear (Default), smooth, stepBefore, stepAfter + :param lineWidth: line width, default 1 + :param mappings: To assign colors to boolean or string values, use Value mappings + :param overrides: To override the base characteristics of certain timeseries data + :param pointSize: point size, default 5 + :param scaleDistributionType: axis scale linear or log + :param scaleDistributionLog: Base of if logarithmic scale type set, default 2 + :param spanNulls: connect null values, default False + :param showPoints: show points + auto (Default), always, never + :param stacking: dict to enable stacking, {"mode": "normal", "group": "A"} + :param thresholds: single stat thresholds + :param tooltipMode: When you hover your cursor over the visualization, Grafana can display tooltips + single (Default), multi, none + :param unit: units + :param thresholdsStyleMode: thresholds style mode off (Default), area, line, line+area + :param valueMin: Minimum value for Panel + :param valueMax: Maximum value for Panel + :param valueDecimals: Number of display decimals + """ + + axisPlacement = attr.ib(default='auto', validator=instance_of(str)) + axisLabel = attr.ib(default='', validator=instance_of(str)) + barAlignment = attr.ib(default=0, validator=instance_of(int)) + colorMode = attr.ib(default='palette-classic', validator=instance_of(str)) + drawStyle = attr.ib(default='line', validator=instance_of(str)) + fillOpacity = attr.ib(default=0, validator=instance_of(int)) + gradientMode = attr.ib(default='none', validator=instance_of(str)) + legendDisplayMode = attr.ib(default='list', validator=instance_of(str)) + legendPlacement = attr.ib(default='bottom', validator=instance_of(str)) + legendCalcs = attr.ib( + factory=list, + validator=attr.validators.deep_iterable( + member_validator=in_([ + 'lastNotNull', + 'min', + 'mean', + 'max', + 'last', + 'firstNotNull', + 'first', + 'sum', + 'count', + 'range', + 'delta', + 'step', + 'diff', + 'logmin', + 'allIsZero', + 'allIsNull', + 'changeCount', + 'distinctCount', + 'diffperc', + 'allValues' + ]), + iterable_validator=instance_of(list), + ), ) - show = attr.ib(default=False, validator=instance_of(bool)) + lineInterpolation = attr.ib(default='linear', validator=instance_of(str)) + lineWidth = attr.ib(default=1, validator=instance_of(int)) + mappings = attr.ib(default=attr.Factory(list)) + overrides = attr.ib(default=attr.Factory(list)) + pointSize = attr.ib(default=5, validator=instance_of(int)) + scaleDistributionType = attr.ib(default='linear', validator=instance_of(str)) + scaleDistributionLog = attr.ib(default=2, validator=instance_of(int)) + spanNulls = attr.ib(default=False, validator=instance_of(bool)) + showPoints = 
attr.ib(default='auto', validator=instance_of(str)) + stacking = attr.ib(factory=dict, validator=instance_of(dict)) + tooltipMode = attr.ib(default='single', validator=instance_of(str)) + unit = attr.ib(default='', validator=instance_of(str)) + thresholdsStyleMode = attr.ib(default='off', validator=instance_of(str)) + + valueMin = attr.ib(default=None, validator=attr.validators.optional(instance_of(int))) + valueMax = attr.ib(default=None, validator=attr.validators.optional(instance_of(int))) + valueDecimals = attr.ib(default=None, validator=attr.validators.optional(instance_of(int))) def to_json_data(self): - return { - 'fillColor': self.fillColor, - 'full': self.full, - 'lineColor': self.lineColor, - 'show': self.show, - } + return self.panel_json( + { + 'fieldConfig': { + 'defaults': { + 'color': { + 'mode': self.colorMode + }, + 'custom': { + 'axisPlacement': self.axisPlacement, + 'axisLabel': self.axisLabel, + 'drawStyle': self.drawStyle, + 'lineInterpolation': self.lineInterpolation, + 'barAlignment': self.barAlignment, + 'lineWidth': self.lineWidth, + 'fillOpacity': self.fillOpacity, + 'gradientMode': self.gradientMode, + 'spanNulls': self.spanNulls, + 'showPoints': self.showPoints, + 'pointSize': self.pointSize, + 'stacking': self.stacking, + 'scaleDistribution': { + 'type': self.scaleDistributionType, + 'log': self.scaleDistributionLog + }, + 'hideFrom': { + 'tooltip': False, + 'viz': False, + 'legend': False + }, + 'thresholdsStyle': { + 'mode': self.thresholdsStyleMode + }, + }, + 'mappings': self.mappings, + "min": self.valueMin, + "max": self.valueMax, + "decimals": self.valueDecimals, + 'unit': self.unit + }, + 'overrides': self.overrides + }, + 'options': { + 'legend': { + 'displayMode': self.legendDisplayMode, + 'placement': self.legendPlacement, + 'calcs': self.legendCalcs + }, + 'tooltip': { + 'mode': self.tooltipMode + } + }, + 'type': TIMESERIES_TYPE, + } + ) @attr.s class ValueMap(object): - op = attr.ib() + """ + Generates json structure for a value mapping item. 
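The TimeSeries panel above maps most of Grafana 8's time series options onto keyword arguments; a minimal sketch, assuming ``Target`` and ``GridPos`` come from the same module, with option values taken from the docstring above::

    # Sketch only; legendCalcs entries must come from the validator list above.
    from grafanalib.core import GridPos, Target, TimeSeries

    latency = TimeSeries(
        title="p99 latency",
        dataSource="Prometheus",
        targets=[
            Target(
                expr='histogram_quantile(0.99, sum(rate(request_duration_seconds_bucket[5m])) by (le))',
                refId='A',
            ),
        ],
        unit='s',
        drawStyle='line',
        fillOpacity=10,
        legendDisplayMode='table',
        legendCalcs=['max', 'mean', 'lastNotNull'],
        valueDecimals=2,
        gridPos=GridPos(h=8, w=12, x=0, y=0),
    )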
+ + :param op: comparison operator + :param value: value to map to text + :param text: text to map the value to + """ text = attr.ib() value = attr.ib() + op = attr.ib(default='=') def to_json_data(self): return { @@ -1215,16 +2407,24 @@ def to_json_data(self): @attr.s -class RangeMap(object): - start = attr.ib() - end = attr.ib() - text = attr.ib() +class SparkLine(object): + fillColor = attr.ib( + default=attr.Factory(lambda: BLUE_RGBA), + validator=instance_of(RGBA), + ) + full = attr.ib(default=False, validator=instance_of(bool)) + lineColor = attr.ib( + default=attr.Factory(lambda: BLUE_RGB), + validator=instance_of(RGB), + ) + show = attr.ib(default=False, validator=instance_of(bool)) def to_json_data(self): return { - 'from': self.start, - 'to': self.end, - 'text': self.text, + 'fillColor': self.fillColor, + 'full': self.full, + 'lineColor': self.lineColor, + 'show': self.show, } @@ -1248,33 +2448,16 @@ def to_json_data(self): @attr.s -class Text(object): - """Generates a Text panel.""" - - content = attr.ib() - editable = attr.ib(default=True, validator=instance_of(bool)) - error = attr.ib(default=False, validator=instance_of(bool)) - height = attr.ib(default=None) - id = attr.ib(default=None) - links = attr.ib(default=attr.Factory(list)) - mode = attr.ib(default=TEXT_MODE_MARKDOWN) - span = attr.ib(default=None) - title = attr.ib(default="") - transparent = attr.ib(default=False, validator=instance_of(bool)) +class RangeMap(object): + start = attr.ib() + end = attr.ib() + text = attr.ib() def to_json_data(self): return { - 'content': self.content, - 'editable': self.editable, - 'error': self.error, - 'height': self.height, - 'id': self.id, - 'links': self.links, - 'mode': self.mode, - 'span': self.span, - 'title': self.title, - 'transparent': self.transparent, - 'type': TEXT_TYPE, + 'from': self.start, + 'to': self.end, + 'text': self.text, } @@ -1329,69 +2512,553 @@ def to_json_data(self): } +@attr.s +class DiscreteColorMappingItem(object): + """ + Generates json structure for the value mapping item for the StatValueMappings class: + + :param text: String to color + :param color: To color the text with + """ + + text = attr.ib(validator=instance_of(str)) + color = attr.ib(default=GREY1, validator=instance_of((str, RGBA))) + + def to_json_data(self): + return { + "color": self.color, + "text": self.text, + } + + +@attr.s +class Discrete(Panel): + """ + Generates Discrete panel json structure. + https://grafana.com/grafana/plugins/natel-discrete-panel/ + + :param colorMaps: list of DiscreteColorMappingItem, to color values + (note these apply **after** value mappings) + :param backgroundColor: dito + :param lineColor: Separator line color between rows + :param metricNameColor: dito + :param timeTextColor: dito + :param valueTextColor: dito + + :param decimals: number of decimals to display + :param rowHeight: dito + + :param units: defines value units + :param legendSortBy: time (desc: '-ms', asc: 'ms), count (desc: '-count', asc: 'count') + + :param highlightOnMouseover: whether to highlight the state of hovered time falls in. 
+ :param showLegend: dito + :param showLegendPercent: whether to show percentage of time spent in each state/value + :param showLegendNames: + :param showLegendValues: whether to values in legend + :param legendPercentDecimals: number of decimals for legend + :param showTimeAxis: dito + :param use12HourClock: dito + :param writeMetricNames: dito + :param writeLastValue: dito + :param writeAllValues: whether to show all values + + :param showDistinctCount: whether to show distinct values count + :param showLegendCounts: whether to show value occurrence count + :param showLegendTime: whether to show of each state + :param showTransitionCount: whether to show transition count + + :param colorMaps: list of DiscreteColorMappingItem + :param rangeMaps: list of RangeMap + :param valueMaps: list of ValueMap + """ + + backgroundColor = attr.ib( + default=RGBA(128, 128, 128, 0.1), + validator=instance_of((RGBA, RGB, str)) + ) + lineColor = attr.ib( + default=RGBA(0, 0, 0, 0.1), + validator=instance_of((RGBA, RGB, str)) + ) + metricNameColor = attr.ib( + default="#000000", + validator=instance_of((RGBA, RGB, str)) + ) + timeTextColor = attr.ib( + default="#d8d9da", + validator=instance_of((RGBA, RGB, str)) + ) + valueTextColor = attr.ib( + default="#000000", + validator=instance_of((RGBA, RGB, str)) + ) + + decimals = attr.ib(default=0, validator=instance_of(int)) + legendPercentDecimals = attr.ib(default=0, validator=instance_of(int)) + rowHeight = attr.ib(default=50, validator=instance_of(int)) + textSize = attr.ib(default=24, validator=instance_of(int)) + + textSizeTime = attr.ib(default=12, validator=instance_of(int)) + units = attr.ib(default="none", validator=instance_of(str)) + legendSortBy = attr.ib( + default="-ms", + validator=in_(['-ms', 'ms', '-count', 'count']) + ) + + highlightOnMouseover = attr.ib(default=True, validator=instance_of(bool)) + showLegend = attr.ib(default=True, validator=instance_of(bool)) + showLegendPercent = attr.ib(default=True, validator=instance_of(bool)) + showLegendNames = attr.ib(default=True, validator=instance_of(bool)) + showLegendValues = attr.ib(default=True, validator=instance_of(bool)) + showTimeAxis = attr.ib(default=True, validator=instance_of(bool)) + use12HourClock = attr.ib(default=False, validator=instance_of(bool)) + writeMetricNames = attr.ib(default=False, validator=instance_of(bool)) + writeLastValue = attr.ib(default=True, validator=instance_of(bool)) + writeAllValues = attr.ib(default=False, validator=instance_of(bool)) + + showDistinctCount = attr.ib(default=None) + showLegendCounts = attr.ib(default=None) + showLegendTime = attr.ib(default=None) + showTransitionCount = attr.ib(default=None) + + colorMaps = attr.ib( + factory=list, + validator=attr.validators.deep_iterable( + member_validator=instance_of(DiscreteColorMappingItem), + iterable_validator=instance_of(list), + ), + ) + rangeMaps = attr.ib( + factory=list, + validator=attr.validators.deep_iterable( + member_validator=instance_of(RangeMap), + iterable_validator=instance_of(list), + ), + ) + valueMaps = attr.ib( + factory=list, + validator=attr.validators.deep_iterable( + member_validator=instance_of(ValueMap), + iterable_validator=instance_of(list), + ), + ) + + def to_json_data(self): + graphObject = { + 'type': DISCRETE_TYPE, + + 'backgroundColor': self.backgroundColor, + 'lineColor': self.lineColor, + 'metricNameColor': self.metricNameColor, + 'timeTextColor': self.timeTextColor, + 'valueTextColor': self.valueTextColor, + 'legendPercentDecimals': self.legendPercentDecimals, + 
'decimals': self.decimals, + 'rowHeight': self.rowHeight, + 'textSize': self.textSize, + 'textSizeTime': self.textSizeTime, + + 'units': self.units, + 'legendSortBy': self.legendSortBy, + + 'highlightOnMouseover': self.highlightOnMouseover, + 'showLegend': self.showLegend, + 'showLegendPercent': self.showLegendPercent, + 'showLegendNames': self.showLegendNames, + 'showLegendValues': self.showLegendValues, + 'showTimeAxis': self.showTimeAxis, + 'use12HourClock': self.use12HourClock, + 'writeMetricNames': self.writeMetricNames, + 'writeLastValue': self.writeLastValue, + 'writeAllValues': self.writeAllValues, + + 'showDistinctCount': self.showDistinctCount, + 'showLegendCounts': self.showLegendCounts, + 'showLegendTime': self.showLegendTime, + 'showTransitionCount': self.showTransitionCount, + + 'colorMaps': self.colorMaps, + 'rangeMaps': self.rangeMaps, + 'valueMaps': self.valueMaps, + } + return self.panel_json(graphObject) + + +@attr.s +class Text(Panel): + """Generates a Text panel.""" + + content = attr.ib(default="", validator=instance_of(str)) + error = attr.ib(default=False, validator=instance_of(bool)) + mode = attr.ib( + default=TEXT_MODE_MARKDOWN, + validator=in_([TEXT_MODE_MARKDOWN, TEXT_MODE_HTML, TEXT_MODE_TEXT]) + ) + + def to_json_data(self): + return self.panel_json({ + 'type': TEXT_TYPE, + 'error': self.error, + 'options': { + 'content': self.content, + 'mode': self.mode, + }, + }) + + @attr.s class AlertList(object): - """Generates the AlertList Panel.""" + """Generates the AlertList Panel. + + :param dashboardTags: A list of tags (strings) for the panel. + :param description: Panel description, supports markdown and links. + :param gridPos: describes the panel size and position in grid coordinates. + :param id: panel id + :param limit: Max number of alerts that can be displayed in the list. + :param nameFilter: Show only alerts that contain nameFilter in their name. + :param onlyAlertsOnDashboard: If true, shows only alerts from the current dashboard. + :param links: Additional web links to be presented in the panel. A list of instantiation of + DataLink objects. + :param show: Show the current alert list (ALERTLIST_SHOW_CURRENT) or only the alerts that were + changed (ALERTLIST_SHOW_CHANGES). + :param sortOrder: Defines the sorting order of the alerts. Gets one of the following values as + input: SORT_ASC, SORT_DESC and SORT_IMPORTANCE. + :param span: Defines the number of spans that will be used for the panel. + :param stateFilter: Show alerts with statuses from the stateFilter list. The list can contain a + subset of the following statuses: + [ALERTLIST_STATE_ALERTING, ALERTLIST_STATE_OK, ALERTLIST_STATE_NO_DATA, + ALERTLIST_STATE_PAUSED, ALERTLIST_STATE_EXECUTION_ERROR, ALERTLIST_STATE_PENDING]. + An empty list means all alerts. + :param title: The panel title. + :param transparent: If true, display the panel without a background. + :param alertName: Show only alerts that contain alertName in their name. 
+ """ - description = attr.ib(default="") + dashboardTags = attr.ib( + default=attr.Factory(list), + validator=attr.validators.deep_iterable( + member_validator=attr.validators.instance_of(str), + iterable_validator=attr.validators.instance_of(list))) + description = attr.ib(default="", validator=instance_of(str)) + gridPos = attr.ib( + default=None, validator=attr.validators.optional(attr.validators.instance_of(GridPos))) id = attr.ib(default=None) limit = attr.ib(default=DEFAULT_LIMIT) - links = attr.ib(default=attr.Factory(list)) + links = attr.ib( + default=attr.Factory(list), + validator=attr.validators.deep_iterable( + member_validator=attr.validators.instance_of(DataLink), + iterable_validator=attr.validators.instance_of(list))) + nameFilter = attr.ib(default="", validator=instance_of(str)) onlyAlertsOnDashboard = attr.ib(default=True, validator=instance_of(bool)) show = attr.ib(default=ALERTLIST_SHOW_CURRENT) sortOrder = attr.ib(default=SORT_ASC, validator=in_([1, 2, 3])) + span = attr.ib(default=6) stateFilter = attr.ib(default=attr.Factory(list)) title = attr.ib(default="") transparent = attr.ib(default=False, validator=instance_of(bool)) + alertName = attr.ib(default="", validator=instance_of(str)) + + def _map_panels(self, f): + return f(self) def to_json_data(self): return { + 'dashboardTags': self.dashboardTags, 'description': self.description, + 'gridPos': self.gridPos, 'id': self.id, 'limit': self.limit, 'links': self.links, + 'nameFilter': self.nameFilter, 'onlyAlertsOnDashboard': self.onlyAlertsOnDashboard, 'show': self.show, 'sortOrder': self.sortOrder, + 'span': self.span, 'stateFilter': self.stateFilter, 'title': self.title, 'transparent': self.transparent, 'type': ALERTLIST_TYPE, + "options": { + "alertName": self.alertName + }, + } + + +@attr.s +class Stat(Panel): + """Generates Stat panel json structure + + Grafana doc on stat: https://grafana.com/docs/grafana/latest/panels/visualizations/stat-panel/ + + :param alignment: defines value & title positioning: keys 'auto' 'centre' + :param colorMode: defines if Grafana will color panel background: keys "value" "background" + :param decimals: number of decimals to display + :param format: defines value units + :param graphMode: defines if Grafana will draw graph: keys 'area' 'none' + :param noValue: define the default value if no value is found + :param mappings: the list of values to text mappings + This should be a list of StatMapping objects + https://grafana.com/docs/grafana/latest/panels/field-configuration-options/#value-mapping + :param orientation: Stacking direction in case of multiple series or fields: keys 'auto' 'horizontal' 'vertical' + :param overrides: To override the base characteristics of certain timeseries data + :param reduceCalc: algorithm for reduction to a single value: keys + 'mean' 'lastNotNull' 'last' 'first' 'firstNotNull' 'min' 'max' 'sum' 'total' + :param fields: should be included in the panel + :param textMode: define Grafana will show name or value: keys: 'auto' 'name' 'none' 'value' 'value_and_name' + :param thresholds: single stat thresholds + """ + + alignment = attr.ib(default='auto') + colorMode = attr.ib(default='value') + decimals = attr.ib(default=None) + format = attr.ib(default='none') + graphMode = attr.ib(default='area') + mappings = attr.ib(default=attr.Factory(list)) + noValue = attr.ib(default='none') + orientation = attr.ib(default='auto') + overrides = attr.ib(default=attr.Factory(list)) + reduceCalc = attr.ib(default='mean', type=str) + fields = attr.ib(default="") + 
textMode = attr.ib(default='auto') + thresholds = attr.ib(default="") + + def to_json_data(self): + return self.panel_json( + { + 'fieldConfig': { + 'defaults': { + 'custom': {}, + 'decimals': self.decimals, + 'mappings': self.mappings, + 'unit': self.format, + 'noValue': self.noValue + }, + 'overrides': self.overrides + }, + 'options': { + 'textMode': self.textMode, + 'colorMode': self.colorMode, + 'graphMode': self.graphMode, + 'justifyMode': self.alignment, + 'orientation': self.orientation, + 'reduceOptions': { + 'calcs': [ + self.reduceCalc + ], + 'fields': self.fields, + 'values': False + } + }, + 'type': STAT_TYPE, + } + ) + + +@attr.s +class StatValueMappingItem(object): + """ + Generates json structure for the value mapping item for the StatValueMappings class: + + :param text: String that will replace input value + :param mapValue: Value to be replaced + :param color: How to color the text if mapping occurs + :param index: index + """ + + text = attr.ib() + mapValue = attr.ib(default="", validator=instance_of(str)) + color = attr.ib(default="", validator=instance_of(str)) + index = attr.ib(default=None) + + def to_json_data(self): + return { + self.mapValue: { + 'text': self.text, + 'color': self.color, + 'index': self.index + } + } + + +@attr.s(init=False) +class StatValueMappings(object): + """ + Generates json structure for the value mappings for the StatPanel: + + :param mappingItems: List of StatValueMappingItem objects + + mappings=[ + core.StatValueMappings( + core.StatValueMappingItem('Offline', '0', 'red'), # Value must a string + core.StatValueMappingItem('Online', '1', 'green') + ), + ], + """ + + mappingItems = attr.ib( + factory=list, + validator=attr.validators.deep_iterable( + member_validator=attr.validators.instance_of(StatValueMappingItem), + iterable_validator=attr.validators.instance_of(list), + ), + ) + + def __init__(self, *mappings: StatValueMappingItem): + self.__attrs_init__([*mappings]) + + def to_json_data(self): + ret_dict = { + 'type': 'value', + 'options': { + } + } + + for item in self.mappingItems: + ret_dict['options'].update(item.to_json_data()) + + return ret_dict + + +@attr.s +class StatRangeMappings(object): + """ + Generates json structure for the range mappings for the StatPanel: + + :param text: Sting that will replace input value + :param startValue: When using a range, the start value of the range + :param endValue: When using a range, the end value of the range + :param color: How to color the text if mapping occurs + :param index: index + """ + + text = attr.ib() + startValue = attr.ib(default=0, validator=instance_of(int)) + endValue = attr.ib(default=0, validator=instance_of(int)) + color = attr.ib(default="", validator=instance_of(str)) + index = attr.ib(default=None) + + def to_json_data(self): + return { + 'type': 'range', + 'options': { + 'from': self.startValue, + 'to': self.endValue, + 'result': { + 'text': self.text, + 'color': self.color, + 'index': self.index + } + } + } + + +@attr.s +class StatMapping(object): + """ + Deprecated Grafana v8 + Generates json structure for the value mapping for the Stat panel: + + :param text: Sting that will replace input value + :param value: Value to be replaced + :param startValue: When using a range, the start value of the range + :param endValue: When using a range, the end value of the range + :param id: panel id + """ + + text = attr.ib() + mapValue = attr.ib(default="", validator=instance_of(str)) + startValue = attr.ib(default="", validator=instance_of(str)) + endValue = 
attr.ib(default="", validator=instance_of(str)) + id = attr.ib(default=None) + + def to_json_data(self): + mappingType = MAPPING_TYPE_VALUE_TO_TEXT if self.mapValue else MAPPING_TYPE_RANGE_TO_TEXT + + ret_dict = { + 'operator': '', + 'text': self.text, + 'type': mappingType, + 'value': self.mapValue, + 'from': self.startValue, + 'to': self.endValue, + 'id': self.id } + return ret_dict + + +@attr.s +class StatValueMapping(object): + """ + Deprecated Grafana v8 + Generates json structure for the value mappings for the StatPanel: + + :param text: Sting that will replace input value + :param mapValue: Value to be replaced + :param id: panel id + """ + + text = attr.ib() + mapValue = attr.ib(default="", validator=instance_of(str)) + id = attr.ib(default=None) + + def to_json_data(self): + return StatMapping( + self.text, + mapValue=self.mapValue, + id=self.id, + ) + + +@attr.s +class StatRangeMapping(object): + """ + Deprecated Grafana v8 + Generates json structure for the range mappings for the StatPanel: + + :param text: Sting that will replace input value + :param startValue: When using a range, the start value of the range + :param endValue: When using a range, the end value of the range + :param id: panel id + """ + + text = attr.ib() + startValue = attr.ib(default="", validator=instance_of(str)) + endValue = attr.ib(default="", validator=instance_of(str)) + id = attr.ib(default=None) + + def to_json_data(self): + return StatMapping( + self.text, + startValue=self.startValue, + endValue=self.endValue, + id=self.id + ) + @attr.s -class SingleStat(object): +class SingleStat(Panel): """Generates Single Stat panel json structure - Grafana doc on singlestat: http://docs.grafana.org/reference/singlestat/ + This panel was deprecated in Grafana 7.0, please use Stat instead + + Grafana doc on singlestat: https://grafana.com/docs/grafana/latest/features/panels/singlestat/ - :param dataSource: Grafana datasource name - :param targets: list of metric requests for chosen datasource - :param title: panel title :param cacheTimeout: metric query result cache ttl :param colors: the list of colors that can be used for coloring panel value or background. Additional info on coloring in docs: - http://docs.grafana.org/reference/singlestat/#coloring + https://grafana.com/docs/grafana/latest/features/panels/singlestat/#coloring :param colorBackground: defines if grafana will color panel background :param colorValue: defines if grafana will color panel value - :param description: optional panel description :param decimals: override automatic decimal precision for legend/tooltips - :param editable: defines if panel is editable via web interfaces :param format: defines value units :param gauge: draws and additional speedometer-like gauge based - :param height: defines panel height - :param hideTimeOverride: hides time overrides - :param id: panel id - :param interval: defines time interval between metric queries - :param links: additional web links :param mappingType: defines panel mapping type. 
Additional info can be found in docs: - http://docs.grafana.org/reference/singlestat/#value-to-text-mapping + https://grafana.com/docs/grafana/latest/features/panels/singlestat/#value-to-text-mapping :param mappingTypes: the list of available mapping types for panel - :param maxDataPoints: maximum metric query results, - that will be used for rendering - :param minSpan: minimum span number :param nullText: defines what to show if metric query result is undefined :param nullPointMode: defines how to render undefined values :param postfix: defines postfix that will be attached to value @@ -1399,37 +3066,24 @@ class SingleStat(object): :param prefix: defines prefix that will be attached to value :param prefixFontSize: defines prefix font size :param rangeMaps: the list of value to text mappings - :param span: defines the number of spans that will be used for panel :param sparkline: defines if grafana should draw an additional sparkline. Sparkline grafana documentation: - http://docs.grafana.org/reference/singlestat/#spark-lines + https://grafana.com/docs/grafana/latest/features/panels/singlestat/#spark-lines :param thresholds: single stat thresholds - :param transparent: defines if panel should be transparent :param valueFontSize: defines value font size :param valueName: defines value type. possible values are: min, max, avg, current, total, name, first, delta, range :param valueMaps: the list of value to text mappings - :param timeFrom: time range that Override relative time """ - dataSource = attr.ib() - targets = attr.ib() - title = attr.ib() cacheTimeout = attr.ib(default=None) colors = attr.ib(default=attr.Factory(lambda: [GREEN, ORANGE, RED])) colorBackground = attr.ib(default=False, validator=instance_of(bool)) colorValue = attr.ib(default=False, validator=instance_of(bool)) - description = attr.ib(default=None) decimals = attr.ib(default=None) - editable = attr.ib(default=True, validator=instance_of(bool)) - format = attr.ib(default="none") + format = attr.ib(default='none') gauge = attr.ib(default=attr.Factory(Gauge), validator=instance_of(Gauge)) - height = attr.ib(default=None) - hideTimeOverride = attr.ib(default=False, validator=instance_of(bool)) - id = attr.ib(default=None) - interval = attr.ib(default=None) - links = attr.ib(default=attr.Factory(list)) mappingType = attr.ib(default=MAPPING_TYPE_VALUE_TO_TEXT) mappingTypes = attr.ib( default=attr.Factory(lambda: [ @@ -1437,69 +3091,51 @@ class SingleStat(object): MAPPING_RANGE_TO_TEXT, ]), ) - maxDataPoints = attr.ib(default=100) minSpan = attr.ib(default=None) nullText = attr.ib(default=None) - nullPointMode = attr.ib(default="connected") + nullPointMode = attr.ib(default='connected') postfix = attr.ib(default="") - postfixFontSize = attr.ib(default="50%") + postfixFontSize = attr.ib(default='50%') prefix = attr.ib(default="") - prefixFontSize = attr.ib(default="50%") + prefixFontSize = attr.ib(default='50%') rangeMaps = attr.ib(default=attr.Factory(list)) - repeat = attr.ib(default=None) - span = attr.ib(default=6) sparkline = attr.ib( default=attr.Factory(SparkLine), validator=instance_of(SparkLine), ) thresholds = attr.ib(default="") - transparent = attr.ib(default=False, validator=instance_of(bool)) - valueFontSize = attr.ib(default="80%") + valueFontSize = attr.ib(default='80%') valueName = attr.ib(default=VTYPE_DEFAULT) valueMaps = attr.ib(default=attr.Factory(list)) - timeFrom = attr.ib(default=None) def to_json_data(self): - return { - 'cacheTimeout': self.cacheTimeout, - 'colorBackground': self.colorBackground, - 
'colorValue': self.colorValue, - 'colors': self.colors, - 'datasource': self.dataSource, - 'decimals': self.decimals, - 'description': self.description, - 'editable': self.editable, - 'format': self.format, - 'gauge': self.gauge, - 'id': self.id, - 'interval': self.interval, - 'links': self.links, - 'height': self.height, - 'hideTimeOverride': self.hideTimeOverride, - 'mappingType': self.mappingType, - 'mappingTypes': self.mappingTypes, - 'maxDataPoints': self.maxDataPoints, - 'minSpan': self.minSpan, - 'nullPointMode': self.nullPointMode, - 'nullText': self.nullText, - 'postfix': self.postfix, - 'postfixFontSize': self.postfixFontSize, - 'prefix': self.prefix, - 'prefixFontSize': self.prefixFontSize, - 'rangeMaps': self.rangeMaps, - 'repeat': self.repeat, - 'span': self.span, - 'sparkline': self.sparkline, - 'targets': self.targets, - 'thresholds': self.thresholds, - 'title': self.title, - 'transparent': self.transparent, - 'type': SINGLESTAT_TYPE, - 'valueFontSize': self.valueFontSize, - 'valueMaps': self.valueMaps, - 'valueName': self.valueName, - 'timeFrom': self.timeFrom, - } + return self.panel_json( + { + 'cacheTimeout': self.cacheTimeout, + 'colorBackground': self.colorBackground, + 'colorValue': self.colorValue, + 'colors': self.colors, + 'decimals': self.decimals, + 'format': self.format, + 'gauge': self.gauge, + 'mappingType': self.mappingType, + 'mappingTypes': self.mappingTypes, + 'minSpan': self.minSpan, + 'nullPointMode': self.nullPointMode, + 'nullText': self.nullText, + 'postfix': self.postfix, + 'postfixFontSize': self.postfixFontSize, + 'prefix': self.prefix, + 'prefixFontSize': self.prefixFontSize, + 'rangeMaps': self.rangeMaps, + 'sparkline': self.sparkline, + 'thresholds': self.thresholds, + 'type': SINGLESTAT_TYPE, + 'valueFontSize': self.valueFontSize, + 'valueMaps': self.valueMaps, + 'valueName': self.valueName, + } + ) @attr.s @@ -1539,12 +3175,27 @@ def to_json_data(self): @attr.s class StringColumnStyleType(object): TYPE = 'string' - - preserveFormat = attr.ib(validator=instance_of(bool)) - sanitize = attr.ib(validator=instance_of(bool)) + decimals = attr.ib(default=2, validator=instance_of(int)) + colorMode = attr.ib(default=None) + colors = attr.ib(default=attr.Factory(lambda: [GREEN, ORANGE, RED])) + thresholds = attr.ib(default=attr.Factory(list)) + preserveFormat = attr.ib(validator=instance_of(bool), default=False) + sanitize = attr.ib(validator=instance_of(bool), default=False) + unit = attr.ib(default=SHORT_FORMAT) + mappingType = attr.ib(default=MAPPING_TYPE_VALUE_TO_TEXT) + valueMaps = attr.ib(default=attr.Factory(list)) + rangeMaps = attr.ib(default=attr.Factory(list)) def to_json_data(self): return { + 'decimals': self.decimals, + 'colorMode': self.colorMode, + 'colors': self.colors, + 'thresholds': self.thresholds, + 'unit': self.unit, + 'mappingType': self.mappingType, + 'valueMaps': self.valueMaps, + 'rangeMaps': self.rangeMaps, 'preserveFormat': self.preserveFormat, 'sanitize': self.sanitize, 'type': self.TYPE, @@ -1566,6 +3217,12 @@ class ColumnStyle(object): alias = attr.ib(default="") pattern = attr.ib(default="") + align = attr.ib(default='auto', validator=in_( + ['auto', 'left', 'right', 'center'])) + link = attr.ib(validator=instance_of(bool), default=False) + linkOpenInNewTab = attr.ib(validator=instance_of(bool), default=False) + linkUrl = attr.ib(validator=instance_of(str), default="") + linkTooltip = attr.ib(validator=instance_of(str), default="") type = attr.ib( default=attr.Factory(NumberColumnStyleType), validator=instance_of(( @@ 
-1580,6 +3237,11 @@ def to_json_data(self): data = { 'alias': self.alias, 'pattern': self.pattern, + 'align': self.align, + 'link': self.link, + 'linkTargetBlank': self.linkOpenInNewTab, + 'linkUrl': self.linkUrl, + 'linkTooltip': self.linkTooltip, } data.update(self.type.to_json_data()) return data @@ -1605,8 +3267,8 @@ class Column(object): :param value: aggregation function """ - text = attr.ib(default="Avg") - value = attr.ib(default="avg") + text = attr.ib(default='Avg') + value = attr.ib(default='avg') def to_json_data(self): return { @@ -1753,6 +3415,1225 @@ def to_json_data(self): } +@attr.s +class BarGauge(Panel): + """Generates Bar Gauge panel json structure + + :param allValue: If All values should be shown or a Calculation + :param calc: Calculation to perform on metrics + :param dataLinks: list of data links hooked to datapoints on the graph + :param decimals: override automatic decimal precision for legend/tooltips + :param displayMode: style to display bar gauge in + :param format: defines value units + :param labels: option to show gauge level labels + :param limit: limit of number of values to show when not Calculating + :param max: maximum value of the gauge + :param min: minimum value of the gauge + :param orientation: orientation of the bar gauge + :param rangeMaps: the list of value to text mappings + :param thresholdLabel: label for gauge. Template Variables: + "$__series_namei" "$__field_name" "$__cell_{N} / $__calc" + :param thresholdMarkers: option to show marker of level on gauge + :param thresholds: single stat thresholds + :param valueMaps: the list of value to text mappings + """ + + allValues = attr.ib(default=False, validator=instance_of(bool)) + calc = attr.ib(default=GAUGE_CALC_MEAN) + dataLinks = attr.ib(default=attr.Factory(list)) + decimals = attr.ib(default=None) + displayMode = attr.ib( + default=GAUGE_DISPLAY_MODE_LCD, + validator=in_( + [ + GAUGE_DISPLAY_MODE_LCD, + GAUGE_DISPLAY_MODE_BASIC, + GAUGE_DISPLAY_MODE_GRADIENT, + ] + ), + ) + format = attr.ib(default='none') + label = attr.ib(default=None) + limit = attr.ib(default=None) + max = attr.ib(default=100) + min = attr.ib(default=0) + orientation = attr.ib( + default=ORIENTATION_HORIZONTAL, + validator=in_([ORIENTATION_HORIZONTAL, ORIENTATION_VERTICAL]), + ) + rangeMaps = attr.ib(default=attr.Factory(list)) + thresholdLabels = attr.ib(default=False, validator=instance_of(bool)) + thresholdMarkers = attr.ib(default=True, validator=instance_of(bool)) + thresholds = attr.ib( + default=attr.Factory( + lambda: [ + Threshold('green', 0, 0.0), + Threshold('red', 1, 80.0) + ] + ), + validator=instance_of(list), + ) + valueMaps = attr.ib(default=attr.Factory(list)) + + def to_json_data(self): + return self.panel_json( + { + 'options': { + 'displayMode': self.displayMode, + 'fieldOptions': { + 'calcs': [self.calc], + 'defaults': { + 'decimals': self.decimals, + 'max': self.max, + 'min': self.min, + 'title': self.label, + 'unit': self.format, + 'links': self.dataLinks, + }, + 'limit': self.limit, + 'mappings': self.valueMaps, + 'override': {}, + 'thresholds': self.thresholds, + 'values': self.allValues, + }, + 'orientation': self.orientation, + 'showThresholdLabels': self.thresholdLabels, + 'showThresholdMarkers': self.thresholdMarkers, + }, + 'type': BARGAUGE_TYPE, + } + ) + + +@attr.s +class GaugePanel(Panel): + """Generates Gauge panel json structure + + :param allValue: If All values should be shown or a Calculation + :param calc: Calculation to perform on metrics + :param dataLinks: list of data links 
hooked to datapoints on the graph + :param decimals: override automatic decimal precision for legend/tooltips + :param format: defines value units + :param labels: option to show gauge level labels + :param limit: limit of number of values to show when not Calculating + :param max: maximum value of the gauge + :param min: minimum value of the gauge + :param rangeMaps: the list of value to text mappings + :param thresholdLabel: label for gauge. Template Variables: + "$__series_namei" "$__field_name" "$__cell_{N} / $__calc" + :param thresholdMarkers: option to show marker of level on gauge + :param thresholds: single stat thresholds + :param valueMaps: the list of value to text mappings + """ + + allValues = attr.ib(default=False, validator=instance_of(bool)) + calc = attr.ib(default=GAUGE_CALC_MEAN) + dataLinks = attr.ib(default=attr.Factory(list)) + decimals = attr.ib(default=None) + format = attr.ib(default='none') + label = attr.ib(default=None) + limit = attr.ib(default=None) + max = attr.ib(default=100) + min = attr.ib(default=0) + rangeMaps = attr.ib(default=attr.Factory(list)) + thresholdLabels = attr.ib(default=False, validator=instance_of(bool)) + thresholdMarkers = attr.ib(default=True, validator=instance_of(bool)) + thresholds = attr.ib( + default=attr.Factory( + lambda: [ + Threshold('green', 0, 0.0), + Threshold('red', 1, 80.0) + ] + ), + validator=instance_of(list), + ) + valueMaps = attr.ib(default=attr.Factory(list)) + + def to_json_data(self): + return self.panel_json( + { + 'fieldConfig': { + 'defaults': { + 'calcs': [self.calc], + 'decimals': self.decimals, + 'max': self.max, + 'min': self.min, + 'title': self.label, + 'unit': self.format, + 'links': self.dataLinks, + 'limit': self.limit, + 'mappings': self.valueMaps, + 'override': {}, + 'values': self.allValues, + }, + 'showThresholdLabels': self.thresholdLabels, + 'showThresholdMarkers': self.thresholdMarkers, + }, + 'type': GAUGE_TYPE, + } + ) + + +@attr.s +class HeatmapColor(object): + """A Color object for heatmaps + + :param cardColor: color + :param colorScale: scale + :param colorScheme: scheme + :param exponent: exponent + :param max: max + :param min: min + :param mode: mode + """ + + # Maybe cardColor should validate to RGBA object, not sure + cardColor = attr.ib(default='#b4ff00', validator=instance_of(str)) + colorScale = attr.ib(default='sqrt', validator=instance_of(str)) + colorScheme = attr.ib(default='interpolateOranges') + exponent = attr.ib(default=0.5, validator=instance_of(float)) + mode = attr.ib(default='spectrum', validator=instance_of(str)) + max = attr.ib(default=None) + min = attr.ib(default=None) + + def to_json_data(self): + return { + 'mode': self.mode, + 'cardColor': self.cardColor, + 'colorScale': self.colorScale, + 'exponent': self.exponent, + 'colorScheme': self.colorScheme, + 'max': self.max, + 'min': self.min, + } + + +@attr.s +class Heatmap(Panel): + """Generates Heatmap panel json structure (https://grafana.com/docs/grafana/latest/features/panels/heatmap/) + + :param heatmap: dict + :param cards: A heatmap card object: keys "cardPadding", "cardRound" + :param color: Heatmap color object + :param dataFormat: 'timeseries' or 'tsbuckets' + :param yBucketBound: 'auto', 'upper', 'middle', 'lower' + :param reverseYBuckets: boolean + :param xBucketSize: Size + :param xBucketNumber: Number + :param yBucketSize: Size + :param yBucketNumber: Number + :param highlightCards: boolean + :param hideZeroBuckets: boolean + :param transparent: defines if the panel should be transparent + """ + + # The 
below does not really like the Legend class we have defined above + legend = attr.ib(default={'show': False}) + tooltip = attr.ib( + default=attr.Factory(Tooltip), + validator=instance_of(Tooltip), + ) + cards = attr.ib( + default={ + 'cardPadding': None, + 'cardRound': None + } + ) + + color = attr.ib( + default=attr.Factory(HeatmapColor), + validator=instance_of(HeatmapColor), + ) + + dataFormat = attr.ib(default='timeseries') + heatmap = {} + hideZeroBuckets = attr.ib(default=False) + highlightCards = attr.ib(default=True) + options = attr.ib(default=None) + + xAxis = attr.ib( + default=attr.Factory(XAxis), + validator=instance_of(XAxis) + ) + xBucketNumber = attr.ib(default=None) + xBucketSize = attr.ib(default=None) + + yAxis = attr.ib( + default=attr.Factory(YAxis), + validator=instance_of(YAxis) + ) + yBucketBound = attr.ib(default=None) + yBucketNumber = attr.ib(default=None) + yBucketSize = attr.ib(default=None) + reverseYBuckets = attr.ib(default=False) + + def to_json_data(self): + return self.panel_json( + { + 'cards': self.cards, + 'color': self.color, + 'dataFormat': self.dataFormat, + 'heatmap': self.heatmap, + 'hideZeroBuckets': self.hideZeroBuckets, + 'highlightCards': self.highlightCards, + 'legend': self.legend, + 'options': self.options, + 'reverseYBuckets': self.reverseYBuckets, + 'tooltip': self.tooltip, + 'type': HEATMAP_TYPE, + 'xAxis': self.xAxis, + 'xBucketNumber': self.xBucketNumber, + 'xBucketSize': self.xBucketSize, + 'yAxis': self.yAxis, + 'yBucketBound': self.yBucketBound, + 'yBucketNumber': self.yBucketNumber, + 'yBucketSize': self.yBucketSize + } + ) + + +@attr.s +class StatusmapColor(object): + """A Color object for Statusmaps + + :param cardColor: colour + :param colorScale: scale + :param colorScheme: scheme + :param exponent: exponent + :param max: max + :param min: min + :param mode: mode + :param thresholds: threshold + """ + + # Maybe cardColor should validate to RGBA object, not sure + cardColor = attr.ib(default='#b4ff00', validator=instance_of(str)) + colorScale = attr.ib(default='sqrt', validator=instance_of(str)) + colorScheme = attr.ib(default='GnYlRd', validator=instance_of(str)) + exponent = attr.ib(default=0.5, validator=instance_of(float)) + mode = attr.ib(default='spectrum', validator=instance_of(str)) + thresholds = attr.ib(factory=list, validator=instance_of(list)) + max = attr.ib(default=None) + min = attr.ib(default=None) + + def to_json_data(self): + return { + 'mode': self.mode, + 'cardColor': self.cardColor, + 'colorScale': self.colorScale, + 'exponent': self.exponent, + 'colorScheme': self.colorScheme, + 'max': self.max, + 'min': self.min, + 'thresholds': self.thresholds + } + + +@attr.s +class Statusmap(Panel): + """Generates json structure for the flant-statusmap-panel visualisation plugin + (https://grafana.com/grafana/plugins/flant-statusmap-panel/). 
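The GaugePanel and Heatmap panels above take Threshold and HeatmapColor objects respectively; a short sketch using options shown in this patch (the ``'percent'`` unit string and the metric names are assumptions)::

    # Sketch only; Threshold is the class defined further down in this patch.
    from grafanalib.core import GaugePanel, Heatmap, HeatmapColor, Target, Threshold

    disk_usage = GaugePanel(
        title="Disk usage",
        dataSource="Prometheus",
        targets=[Target(expr='disk_used_percent', refId='A')],
        min=0,
        max=100,
        format='percent',
        thresholds=[
            Threshold('green', 0, 0.0),
            Threshold('orange', 1, 75.0),
            Threshold('red', 2, 90.0),
        ],
    )

    request_latency = Heatmap(
        title="Request latency",
        dataSource="Prometheus",
        dataFormat='tsbuckets',
        color=HeatmapColor(colorScheme='interpolateOranges'),
        hideZeroBuckets=True,
    )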
+ + :param alert: Alert + :param cards: A statusmap card object: keys 'cardRound', 'cardMinWidth', 'cardHSpacing', 'cardVSpacing' + :param color: A StatusmapColor object + :param isNew: isNew + :param legend: Legend object + :param nullPointMode: null + :param tooltip: Tooltip object + :param xAxis: XAxis object + :param yAxis: YAxis object + """ + + alert = attr.ib(default=None) + cards = attr.ib( + default={ + 'cardRound': None, + 'cardMinWidth': 5, + 'cardHSpacing': 2, + 'cardVSpacing': 2, + }, validator=instance_of(dict)) + + color = attr.ib( + default=attr.Factory(StatusmapColor), + validator=instance_of(StatusmapColor), + ) + + isNew = attr.ib(default=True, validator=instance_of(bool)) + legend = attr.ib( + default=attr.Factory(Legend), + validator=instance_of(Legend), + ) + nullPointMode = attr.ib(default=NULL_AS_ZERO) + tooltip = attr.ib( + default=attr.Factory(Tooltip), + validator=instance_of(Tooltip), + ) + xAxis = attr.ib( + default=attr.Factory(XAxis), + validator=instance_of(XAxis) + ) + yAxis = attr.ib( + default=attr.Factory(YAxis), + validator=instance_of(YAxis) + ) + + def to_json_data(self): + graphObject = { + 'color': self.color, + 'isNew': self.isNew, + 'legend': self.legend, + 'minSpan': self.minSpan, + 'nullPointMode': self.nullPointMode, + 'tooltip': self.tooltip, + 'type': STATUSMAP_TYPE, + 'xaxis': self.xAxis, + 'yaxis': self.yAxis, + } + if self.alert: + graphObject['alert'] = self.alert + return self.panel_json(graphObject) + + +@attr.s +class Svg(Panel): + """Generates SVG panel json structure + Grafana doc on SVG: https://grafana.com/grafana/plugins/marcuscalidus-svg-panel + + :param format: defines value units + :param jsCodeFilePath: path to javascript file to be run on dashboard refresh + :param jsCodeInitFilePath: path to javascript file to be run after the first initialization of the SVG + :param reduceCalc: algorithm for reduction to a single value, + keys 'mean' 'lastNotNull' 'last' 'first' 'firstNotNull' 'min' 'max' 'sum' 'total' + :param svgFilePath: path to SVG image file to be displayed + """ + + format = attr.ib(default='none') + jsCodeFilePath = attr.ib(default="", validator=instance_of(str)) + jsCodeInitFilePath = attr.ib(default="", validator=instance_of(str)) + height = attr.ib(default=None) + svgFilePath = attr.ib(default="", validator=instance_of(str)) + + @staticmethod + def read_file(file_path): + if file_path: + with open(file_path) as f: + read_data = f.read() + return read_data + else: + return '' + + def to_json_data(self): + + js_code = self.read_file(self.jsCodeFilePath) + js_init_code = self.read_file(self.jsCodeInitFilePath) + svg_data = self.read_file(self.svgFilePath) + + return self.panel_json( + { + 'format': self.format, + 'js_code': js_code, + 'js_init_code': js_init_code, + 'svg_data': svg_data, + 'type': SVG_TYPE, + 'useSVGBuilder': False + } + ) + + +@attr.s +class PieChart(Panel): + """Generates Pie Chart panel json structure + + This panel was deprecated in Grafana 8.0, please use PieChartv2 instead + + Grafana doc on Pie Chart: https://grafana.com/grafana/plugins/grafana-piechart-panel + + :param aliasColors: dictionary of color overrides + :param format: defines value units + :param legendType: defines where the legend position + :param overrides: To override the base characteristics of certain data + :param pieType: defines the shape of the pie chart (pie or donut) + :param percentageDecimals: Number of decimal places to show if percentages shown in legned + :param showLegend: defines if the legend should be shown + 
:param showLegendValues: defines if the legend should show values + :param showLegendPercentage: Show percentages in the legend + :param thresholds: defines thresholds + """ + + aliasColors = attr.ib(default=attr.Factory(dict)) + format = attr.ib(default='none') + legendType = attr.ib(default='Right side') + overrides = attr.ib(default=attr.Factory(list)) + pieType = attr.ib(default='pie') + percentageDecimals = attr.ib(default=0, validator=instance_of(int)) + showLegend = attr.ib(default=True) + showLegendValues = attr.ib(default=True) + showLegendPercentage = attr.ib(default=False, validator=instance_of(bool)) + thresholds = attr.ib(default="") + + def to_json_data(self): + print('PieChart panel was deprecated in Grafana 8.0, please use PieChartv2 instead') + return self.panel_json( + { + 'aliasColors': self.aliasColors, + 'format': self.format, + 'pieType': self.pieType, + 'height': self.height, + 'fieldConfig': { + 'defaults': { + 'custom': {}, + }, + 'overrides': self.overrides + }, + 'legend': { + 'show': self.showLegend, + 'values': self.showLegendValues, + 'percentage': self.showLegendPercentage, + 'percentageDecimals': self.percentageDecimals + }, + 'legendType': self.legendType, + 'type': PIE_CHART_TYPE, + } + ) + + +@attr.s +class PieChartv2(Panel): + """Generates Pie Chart panel json structure + Grafana docs on Pie Chart: https://grafana.com/docs/grafana/latest/visualizations/pie-chart-panel/ + + :param custom: Custom overides + :param colorMode: Color mode + palette-classic (Default), + :param legendDisplayMode: Display mode of legend: list, table or hidden + :param legendPlacement: Location of the legend in the panel: bottom or right + :param legendValues: List of value to be shown in legend eg. ['value', 'percent'] + :param mappings: To assign colors to boolean or string values, use Value mappings + :param overrides: Overrides + :param pieType: Pie chart type + pie (Default), donut + :param reduceOptionsCalcs: Reducer function / calculation + :param reduceOptionsFields: Fields that should be included in the panel + :param reduceOptionsValues: Calculate a single value per column or series or show each row + :param tooltipMode: Tooltip mode + single (Default), multi, none + :param unit: units + """ + + custom = attr.ib(factory=dict, validator=instance_of(dict)) + colorMode = attr.ib(default='palette-classic', validator=instance_of(str)) + legendDisplayMode = attr.ib(default='list', validator=instance_of(str)) + legendPlacement = attr.ib(default='bottom', validator=instance_of(str)) + legendValues = attr.ib(factory=list, validator=instance_of(list)) + mappings = attr.ib(default=attr.Factory(list)) + overrides = attr.ib(factory=list, validator=instance_of(list)) + pieType = attr.ib(default='pie', validator=instance_of(str)) + reduceOptionsCalcs = attr.ib(default=['lastNotNull'], validator=instance_of(list)) + reduceOptionsFields = attr.ib(default='', validator=instance_of(str)) + reduceOptionsValues = attr.ib(default=False, validator=instance_of(bool)) + tooltipMode = attr.ib(default='single', validator=instance_of(str)) + unit = attr.ib(default='', validator=instance_of(str)) + + def to_json_data(self): + return self.panel_json( + { + 'fieldConfig': { + 'defaults': { + 'color': { + 'mode': self.colorMode + }, + 'custom': self.custom, + 'mappings': self.mappings, + 'unit': self.unit, + }, + 'overrides': self.overrides, + }, + 'options': { + 'reduceOptions': { + 'values': self.reduceOptionsValues, + 'calcs': self.reduceOptionsCalcs, + 'fields': self.reduceOptionsFields + }, + 
'pieType': self.pieType, + 'tooltip': { + 'mode': self.tooltipMode + }, + 'legend': { + 'displayMode': self.legendDisplayMode, + 'placement': self.legendPlacement, + 'values': self.legendValues + }, + }, + 'type': PIE_CHART_V2_TYPE, + } + ) + + +@attr.s +class DashboardList(Panel): + """Generates Dashboard list panel json structure + Grafana doc on Dashboard list: https://grafana.com/docs/grafana/latest/panels/visualizations/dashboard-list-panel/ + + :param showHeadings: The chosen list selection (Starred, Recently viewed, Search) is shown as a heading + :param showSearch: Display dashboards by search query or tags. + Requires you to enter at least one value in Query or Tags + :param showRecent: Display recently viewed dashboards in alphabetical order + :param showStarred: Display starred dashboards in alphabetical order + :param maxItems: Sets the maximum number of items to list per section + :param searchQuery: Enter the query you want to search by + :param searchTags: List of tags you want to search by + :param overrides: To override the base characteristics of certain data + """ + showHeadings = attr.ib(default=True, validator=instance_of(bool)) + showSearch = attr.ib(default=False, validator=instance_of(bool)) + showRecent = attr.ib(default=False, validator=instance_of(bool)) + showStarred = attr.ib(default=True, validator=instance_of(bool)) + maxItems = attr.ib(default=10, validator=instance_of(int)) + searchQuery = attr.ib(default='', validator=instance_of(str)) + searchTags = attr.ib(default=attr.Factory(list), validator=instance_of(list)) + overrides = attr.ib(default=attr.Factory(list)) + + def to_json_data(self): + return self.panel_json( + { + 'fieldConfig': { + 'defaults': { + 'custom': {}, + }, + 'overrides': self.overrides + }, + 'headings': self.showHeadings, + 'search': self.showSearch, + 'recent': self.showRecent, + 'starred': self.showStarred, + 'limit': self.maxItems, + 'query': self.searchQuery, + 'tags': self.searchTags, + 'type': DASHBOARDLIST_TYPE, + } + ) + + +@attr.s +class Logs(Panel): + """Generates Logs panel json structure + Grafana doc on Logs panel: https://grafana.com/docs/grafana/latest/panels/visualizations/logs-panel/ + + :param showLabels: Show or hide the unique labels column, which shows only non-common labels + :param showCommonLabels: Show or hide the common labels. + :param showTime: Show or hide the log timestamp column + :param wrapLogMessages: Toggle line wrapping + :param sortOrder: Display results in 'Descending' or 'Ascending' time order. The default is Descending, + showing the newest logs first. + :param dedupStrategy: One of none, exact, numbers, signature. Default is none + :param enableLogDetails: Set this to True to see the log details view for each log row. + :param overrides: To override the base characteristics of certain data + :param prettifyLogMessage: Set this to true to pretty print all JSON logs. This setting does not affect logs in any format other than JSON. 
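+
+    Example (a minimal usage sketch; the panel title and the datasource name
+    'Loki' below are purely illustrative)::
+
+        Logs(
+            title='Application logs',
+            dataSource='Loki',
+            showTime=True,
+            enableLogDetails=True,
+            prettifyLogMessage=True,
+        )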
+ """ + showLabels = attr.ib(default=False, validator=instance_of(bool)) + showCommonLabels = attr.ib(default=False, validator=instance_of(bool)) + showTime = attr.ib(default=False, validator=instance_of(bool)) + wrapLogMessages = attr.ib(default=False, validator=instance_of(bool)) + sortOrder = attr.ib(default='Descending', validator=instance_of(str)) + dedupStrategy = attr.ib(default='none', validator=instance_of(str)) + enableLogDetails = attr.ib(default=False, validator=instance_of(bool)) + overrides = attr.ib(default=attr.Factory(list)) + prettifyLogMessage = attr.ib(default=False, validator=instance_of(bool)) + + def to_json_data(self): + return self.panel_json( + { + 'fieldConfig': { + 'defaults': { + 'custom': {}, + }, + 'overrides': self.overrides + }, + 'options': { + 'showLabels': self.showLabels, + 'showCommonLabels': self.showCommonLabels, + 'showTime': self.showTime, + 'wrapLogMessage': self.wrapLogMessages, + 'sortOrder': self.sortOrder, + 'dedupStrategy': self.dedupStrategy, + 'enableLogDetails': self.enableLogDetails, + 'prettifyLogMessage': self.prettifyLogMessage + }, + 'type': LOGS_TYPE, + } + ) + + +@attr.s +class Threshold(object): + """Threshold for panels + + :param color: Color of threshold + :param index: Index of color in panel + :param line: Display Threshold line, defaults to True + :param value: Value at which the threshold takes effect; stored as null if index is 0 + :param op: EVAL_LT for less than or EVAL_GT for greater than to indicate what the threshold applies to. + :param yaxis: Choose left or right for panels + + Care must be taken in the order in which the Threshold objects are specified; + Grafana expects the values to increase. + + Example:: + thresholds = [ + Threshold('green', 0, 0.0), + Threshold('red', 1, 80.0)] + + """ + + color = attr.ib() + index = attr.ib(validator=instance_of(int)) + value = attr.ib(validator=instance_of(float)) + line = attr.ib(default=True, validator=instance_of(bool)) + op = attr.ib(default=EVAL_GT) + yaxis = attr.ib(default='left') + + def to_json_data(self): + return { + 'op': self.op, + 'yaxis': self.yaxis, + 'color': self.color, + 'line': self.line, + 'index': self.index, + 'value': 'null' if self.index == 0 else self.value, + } + + +@attr.s +class GraphThreshold(object): + """Threshold for Graph panel + + :param colorMode: Color mode of the threshold, value can be `ok`, `warning`, `critical` or `custom`. + If `custom` is selected a lineColor and fillColor should be provided + :param fill: Display threshold fill, defaults to True + :param line: Display threshold line, defaults to True + :param value: Value at which the threshold takes effect + :param op: EVAL_LT for less than or EVAL_GT for greater than to indicate what the threshold applies to.
+ :param yaxis: Choose left or right for Graph panels + :param fillColor: Fill color of the threshold, when colorMode = "custom" + :param lineColor: Line color of the threshold, when colorMode = "custom" + + Example:: + thresholds = [ + GraphThreshold(colorMode="ok", value=10.0), + GraphThreshold(colorMode="critical", value=90.0) + ] + + """ + + value = attr.ib(validator=instance_of(float)) + colorMode = attr.ib(default="critical") + fill = attr.ib(default=True, validator=instance_of(bool)) + line = attr.ib(default=True, validator=instance_of(bool)) + op = attr.ib(default=EVAL_GT) + yaxis = attr.ib(default='left') + fillColor = attr.ib(default=RED) + lineColor = attr.ib(default=RED) + + def to_json_data(self): + data = { + 'value': self.value, + 'colorMode': self.colorMode, + 'fill': self.fill, + 'line': self.line, + 'op': self.op, + 'yaxis': self.yaxis, + } + + if self.colorMode == "custom": + data['fillColor'] = self.fillColor + data['lineColor'] = self.lineColor + + return data + + +@attr.s +class SeriesOverride(object): + """ + To override properties of e.g. Graphs. + + :param alias: Name of the metric to apply to + :param bars: Whether to show data point bars + :param lines: Whether to keep graph lines + :param yaxis: Whether to move axis of the metric to the right (=2) or not (=1) + :param fill: Fill strength (0...10) + :param color: Color to use for the series + :param fillBelowTo: Alias of the other metric to fill below + :param zindex: Move things to front or background (-3...3) + :param dashes: Whether to dash the line + :param dashLength: Length of dashes (1..20) + :param spaceLength: Length of spaces between dashes (1..20) + """ + alias = attr.ib(validator=instance_of(str)) + bars = attr.ib(default=False, validator=instance_of(bool)) + lines = attr.ib(default=True, validator=instance_of(bool)) + yaxis = attr.ib(default=1, validator=attr.validators.in_([1, 2])) + fill = attr.ib(default=1, validator=attr.validators.in_(range(11))) + zindex = attr.ib(default=0, validator=attr.validators.in_(range(-3, 4))) + dashes = attr.ib(default=False, validator=instance_of(bool)) + dashLength = attr.ib(default=None, validator=attr.validators.in_([*range(1, 21), None])) + spaceLength = attr.ib(default=None, validator=attr.validators.in_([*range(1, 21), None])) + + color = attr.ib(default=None) + fillBelowTo = attr.ib( + default=None, + validator=attr.validators.instance_of((str, type(None))) + ) + + def to_json_data(self): + return { + 'alias': self.alias, + 'bars': self.bars, + 'lines': self.lines, + 'yaxis': self.yaxis, + 'fill': self.fill, + 'color': self.color, + 'fillBelowTo': self.fillBelowTo, + 'zindex': self.zindex, + 'dashes': self.dashes, + 'dashLength': self.dashLength, + 'spaceLength': self.spaceLength, + } + + +WORLDMAP_CENTER = ['(0°, 0°)', 'North America', 'Europe', 'West Asia', 'SE Asia', 'Last GeoHash', 'custom'] +WORLDMAP_LOCATION_DATA = ['countries', 'countries_3letter', 'states', 'probes', 'geohash', 'json_endpoint', 'jsonp endpoint', 'json result', 'table'] + + +@attr.s +class Worldmap(Panel): + """Generates Worldmap panel json structure + Grafana doc on Worldmap: https://grafana.com/grafana/plugins/grafana-worldmap-panel/ + + :param aggregation: metric aggregation: min, max, avg, current, total + :param circleMaxSize: Maximum map circle size + :param circleMinSize: Minimum map circle size + :param decimals: Number of decimals to show + :param geoPoint: Name of the geo_point/geohash column.
This is used to calculate where the circle should be drawn. + :param locationData: Format of the location data, options in `WORLDMAP_LOCATION_DATA` + :param locationName: Name of the Location Name column. Used to label each circle on the map. If it is empty then the geohash value is used. + :param metric: Name of the metric column. This is used to give the circle a value - this determines how large the circle is. + :param mapCenter: Where to centre the map, default center (0°, 0°). Options: North America, Europe, West Asia, SE Asia, Last GeoHash, custom + :param mapCenterLatitude: If mapCenter=custom set the initial map latitude + :param mapCenterLongitude: If mapCenter=custom set the initial map longitude + :param hideEmpty: Hide series with only nulls + :param hideZero: Hide series with only zeros + :param initialZoom: Initial map zoom + :param jsonUrl: URL for JSON location data if `json_endpoint` or `jsonp endpoint` used + :param jsonpCallback: Callback if `jsonp endpoint` used + :param mouseWheelZoom: Zoom map on scroll of mouse wheel + :param stickyLabels: Sticky map labels + :param thresholds: String of thresholds eg. '0,10,20' + :param thresholdsColors: List of colors to be used in each threshold + :param unitPlural: Units plural + :param unitSingle: Units single + :param unitSingular: Units singular + """ + + circleMaxSize = attr.ib(default=30, validator=instance_of(int)) + circleMinSize = attr.ib(default=2, validator=instance_of(int)) + decimals = attr.ib(default=0, validator=instance_of(int)) + geoPoint = attr.ib(default='geohash', validator=instance_of(str)) + locationData = attr.ib(default='countries', validator=attr.validators.in_(WORLDMAP_LOCATION_DATA)) + locationName = attr.ib(default='') + hideEmpty = attr.ib(default=False, validator=instance_of(bool)) + hideZero = attr.ib(default=False, validator=instance_of(bool)) + initialZoom = attr.ib(default=1, validator=instance_of(int)) + jsonUrl = attr.ib(default='', validator=instance_of(str)) + jsonpCallback = attr.ib(default='', validator=instance_of(str)) + mapCenter = attr.ib(default='(0°, 0°)', validator=attr.validators.in_(WORLDMAP_CENTER)) + mapCenterLatitude = attr.ib(default=0, validator=instance_of(int)) + mapCenterLongitude = attr.ib(default=0, validator=instance_of(int)) + metric = attr.ib(default='Value') + mouseWheelZoom = attr.ib(default=False, validator=instance_of(bool)) + stickyLabels = attr.ib(default=False, validator=instance_of(bool)) + thresholds = attr.ib(default='0,100,150', validator=instance_of(str)) + thresholdColors = attr.ib(default=["#73BF69", "#73BF69", "#FADE2A", "#C4162A"], validator=instance_of(list)) + unitPlural = attr.ib(default='', validator=instance_of(str)) + unitSingle = attr.ib(default='', validator=instance_of(str)) + unitSingular = attr.ib(default='', validator=instance_of(str)) + aggregation = attr.ib(default='total', validator=instance_of(str)) + + def to_json_data(self): + return self.panel_json( + { + 'circleMaxSize': self.circleMaxSize, + 'circleMinSize': self.circleMinSize, + 'colors': self.thresholdColors, + 'decimals': self.decimals, + 'esGeoPoint': self.geoPoint, + 'esMetric': self.metric, + 'locationData': self.locationData, + 'esLocationName': self.locationName, + 'hideEmpty': self.hideEmpty, + 'hideZero': self.hideZero, + 'initialZoom': self.initialZoom, + 'jsonUrl': self.jsonUrl, + 'jsonpCallback': self.jsonpCallback, + 'mapCenter': self.mapCenter, + 'mapCenterLatitude': self.mapCenterLatitude, + 'mapCenterLongitude': self.mapCenterLongitude, + 'mouseWheelZoom': 
self.mouseWheelZoom, + 'stickyLabels': self.stickyLabels, + 'thresholds': self.thresholds, + 'unitPlural': self.unitPlural, + 'unitSingle': self.unitSingle, + 'unitSingular': self.unitSingular, + 'valueName': self.aggregation, + 'tableQueryOptions': { + 'queryType': 'geohash', + 'geohashField': 'geohash', + 'latitudeField': 'latitude', + 'longitudeField': 'longitude', + 'metricField': 'metric' + }, + 'type': WORLD_MAP_TYPE + } + ) + + +@attr.s +class StateTimeline(Panel): + """Generates State Timeline panel json structure + Grafana docs on State Timeline panel: https://grafana.com/docs/grafana/latest/visualizations/state-timeline/ + + :param alignValue: Controls value alignment inside state regions, default left + :param colorMode: Default thresholds + :param fillOpacity: Controls the opacity of state regions, default 0.9 + :param legendDisplayMode: refine how the legend appears, list, table or hidden + :param legendPlacement: bottom or top + :param lineWidth: Controls line width of state regions + :param mappings: To assign colors to boolean or string values, use Value mappings + :param overrides: To override the base characteristics of certain data + :param mergeValues: Controls whether Grafana merges identical values if they are next to each other, default True + :param rowHeight: Controls how much space between rows there are. 1 = no space = 0.5 = 50% space + :param showValue: Controls whether values are rendered inside the state regions. Auto will render values if there is sufficient space. + :param tooltipMode: Default single + """ + alignValue = attr.ib(default='left', validator=instance_of(str)) + colorMode = attr.ib(default='thresholds', validator=instance_of(str)) + fillOpacity = attr.ib(default=70, validator=instance_of(int)) + legendDisplayMode = attr.ib(default='list', validator=instance_of(str)) + legendPlacement = attr.ib(default='bottom', validator=instance_of(str)) + lineWidth = attr.ib(default=0, validator=instance_of(int)) + mappings = attr.ib(default=attr.Factory(list)) + overrides = attr.ib(default=attr.Factory(list)) + mergeValues = attr.ib(default=True, validator=instance_of(bool)) + rowHeight = attr.ib(default=0.9, validator=instance_of(float)) + showValue = attr.ib(default='auto', validator=instance_of(str)) + tooltipMode = attr.ib(default='single', validator=instance_of(str)) + + def to_json_data(self): + return self.panel_json( + { + 'fieldConfig': { + 'defaults': { + 'custom': { + 'lineWidth': self.lineWidth, + 'fillOpacity': self.fillOpacity + }, + 'color': { + 'mode': self.colorMode + }, + 'mappings': self.mappings + }, + 'overrides': self.overrides + }, + 'options': { + 'mergeValues': self.mergeValues, + 'showValue': self.showValue, + 'alignValue': self.alignValue, + 'rowHeight': self.rowHeight, + 'legend': { + 'displayMode': self.legendDisplayMode, + 'placement': self.legendPlacement + }, + 'tooltip': { + 'mode': self.tooltipMode + } + }, + 'type': STATE_TIMELINE_TYPE, + } + ) + + +@attr.s +class Histogram(Panel): + """Generates Histogram panel json structure + Grafana docs on Histogram panel: https://grafana.com/docs/grafana/latest/visualizations/histogram/# + + :param bucketOffset: Bucket offset for none-zero-based buckets + :param bucketSize: Bucket size, default Auto + :param colorMode: Default thresholds + :param combine: Combine all series into a single histogram + :param fillOpacity: Controls the opacity of state regions, default 0.9 + :param legendDisplayMode: refine how the legend appears, list, table or hidden + :param legendPlacement: bottom or 
top + :param lineWidth: Controls line width of state regions + :param mappings: To assign colors to boolean or string values, use Value mappings + :param overrides: To override the base characteristics of certain data + """ + bucketOffset = attr.ib(default=0, validator=instance_of(int)) + bucketSize = attr.ib(default=0, validator=instance_of(int)) + colorMode = attr.ib(default='thresholds', validator=instance_of(str)) + combine = attr.ib(default=False, validator=instance_of(bool)) + fillOpacity = attr.ib(default=80, validator=instance_of(int)) + legendDisplayMode = attr.ib(default='list', validator=instance_of(str)) + legendPlacement = attr.ib(default='bottom', validator=instance_of(str)) + lineWidth = attr.ib(default=0, validator=instance_of(int)) + mappings = attr.ib(default=attr.Factory(list)) + overrides = attr.ib(default=attr.Factory(list)) + + def to_json_data(self): + histogram = self.panel_json( + { + 'fieldConfig': { + 'defaults': { + 'custom': { + 'lineWidth': self.lineWidth, + 'fillOpacity': self.fillOpacity + }, + 'color': { + 'mode': self.colorMode + }, + 'mappings': self.mappings + }, + 'overrides': self.overrides + }, + 'options': { + 'legend': { + 'displayMode': self.legendDisplayMode, + 'placement': self.legendPlacement + }, + "bucketOffset": self.bucketOffset, + "combine": self.combine, + }, + 'type': HISTOGRAM_TYPE, + } + ) + + if self.bucketSize > 0: + histogram['options']['bucketSize'] = self.bucketSize + + return histogram + + +@attr.s +class News(Panel): + """Generates News panel json structure + + :param feedUrl: URL to query, only RSS feed formats are supported (not Atom). + :param showImage: Controls if the news item social (og:image) image is shown above text content + :param useProxy: If the feed is unable to connect, consider a CORS proxy + """ + feedUrl = attr.ib(default='', validator=instance_of(str)) + showImage = attr.ib(default=True, validator=instance_of(bool)) + useProxy = attr.ib(default=False, validator=instance_of(bool)) + + def to_json_data(self): + return self.panel_json( + { + 'options': { + 'feedUrl': self.feedUrl, + 'showImage': self.showImage, + 'useProxy': self.useProxy + }, + 'type': NEWS_TYPE, + } + ) + + +@attr.s +class Ae3ePlotly(Panel): + """Generates ae3e plotly panel json structure + GitHub repo of the panel: https://github.com/ae3e/ae3e-plotly-panel + :param configuration in json format: Plotly configuration. Docs: https://plotly.com/python/configuration-options/ + :param data: Plotly data: https://plotly.com/python/figure-structure/ + :param layout: Layout of the chart in json format. Plotly docs: https://plotly.com/python/reference/layout/ + :param script: Script executed whenever new data is available. Must return an object with one or more of the + following properties : data, layout, config f(data, variables){...your code...} + :param clickScript: Script executed when chart is clicked. 
f(data){...your code...} + """ + configuration = attr.ib(default=attr.Factory(dict), validator=attr.validators.instance_of(dict)) + data = attr.ib(default=attr.Factory(list), validator=instance_of(list)) + layout = attr.ib(default=attr.Factory(dict), validator=attr.validators.instance_of(dict)) + script = attr.ib(default="""console.log(data) + var trace = { + x: data.series[0].fields[0].values.buffer, + y: data.series[0].fields[1].values.buffer + }; + return {data:[trace],layout:{title:'My Chart'}};""", validator=instance_of(str)) + clickScript = attr.ib(default='', validator=instance_of(str)) + + def to_json_data(self): + plotly = self.panel_json( + { + 'fieldConfig': { + 'defaults': {}, + 'overrides': [] + }, + 'options': { + 'configuration': {}, + 'data': self.data, + 'layout': {}, + 'onclick': self.clickScript, + 'script': self.script, + }, + 'type': AE3E_PLOTLY_TYPE, + } + ) + _deep_update(plotly["options"]["layout"], self.layout) + _deep_update(plotly["options"]["configuration"], self.configuration) + return plotly + + +@attr.s +class BarChart(Panel): + """Generates bar chart panel json structure + Grafana docs on Bar chart panel: https://grafana.com/docs/grafana/latest/panels-visualizations/visualizations/bar-chart/ + + :param orientation: Controls the orientation of the chart + :param xTickLabelRotation: Controls the rotation of bar labels + :param xTickLabelSpacing: Controls the spacing of bar labels + :param showValue: Controls the visibility of values + :param stacking: Controls the stacking of the bar chart + :param groupWidth: Controls the width of the group + :param barWidth: Controls the width of the bars + :param barRadius: Controls the radius of the bars + :param toolTipMode: Controls the style of tooltips + :param toolTipSort: Controls the sort order of tooltips, when toolTipMode is 'All' + :param showLegend: Controls the visibility of legends + :param legendDisplayMode: Controls the style of legends, if they are shown. 
+ :param legendPlacement: Controls the placement of legends, if they are shown + :param legendCalcs: Controls the calculations to show on legends + :param lineWidth: Controls the width of lines + :param fillOpacity: Controls the opacity of bars + :param gradientMode: Controls the gradient style of the bars + :param axisPlacement: Controls the axis placement + :param axisLabel: Controls the axis labels + :param axisColorMode: Controls the axis color style + :param scaleDistributionType: Controls the type of distribution + :param axisCenteredZero: Controls the centering of the axis + :param hideFromTooltip: Controls the hiding of tooltips + :param hideFromViz: Controls the hiding of bars + :param hideFromLegend: Controls the hiding of legends + :param colorMode: Controls the color palette of the bars + :param fixedColor: Controls the color of the bars, when the colorMode is fixed + :param mappings: Controls the mapping of values + :param thresholdsMode: Controls the thresholds mode + :param thresholdSteps: Controls the threshold steps + :param overrides: Controls the overriding of the base characteristics of certain data + """ + orientation = attr.ib(default='auto', validator=instance_of(str)) + xTickLabelRotation = attr.ib(default=0, validator=instance_of(int)) + xTickLabelSpacing = attr.ib(default=0, validator=instance_of(int)) + showValue = attr.ib(default='auto', validator=instance_of(str)) + stacking = attr.ib(default='none', validator=instance_of(str)) + groupWidth = attr.ib(default=0.7, validator=instance_of(float)) + barWidth = attr.ib(default=0.97, validator=instance_of(float)) + barRadius = attr.ib(default=0.0, validator=instance_of(float)) + tooltipMode = attr.ib(default='single', validator=instance_of(str)) + tooltipSort = attr.ib(default='none', validator=instance_of(str)) + showLegend = attr.ib(default=True, validator=instance_of(bool)) + legendDisplayMode = attr.ib(default='list', validator=instance_of(str)) + legendPlacement = attr.ib(default='bottom', validator=instance_of(str)) + legendCalcs = attr.ib(factory=list, validator=instance_of(list)) + lineWidth = attr.ib(default=1, validator=instance_of(int)) + fillOpacity = attr.ib(default=80, validator=instance_of(int)) + gradientMode = attr.ib(default='none', validator=instance_of(str)) + axisPlacement = attr.ib(default='auto', validator=instance_of(str)) + axisLabel = attr.ib(default='', validator=instance_of(str)) + axisColorMode = attr.ib(default='text', validator=instance_of(str)) + scaleDistributionType = attr.ib(default='linear', validator=instance_of(str)) + axisCenteredZero = attr.ib(default=False, validator=instance_of(bool)) + hideFromTooltip = attr.ib(default=False, validator=instance_of(bool)) + hideFromViz = attr.ib(default=False, validator=instance_of(bool)) + hideFromLegend = attr.ib(default=False, validator=instance_of(bool)) + colorMode = attr.ib(default='palette-classic', validator=instance_of(str)) + fixedColor = attr.ib(default='blue', validator=instance_of(str)) + mappings = attr.ib(factory=list, validator=instance_of(list)) + thresholdsMode = attr.ib(default='absolute', validator=instance_of(str)) + thresholdSteps = attr.ib( + default=attr.Factory(lambda: [ + { + 'value': None, + 'color': 'green' + }, + { + 'value': 80, + 'color': 'red' + } + ]), + validator=instance_of(list) + ) + overrides = attr.ib(factory=list, validator=instance_of(list)) + + def to_json_data(self): + bar_chart = self.panel_json( + { + 'options': { + 'orientation': self.orientation, + 'xTickLabelRotation': self.xTickLabelRotation, +
'xTickLabelSpacing': self.xTickLabelSpacing, + 'showValue': self.showValue, + 'stacking': self.stacking, + 'groupWidth': self.groupWidth, + 'barWidth': self.barWidth, + 'barRadius': self.barRadius, + 'tooltip': { + 'mode': self.tooltipMode, + 'sort': self.tooltipSort + }, + 'legend': { + 'showLegend': self.showLegend, + 'displayMode': self.legendDisplayMode, + 'placement': self.legendPlacement, + 'calcs': self.legendCalcs + }, + }, + 'fieldConfig': { + 'defaults': { + 'custom': { + 'lineWidth': self.lineWidth, + 'fillOpacity': self.fillOpacity, + 'gradientMode': self.gradientMode, + 'axisPlacement': self.axisPlacement, + 'axisLabel': self.axisLabel, + 'axisColorMode': self.axisColorMode, + 'scaleDistribution': { + 'type': self.scaleDistributionType + }, + 'axisCenteredZero': self.axisCenteredZero, + 'hideFrom': { + 'tooltip': self.hideFromTooltip, + 'viz': self.hideFromViz, + 'legend': self.hideFromLegend + } + }, + 'color': { + 'mode': self.colorMode, + 'fixedColor': self.fixedColor if self.colorMode == 'fixed' else 'none' + }, + 'mappings': self.mappings, + 'thresholds': { + 'mode': self.thresholdsMode, + 'steps': self.thresholdSteps + } + }, + 'overrides': self.overrides + }, + 'type': BAR_CHART_TYPE + } + ) + return bar_chart + + @attr.s class Annotation(object): """ diff --git a/grafanalib/elasticsearch.py b/grafanalib/elasticsearch.py index 9a731b1a..35f1a9d4 100644 --- a/grafanalib/elasticsearch.py +++ b/grafanalib/elasticsearch.py @@ -2,11 +2,12 @@ import attr import itertools -from attr.validators import instance_of +from attr.validators import in_, instance_of +from grafanalib.core import AlertCondition -DATE_HISTOGRAM_DEFAULT_FIELD = "time_iso8601" -ORDER_ASC = "asc" -ORDER_DESC = "desc" +DATE_HISTOGRAM_DEFAULT_FIELD = 'time_iso8601' +ORDER_ASC = 'asc' +ORDER_DESC = 'desc' @attr.s @@ -16,12 +17,27 @@ class CountMetricAgg(object): https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html It's the default aggregator for elasticsearch queries. 
+ :param hide: show/hide the metric in the final panel display + :param id: id of the metric + :param inline: script to apply to the data, using '_value' """ + id = attr.ib(default=0, validator=instance_of(int)) + hide = attr.ib(default=False, validator=instance_of(bool)) + inline = attr.ib(default="", validator=instance_of(str)) + def to_json_data(self): + self.settings = {} + + if self.inline: + self.settings['script'] = {'inline': self.inline} + return { + 'id': str(self.id), + 'hide': self.hide, 'type': 'count', 'field': 'select field', - 'settings': {}, + 'inlineScript': self.inline, + 'settings': self.settings, } @@ -32,14 +48,157 @@ class MaxMetricAgg(object): https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html :param field: name of elasticsearch field to provide the maximum for + :param hide: show/hide the metric in the final panel display + :param id: id of the metric + :param inline: script to apply to the data, using '_value' """ field = attr.ib(default="", validator=instance_of(str)) + id = attr.ib(default=0, validator=instance_of(int)) + hide = attr.ib(default=False, validator=instance_of(bool)) + inline = attr.ib(default="", validator=instance_of(str)) def to_json_data(self): + self.settings = {} + + if self.inline: + self.settings['script'] = {'inline': self.inline} + return { + 'id': str(self.id), + 'hide': self.hide, 'type': 'max', 'field': self.field, - 'settings': {}, + 'inlineScript': self.inline, + 'settings': self.settings, + } + + +@attr.s +class CardinalityMetricAgg(object): + """An aggregator that provides the cardinality of the values. + + https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html + + :param field: name of elasticsearch field to provide the cardinality for + :param id: id of the metric + :param hide: show/hide the metric in the final panel display + :param inline: script to apply to the data, using '_value' + """ + field = attr.ib(default="", validator=instance_of(str)) + id = attr.ib(default=0, validator=instance_of(int)) + hide = attr.ib(default=False, validator=instance_of(bool)) + inline = attr.ib(default="", validator=instance_of(str)) + + def to_json_data(self): + self.settings = {} + + if self.inline: + self.settings['script'] = {'inline': self.inline} + + return { + 'id': str(self.id), + 'hide': self.hide, + 'type': 'cardinality', + 'field': self.field, + 'inlineScript': self.inline, + 'settings': self.settings, + } + + +@attr.s +class AverageMetricAgg(object): + """An aggregator that provides the average value among the values.
+ + https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html + + :param field: name of elasticsearch metric aggregator to provide the average of + :param id: id of the metric + :param hide: show/hide the metric in the final panel display + :param inline: script to apply to the data, using '_value' + """ + + field = attr.ib(default="", validator=instance_of(str)) + id = attr.ib(default=0, validator=instance_of(int)) + hide = attr.ib(default=False, validator=instance_of(bool)) + inline = attr.ib(default="", validator=instance_of(str)) + + def to_json_data(self): + self.settings = {} + + if self.inline: + self.settings['script'] = {'inline': self.inline} + + return { + 'id': str(self.id), + 'hide': self.hide, + 'type': 'avg', + 'field': self.field, + 'inlineScript': self.inline, + 'settings': self.settings, + 'meta': {} + } + + +@attr.s +class DerivativeMetricAgg(object): + """An aggregator that takes the derivative of another metric aggregator. + + https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html + + :param field: id of elasticsearch metric aggregator to provide the derivative of + :param hide: show/hide the metric in the final panel display + :param id: id of the metric + :param pipelineAgg: pipeline aggregator id + :param unit: derivative units + """ + field = attr.ib(default="", validator=instance_of(str)) + hide = attr.ib(default=False, validator=instance_of(bool)) + id = attr.ib(default=0, validator=instance_of(int)) + pipelineAgg = attr.ib(default=1, validator=instance_of(int)) + unit = attr.ib(default="", validator=instance_of(str)) + + def to_json_data(self): + settings = {} + if self.unit != "": + settings['unit'] = self.unit + + return { + 'id': str(self.id), + 'pipelineAgg': str(self.pipelineAgg), + 'hide': self.hide, + 'type': 'derivative', + 'field': self.field, + 'settings': settings, + } + + +@attr.s +class SumMetricAgg(object): + """An aggregator that provides the sum of the values. + https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html + :param field: name of elasticsearch field to provide the sum over + :param hide: show/hide the metric in the final panel display + :param id: id of the metric + :param inline: script to apply to the data, using '_value' + """ + field = attr.ib(default="", validator=instance_of(str)) + id = attr.ib(default=0, validator=instance_of(int)) + hide = attr.ib(default=False, validator=instance_of(bool)) + inline = attr.ib(default="", validator=instance_of(str)) + + def to_json_data(self): + self.settings = {} + + if self.inline: + self.settings['script'] = {'inline': self.inline} + + return { + 'id': str(self.id), + 'hide': self.hide, + 'type': 'sum', + 'field': self.field, + 'inlineScript': self.inline, + 'settings': self.settings, } @@ -60,7 +219,7 @@ class DateHistogramGroupBy(object): default=DATE_HISTOGRAM_DEFAULT_FIELD, validator=instance_of(str), ) - interval = attr.ib(default="auto", validator=instance_of(str)) + interval = attr.ib(default='auto', validator=instance_of(str)) minDocCount = attr.ib(default=0, validator=instance_of(int)) def to_json_data(self): @@ -76,6 +235,43 @@ def to_json_data(self): } +@attr.s +class BucketScriptAgg(object): + """An aggregator that applies a bucket script to the results of previous aggregations. 
+ https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html + + :param fields: dictionary of field names mapped to aggregation IDs to be used in the bucket script + e.g. { "field1":1 }, which allows the output of aggregate ID 1 to be referenced as + params.field1 in the bucket script + :param script: script to apply to the data using the variables specified in 'fields' + :param id: id of the aggregator + :param hide: show/hide the metric in the final panel display + """ + fields = attr.ib(factory=dict, validator=instance_of(dict)) + id = attr.ib(default=0, validator=instance_of(int)) + hide = attr.ib(default=False, validator=instance_of(bool)) + script = attr.ib(default="", validator=instance_of(str)) + + def to_json_data(self): + pipelineVars = [] + for field in self.fields: + pipelineVars.append({ + 'name': str(field), + 'pipelineAgg': str(self.fields[field]) + }) + + return { + 'field': 'select field', + 'type': 'bucket_script', + 'id': str(self.id), + 'hide': self.hide, + 'pipelineVariables': pipelineVars, + 'settings': { + 'script': self.script + }, + } + + @attr.s class Filter(object): """ A Filter for a FilterGroupBy aggregator. @@ -90,9 +286,9 @@ class Filter(object): def to_json_data(self): return { - 'label': self.label, - 'query': self.query, - } + 'label': self.label, + 'query': self.query, + } @attr.s @@ -109,12 +305,12 @@ class FiltersGroupBy(object): def to_json_data(self): return { - 'id': str(self.id), - 'settings': { - 'filters': self.filters, - }, - 'type': 'filters', - } + 'id': str(self.id), + 'settings': { + 'filters': self.filters, + }, + 'type': 'filters', + } @attr.s @@ -127,14 +323,15 @@ class TermsGroupBy(object): :param field: name of the field to group by :param minDocCount: min. amount of matching records to return a result :param order: ORDER_ASC or ORDER_DESC - :param orderBy: term to order the bucker + :param orderBy: term to order the bucket Term value: '_term', Doc Count: '_count' + or to use metric function use the string value "2" :param size: how many buckets are returned """ field = attr.ib(validator=instance_of(str)) id = attr.ib(default=0, validator=instance_of(int)) minDocCount = attr.ib(default=1, validator=instance_of(int)) order = attr.ib(default=ORDER_DESC, validator=instance_of(str)) - orderBy = attr.ib(default="_term", validator=instance_of(str)) + orderBy = attr.ib(default='_term', validator=instance_of(str)) size = attr.ib(default=0, validator=instance_of(int)) def to_json_data(self): @@ -145,7 +342,7 @@ def to_json_data(self): 'settings': { 'min_doc_count': self.minDocCount, 'order': self.order, - 'order_by': self.orderBy, + 'orderBy': self.orderBy, 'size': self.size, }, } @@ -165,6 +362,7 @@ class ElasticsearchTarget(object): :param metricAggs: Metric Aggregators :param query: query :param refId: target reference id + :param timeField: name of the elasticsearch time field """ alias = attr.ib(default=None) @@ -174,9 +372,10 @@ class ElasticsearchTarget(object): metricAggs = attr.ib(default=attr.Factory(lambda: [CountMetricAgg()])) query = attr.ib(default="", validator=instance_of(str)) refId = attr.ib(default="", validator=instance_of(str)) + timeField = attr.ib(default="@timestamp", validator=instance_of(str)) def _map_bucket_aggs(self, f): - return attr.assoc(self, bucketAggs=list(map(f, self.bucketAggs))) + return attr.evolve(self, bucketAggs=list(map(f, self.bucketAggs))) def auto_bucket_agg_ids(self): """Give unique IDs all bucketAggs without ID. 
@@ -207,4 +406,144 @@ def to_json_data(self): 'metrics': self.metricAggs, 'query': self.query, 'refId': self.refId, + 'timeField': self.timeField, + } + + +@attr.s +class ElasticsearchAlertCondition(AlertCondition): + """ + Override alert condition to support Elasticseach target. + + See AlertCondition for more information. + + :param Target target: Metric the alert condition is based on. + :param Evaluator evaluator: How we decide whether we should alert on the + metric. e.g. ``GreaterThan(5)`` means the metric must be greater than 5 + to trigger the condition. See ``GreaterThan``, ``LowerThan``, + ``WithinRange``, ``OutsideRange``, ``NoValue``. + :param TimeRange timeRange: How long the condition must be true for before + we alert. + :param operator: One of ``OP_AND`` or ``OP_OR``. How this condition + combines with other conditions. + :param reducerType: RTYPE_* + :param type: CTYPE_* + """ + + target = attr.ib(default=None, validator=instance_of(ElasticsearchTarget)) + + +@attr.s +class MinMetricAgg(object): + """An aggregator that provides the min. value among the values. + https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html + :param field: name of elasticsearch field to provide the minimum for + :param hide: show/hide the metric in the final panel display + :param id: id of the metric + :param inline: script to apply to the data, using '_value' + """ + + field = attr.ib(default="", validator=instance_of(str)) + id = attr.ib(default=0, validator=instance_of(int)) + hide = attr.ib(default=False, validator=instance_of(bool)) + inline = attr.ib(default="", validator=instance_of(str)) + + def to_json_data(self): + self.settings = {} + + if self.inline: + self.settings['script'] = {'inline': self.inline} + + return { + 'id': str(self.id), + 'hide': self.hide, + 'type': 'min', + 'field': self.field, + 'inlineScript': self.inline, + 'settings': self.settings, + } + + +@attr.s +class PercentilesMetricAgg(object): + """A multi-value metrics aggregation that calculates one or more percentiles over numeric values extracted from the aggregated documents + https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html + :param field: name of elasticsearch field to provide the percentiles for + :param hide: show/hide the metric in the final panel display + :param id: id of the metric + :param inline: script to apply to the data, using '_value' + :param percents: list of percentiles, like [95,99] + """ + + field = attr.ib(default="", validator=instance_of(str)) + id = attr.ib(default=0, validator=instance_of(int)) + hide = attr.ib(default=False, validator=instance_of(bool)) + inline = attr.ib(default="", validator=instance_of(str)) + percents = attr.ib(default=attr.Factory(list)) + settings = attr.ib(factory=dict) + + def to_json_data(self): + self.settings = {} + + self.settings['percents'] = self.percents + + if self.inline: + self.settings['script'] = {'inline': self.inline} + + return { + 'id': str(self.id), + 'hide': self.hide, + 'type': 'percentiles', + 'field': self.field, + 'inlineScript': self.inline, + 'settings': self.settings, + } + + +@attr.s +class RateMetricAgg(object): + """An aggregator that provides the rate of the values. 
+ https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-rate-aggregation.html + :param field: name of elasticsearch field to provide the sum over + :param hide: show/hide the metric in the final panel display + :param id: id of the metric + :param unit: calendar interval to group by + supported calendar intervals + https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html#calendar_intervals + "minute" + "hour" + "day" + "week" + "month" + "quarter" + "year" + :param mode: sum or count the values + :param script: script to apply to the data, using '_value' + """ + + field = attr.ib(default="", validator=instance_of(str)) + id = attr.ib(default=0, validator=instance_of(int)) + hide = attr.ib(default=False, validator=instance_of(bool)) + unit = attr.ib(default="", validator=instance_of(str)) + mode = attr.ib(default="", validator=in_(["", "value_count", "sum"])) + script = attr.ib(default="", validator=instance_of(str)) + + def to_json_data(self): + self.settings = {} + + if self.unit: + self.settings["unit"] = self.unit + + if self.mode: + self.settings["mode"] = self.mode + + if self.script: + self.settings["script"] = self.script + + return { + "id": str(self.id), + "hide": self.hide, + "field": self.field, + "settings": self.settings, + "type": "rate", } diff --git a/grafanalib/formatunits.py b/grafanalib/formatunits.py new file mode 100644 index 00000000..54112427 --- /dev/null +++ b/grafanalib/formatunits.py @@ -0,0 +1,286 @@ +""" +Grafana unit formats +(https://github.com/grafana/grafana/blob/main/packages/grafana-data/src/valueFormats/categories.ts) + +To use: +from grafanalib import formatunits as UNITS + +format = UNITS.BYTES +""" + +NO_FORMAT = 'none' +NONE_FORMAT = 'none' +NUMBER_FORMAT = 'none' +STRING_FORMAT = 'string' +PERCENT_UNIT = 'percentunit' +PERCENT_FORMAT = 'percent' +SHORT = 'short' +HUMIDITY = 'humidity' # %H +DECIBEL = 'dB' +HEXADECIMAL_OX = 'hex0x' # 0x +HEXADECIMAL = 'hex' +SCI_NOTATION = 'sci' +LOCAL_FORMAT = 'locale' +PIXELS = 'pixel' +# Acceleration +METERS_SEC_2 = 'accMS2' # m/sec² +FEET_SEC_2 = 'accFS2' # f/sec² +G_UNIT = 'accG' # g +# Angle +DEGREES = 'degree' # ° +RADIANS = 'radian' # rad +GRADIAN = 'grad' # grad +ARC_MINUTES = 'arcmin' # arcmin +ARC_SECONDS = 'arcsec' # arcsec +# Area +SQUARE_METERS = 'areaM2' # m² +SQUARE_FEET = 'areaF2' # ft² +SQUARE_MILES = 'areaMI2' # mi² +# Computation +FLOPS_PER_SEC = 'flops' # FLOP/s +MEGA_FLOPS_PER_SEC = 'mflops' # MFLOP/s +GIGA_FLOPS_PER_SEC = 'gflops' # GFLOP/s +TERA_FLOPS_PER_SEC = 'tflops' # TFLOP/s +PETA_FLOPS_PER_SEC = 'pflops' # PFLOP/s +EXA_FLOPS_PER_SEC = 'eflops' # EFLOP/s +ZETTA_FLOPS_PER_SEC = 'zflops' # ZFLOP/s +YOTTA_FLOPS_PER_SEC = 'yflops' # YFLOP/s +# Concentration +PARTS_PER_MILLION = 'ppm' # ppm +PARTS_PER_BILLION = 'conppb' # ppb +NANO_GRAM_PER_CUBIC_METER = 'conngm3' # ng/m³ +NANO_GRAM_PER_NORMAL_CUBIC_METER = 'conngNm3' # ng/Nm³ +MICRO_GRAM_PER_CUBIC_METER = 'conμgm3' # μg/m³ +MICRO_GRAM_PER_NORMAL_CUBIC_METER = 'conμgNm3' # μg/Nm³ +MILLI_GRAM_PER_CUBIC_METER = 'conmgm3' # mg/m³ +MILLI_GRAM_PER_NORMAL_CUBIC_METER = 'conmgNm3' # mg/Nm³ +GRAM_PER_CUBIC_METER = 'congm3' # g/m³ +GRAM_PER_NORMAL_CUBIC_METER = 'congNm3' # g/Nm³ +MILLI_GRAM_PER_DECI_LITRE = 'conmgdL' # mg/dL +MILLI_MOLES_PER_LITRE = 'conmmolL' # mmol/L +# Currency +DOLLARS = 'currencyUSD' # $ +POUNDS = 'currencyGBP' # £ +EURO = 'currencyEUR' # € +YEN = 'currencyJPY' # ¥ +RUBLES = 'currencyRUB' # ₽ +HRYVNIAS = 'currencyUAH' # ₴ +REAL = 
'currencyBRL' # R$ +DANISH_KRONE = 'currencyDKK' # kr +ICELANDIC_KRONA = 'currencyISK' # kr +NORWEGIAN_KRONE = 'currencyNOK' # kr +SWEDISH_KORNA = 'currencySEK' # kr +CZECH_KORUNA = 'currencyCZK' # czk +SWISS_FRANC = 'currencyCHF' # CHF +POLISH_ZLOTY = 'currencyPLN' # PLN +BITCOIN = 'currencyBTC' # ฿ +MILLI_BITCOIN = 'currencymBTC' # mBTC +MICRO_BITCOIN = 'currencyμBTC' # μBTC +SOUTH_AFRICAN_RAND = 'currencyZAR' # R +INDIAN_RUPEE = 'currencyINR' # ₹ +SOUTH_KOREAN_WON = 'currencyKRW' # ₩ +INDONESIAN_RUPIAH = 'currencyIDR' # Rp +PHILIPPINE_PESO = 'currencyPHP' # PHP +# Data +BYTES_IEC = 'bytes' +BYTES = 'decbytes' # B +BITS_IEC = 'bits' +BITS = 'decbits' +KIBI_BYTES = 'kbytes' # KiB +KILO_BYTES = 'deckbytes' # kB +MEBI_BYTES = 'mbytes' # MiB +MEGA_BYTES = 'decmbytes' # MB +GIBI_BYTES = 'gbytes' # GiB +GIGA_BYTES = 'decgbytes' # GB +TEBI_BYTES = 'tbytes' # TiB +TERA_BYTES = 'dectbytes' # TB +PEBI_BYTES = 'pbytes' # PiB +PETA_BYTES = 'decpbytes' # PB +# Data Rate +PACKETS_SEC = 'pps' # p/s + +BYTES_SEC_IEC = 'binBps' # B/s +KIBI_BYTES_SEC = 'KiBs' # KiB/s +MEBI_BYTES_SEC = 'MiBs' # MiB/s +GIBI_BYTES_SEC = 'GiBs' # GiB/s +TEBI_BYTES_SEC = 'TiBs' # TiB/s +PEBI_BYTES_SEC = 'PiBs' # PB/s + +BYTES_SEC = 'Bps' # B/s +KILO_BYTES_SEC = 'KBs' # kB/s +MEGA_BYTES_SEC = 'MBs' # MB/s +GIGA_BYTES_SEC = 'GBs' # GB/s +TERA_BYTES_SEC = 'TBs' # TB/s +PETA_BYTES_SEC = 'PBs' # PB/s + +BITS_SEC_IEC = 'binbps' # b/s +KIBI_BITS_SEC = 'Kibits' # Kib/s +MEBI_BITS_SEC = 'Mibits' # Mib/s +GIBI_BITS_SEC = 'Gibits' # Gib/s +TEBI_BITS_SEC = 'Tibits' # Tib/s +PEBI_BITS_SEC = 'Pibits' # Pib/s + +BITS_SEC = 'bps' # b/s +KILO_BITS_SEC = 'Kbits' # kb/s +MEGA_BITS_SEC = 'Mbits' # Mb/s +GIGA_BITS_SEC = 'Gbits' # Gb/s +TERA_BITS_SEC = 'Tbits' # Tb/s +PETA_BITS_SEC = 'Pbits' # Pb/s +# Date & Time +DATE_TIME_ISO = 'dateTimeAsIso' +DATE_TIME_ISO_TODAY = 'dateTimeAsIsoNoDateIfToday' +DATE_TIME_US = 'dateTimeAsUS' +DATE_TIME_US_TODAY = 'dateTimeAsUSNoDateIfToday' +DATE_TIME_LOCAL = 'dateTimeAsLocal' +DATE_TIME_LOCAL_TODAY = 'dateTimeAsLocalNoDateIfToday' +DATE_TIME_DEFAULT = 'dateTimeAsSystem' +DATE_TIME_FROM_NOW = 'dateTimeFromNow' +# Energy +WATT = 'watt' # W +KILO_WATT = 'kwatt' # kW +MEGA_WATT = 'megwatt' # MW +GIGA_WATT = 'gwatt' # GW +MILLI_WATT = 'mwatt' # mW +WATT_SQUARE_METER = 'Wm2' # W/m² +VOLT_AMPERE = 'voltamp' # VA +KILO_VOLT_AMPERE = 'kvoltamp' # kVA +VAR = 'voltampreact' # VAR +KILO_VAR = 'kvoltampreact' # kVAR +WATT_HOUR = 'watth' # Wh +WATT_HOUR_KILO = 'watthperkg' # Wh/kg +KILO_WATT_HOUR = 'kwatth' # kWh +KILO_WATT_MIN = 'kwattm' # kWm +AMPERE_HOUR = 'amph' # Ah +KILO_AMPERE_HR = 'kamph' # kAh +MILLI_AMPER_HOUR = 'mamph' # mAh +JOULE = 'joule' # J +ELECTRON_VOLT = 'ev' # eV +AMPERE = 'amp' # A +KILO_AMPERE = 'kamp' # kA +MILLI_AMPERE = 'mamp' # mA +VOLT = 'volt' # V +KILO_VOLT = 'kvolt' # kV +MILLI_VOLT = 'mvolt' # mV +DECIBEL_MILLI_WATT = 'dBm' # dBm +OHM = 'ohm' # Ω +KILO_OHM = 'kohm' # kΩ +MEGA_OHM = 'Mohm' # MΩ +FARAD = 'farad' # F +MICRO_FARAD = 'µfarad' # µF +NANO_FARAD = 'nfarad' # nF +PICO_FARAD = 'pfarad' # pF +FEMTO_FARAD = 'ffarad' # fF +HENRY = 'henry' # H +MILLI_HENRY = 'mhenry' # mH +MICRO_HENRY = 'µhenry' # µH +LUMENS = 'lumens' # Lm +# Flow +GALLONS_PER_MIN = 'flowgpm' # gpm +CUBIC_METERS_PER_SEC = 'flowcms' # cms +CUBIC_FEET_PER_SEC = 'flowcfs' # cfs +CUBIC_FEET_PER_MIN = 'flowcfm' # cfm +LITRES_PER_HOUR = 'litreh' # L/h +LITRES_PER_MIN = 'flowlpm' # L/min +MILLI_LITRE_PER_MIN = 'flowmlpm' # mL/min +LUX = 'lux' # lx +# Force +NEWTON_METERS = 'forceNm' # Nm +KILO_NEWTON_METERS = 'forcekNm' # kNm 
+NEWTONS = 'forceN' # N +KILO_NEWTONS = 'forcekN' # kN +# Hash Rate +HASHES_PER_SEC = 'Hs' # H/s +KILO_HASHES_PER_SEC = 'KHs' # kH/s +MEGA_HASHES_PER_SEC = 'MHs' # MH/s +GIGA_HASHES_PER_SEC = 'GHs' # GH/s +TERA_HASHES_PER_SEC = 'THs' # TH/s +PETA_HASHES_PER_SEC = 'PHs' # PH/s +EXA_HASHES_PER_SEC = 'EHs' # EH/s +# Mass +MILLI_GRAM = 'massmg' # mg +GRAM = 'massg' # g +POUND = 'masslb' # lb +KILO_GRAM = 'masskg' # kg +METRIC_TON = 'masst' # t +# Length +MILLI_METER = 'lengthmm' # mm +INCH = 'lengthin' # in +METER = 'lengthm' # m +KILO_METER = 'lengthkm' # km +FEET = 'lengthft' # ft +MILE = 'lengthmi' # mi +# Pressure +MILLI_BARS = 'pressurembar' # mBar, +BARS = 'pressurebar' # Bar, +KILO_BARS = 'pressurekbar' # kBar, +PASCALS = 'pressurepa' # Pa +HECTO_PASCALS = 'pressurehpa' # hPa +KILO_PASCALS = 'pressurekpa' # kPa +INCHES_OF_MERCURY = 'pressurehg' # "Hg +PSI = 'pressurepsi' # psi +# Radiation +BECQUEREL = 'radbq' # Bq +CURIE = 'radci' # Ci +GRAY = 'radgy' # Gy +RAD = 'radrad' # rad +MICROSIEVERT = 'radusv' # µSv +MILLI_SIEVERT = 'radmsv' # mSv +SIEVERT = 'radsv' # Sv +REM = 'radrem' # rem +EXPOSURE = 'radexpckg' # C/kg +ROENTGEN = 'radr' # R +MICRO_SIEVERT_PER_HOUR = 'radusvh' # µSv/h +MILLI_SIEVERT_PER_HOUR = 'radmsvh' # mSv/h +SIEVERT_PER_HOUR = 'radsvh' # Sv/h +# Rotational Speed +RPM = 'rotrpm' # rpm +HERTZ_ROTATION = 'rothz' # Hz +RADS_PER_SEC = 'rotrads' # rad/s +DEGREES_PER_SECOND = 'rotdegs' # °/s +# Temperature +CELSUIS = 'celsius' # °C +FARENHEIT = 'fahrenheit' # °F +KELVIN = 'kelvin' # K +# Time +HERTZ = 'hertz' # Hz +NANO_SECONDS = 'ns' # ns +MICRO_SECONDS = 'µs' # µs +MILLI_SECONDS = 'ms' # ms +SECONDS = 's' # s +MINUTES = 'm' # m +HOURS = 'h' # h +DAYS = 'd' # d +DURATION_MILLI_SECONDS = 'dtdurationms' # ms +DURATION_SECONDS = 'dtdurations' # s +HH_MM_SS = 'dthms' # hh:mm:ss +D_HH_MM_SS = 'dtdhms' # d hh:mm:ss +TIME_TICKS = 'timeticks' # s/100 +CLOCK_MSEC = 'clockms' # ms +CLOCK_SEC = 'clocks' # s +# Throughput +COUNTS_PER_SEC = 'cps' # cps +OPS_PER_SEC = 'ops' # ops +REQUESTS_PER_SEC = 'reqps' # rps +READS_PER_SEC = 'rps' # rps +WRITES_PER_SEC = 'wps' # wps +IO_OPS_PER_SEC = 'iops' # iops +COUNTS_PER_MIN = 'cpm' # cpm +OPS_PER_MIN = 'opm' # opm +READS_PER_MIN = 'rpm' # rpm +WRITES_PER_MIN = 'wpm' # wpm +# Velocity +METERS_PER_SEC = 'velocityms' # m/s +KILO_METERS_PER_SEC = 'velocitykmh' # km/h +MILES_PER_HOUR = 'velocitymph' # mph +KNOTS = 'velocityknot' # kn +# Volume +MILLI_LITRE = 'mlitre' # mL +LITRE = 'litre' # L +CUBIC_METER = 'm3' # m³ +NORMAL_CUBIC_METER = 'Nm3' # Nm³ +CUBIC_DECI_METER = 'dm3' # dm³ +GALLONS = 'gallons' # g +# Boolean +TRUE_FALSE = 'bool' # True/False +YES_NO = 'bool_yes_no' # Yes/No +ON_OFF = 'bool_on_off' # On/Off diff --git a/grafanalib/humio.py b/grafanalib/humio.py new file mode 100644 index 00000000..6bff77a8 --- /dev/null +++ b/grafanalib/humio.py @@ -0,0 +1,30 @@ +"""Helpers to create Humio-specific Grafana queries.""" + +import attr + + +@attr.s +class HumioTarget(object): + """ + Generates Humio target JSON structure. + + Link to Humio Grafana plugin https://grafana.com/grafana/plugins/humio-datasource/ + + Humio docs on query language https://library.humio.com/humio-server/syntax.html + + :param humioQuery: Query that will be executed on Humio + :param humioRepository: Repository to execute query on. 
+ :param refId: target reference id + """ + + humioQuery = attr.ib(default="") + humioRepository = attr.ib(default="") + refId = attr.ib(default="") + + def to_json_data(self): + + return { + "humioQuery": self.humioQuery, + "humioRepository": self.humioRepository, + "refId": self.refId + } diff --git a/grafanalib/influxdb.py b/grafanalib/influxdb.py new file mode 100644 index 00000000..b84384be --- /dev/null +++ b/grafanalib/influxdb.py @@ -0,0 +1,44 @@ +"""Helpers to create InfluxDB-specific Grafana queries.""" + +import attr + +TIME_SERIES_TARGET_FORMAT = 'time_series' + + +@attr.s +class InfluxDBTarget(object): + """ + Generates InfluxDB target JSON structure. + + Grafana docs on using InfluxDB: + https://grafana.com/docs/features/datasources/influxdb/ + InfluxDB docs on querying or reading data: + https://v2.docs.influxdata.com/v2.0/query-data/ + + :param alias: legend alias + :param format: Bucket aggregators + :param datasource: Influxdb name (for multiple datasource with same panel) + :param measurement: Metric Aggregators + :param query: query + :param rawQuery: target reference id + :param refId: target reference id + """ + + alias = attr.ib(default="") + format = attr.ib(default=TIME_SERIES_TARGET_FORMAT) + datasource = attr.ib(default="") + measurement = attr.ib(default="") + query = attr.ib(default="") + rawQuery = attr.ib(default=True) + refId = attr.ib(default="") + + def to_json_data(self): + return { + 'query': self.query, + 'resultFormat': self.format, + 'alias': self.alias, + 'datasource': self.datasource, + 'measurement': self.measurement, + 'rawQuery': self.rawQuery, + 'refId': self.refId + } diff --git a/grafanalib/opentsdb.py b/grafanalib/opentsdb.py index 9576e895..955b7dc2 100644 --- a/grafanalib/opentsdb.py +++ b/grafanalib/opentsdb.py @@ -5,36 +5,36 @@ from grafanalib.validators import is_in # OpenTSDB aggregators -OTSDB_AGG_AVG = "avg" -OTSDB_AGG_COUNT = "count" -OTSDB_AGG_DEV = "dev" -OTSDB_AGG_EP50R3 = "ep50r3" -OTSDB_AGG_EP50R7 = "ep50r7" -OTSDB_AGG_EP75R3 = "ep75r3" -OTSDB_AGG_EP75R7 = "ep75r7" -OTSDB_AGG_EP90R3 = "ep90r3" -OTSDB_AGG_EP90R7 = "ep90r7" -OTSDB_AGG_EP95R3 = "ep95r3" -OTSDB_AGG_EP95R7 = "ep95r7" -OTSDB_AGG_EP99R3 = "ep99r3" -OTSDB_AGG_EP99R7 = "ep99r7" -OTSDB_AGG_EP999R3 = "ep999r3" -OTSDB_AGG_EP999R7 = "ep999r7" -OTSDB_AGG_FIRST = "first" -OTSDB_AGG_LAST = "last" -OTSDB_AGG_MIMMIN = "mimmin" -OTSDB_AGG_MIMMAX = "mimmax" -OTSDB_AGG_MIN = "min" -OTSDB_AGG_MAX = "max" -OTSDB_AGG_NONE = "none" -OTSDB_AGG_P50 = "p50" -OTSDB_AGG_P75 = "p75" -OTSDB_AGG_P90 = "p90" -OTSDB_AGG_P95 = "p95" -OTSDB_AGG_P99 = "p99" -OTSDB_AGG_P999 = "p999" -OTSDB_AGG_SUM = "sum" -OTSDB_AGG_ZIMSUM = "zimsum" +OTSDB_AGG_AVG = 'avg' +OTSDB_AGG_COUNT = 'count' +OTSDB_AGG_DEV = 'dev' +OTSDB_AGG_EP50R3 = 'ep50r3' +OTSDB_AGG_EP50R7 = 'ep50r7' +OTSDB_AGG_EP75R3 = 'ep75r3' +OTSDB_AGG_EP75R7 = 'ep75r7' +OTSDB_AGG_EP90R3 = 'ep90r3' +OTSDB_AGG_EP90R7 = 'ep90r7' +OTSDB_AGG_EP95R3 = 'ep95r3' +OTSDB_AGG_EP95R7 = 'ep95r7' +OTSDB_AGG_EP99R3 = 'ep99r3' +OTSDB_AGG_EP99R7 = 'ep99r7' +OTSDB_AGG_EP999R3 = 'ep999r3' +OTSDB_AGG_EP999R7 = 'ep999r7' +OTSDB_AGG_FIRST = 'first' +OTSDB_AGG_LAST = 'last' +OTSDB_AGG_MIMMIN = 'mimmin' +OTSDB_AGG_MIMMAX = 'mimmax' +OTSDB_AGG_MIN = 'min' +OTSDB_AGG_MAX = 'max' +OTSDB_AGG_NONE = 'none' +OTSDB_AGG_P50 = 'p50' +OTSDB_AGG_P75 = 'p75' +OTSDB_AGG_P90 = 'p90' +OTSDB_AGG_P95 = 'p95' +OTSDB_AGG_P99 = 'p99' +OTSDB_AGG_P999 = 'p999' +OTSDB_AGG_SUM = 'sum' +OTSDB_AGG_ZIMSUM = 'zimsum' OTSDB_DOWNSAMPLING_FILL_POLICIES = ('none', 'nan', 'null', 'zero') 
OTSDB_DOWNSAMPLING_FILL_POLICY_DEFAULT = 'none' @@ -106,7 +106,7 @@ class OpenTSDBTarget(object): metric = attr.ib() refId = attr.ib(default="") - aggregator = attr.ib(default="sum") + aggregator = attr.ib(default='sum') alias = attr.ib(default=None) isCounter = attr.ib(default=False, validator=instance_of(bool)) counterMax = attr.ib(default=None) diff --git a/docs/example-elasticsearch.dashboard.py b/grafanalib/tests/examples/example-elasticsearch.dashboard.py similarity index 84% rename from docs/example-elasticsearch.dashboard.py rename to grafanalib/tests/examples/example-elasticsearch.dashboard.py index 1615c98a..8932ac52 100644 --- a/docs/example-elasticsearch.dashboard.py +++ b/grafanalib/tests/examples/example-elasticsearch.dashboard.py @@ -7,8 +7,15 @@ - Max. response time per point of time of HTTP requests """ -from grafanalib.core import * -from grafanalib.elasticsearch import * +from grafanalib.core import ( + Dashboard, Graph, Legend, NULL_AS_NULL, Row, SECONDS_FORMAT, + SHORT_FORMAT, YAxes, YAxis +) + +from grafanalib.elasticsearch import ( + DateHistogramGroupBy, ElasticsearchTarget, Filter, + FiltersGroupBy, MaxMetricAgg +) suc_label = "Success (200-300)" clt_err_label = "Client Errors (400-500)" @@ -38,7 +45,8 @@ dataSource="elasticsearch", targets=tgts, lines=False, - legend=Legend(alignAsTable=True, rightSide=True, total=True, current=True, max=True), + legend=Legend(alignAsTable=True, rightSide=True, + total=True, current=True, max=True), lineWidth=1, nullPointMode=NULL_AS_NULL, seriesOverrides=[ @@ -68,7 +76,7 @@ "color": "#447EBC" }, ], - yAxes=[ + yAxes=YAxes( YAxis( label="Count", format=SHORT_FORMAT, @@ -79,7 +87,7 @@ format=SECONDS_FORMAT, decimals=2 ), - ], + ), transparent=True, span=12, ) diff --git a/grafanalib/tests/examples/example.alertsv8.alertgroup.py b/grafanalib/tests/examples/example.alertsv8.alertgroup.py new file mode 100644 index 00000000..dd944872 --- /dev/null +++ b/grafanalib/tests/examples/example.alertsv8.alertgroup.py @@ -0,0 +1,96 @@ +"""Example grafana 8.x+ Alert""" + + +from grafanalib.core import ( + AlertGroup, + AlertRulev8, + Target, + AlertCondition, + LowerThan, + OP_OR, + OP_AND, + RTYPE_LAST +) + +# An AlertGroup is one group contained in an alert folder. +alertgroup = AlertGroup( + name="Production Alerts", + # Each AlertRule forms a separate alert. + rules=[ + AlertRulev8( + # Each rule must have a unique title + title="Database is unresponsive", + # Several triggers can be used per alert, a trigger is a combination of a Target and its AlertCondition in a tuple. + triggers=[ + ( + # A target refId must be assigned, and exist only once per AlertRule. + Target( + expr='sum(kube_pod_container_status_ready{exported_pod=~"database-/*"})', + # Set datasource to name of your datasource + datasource="VictoriaMetrics", + refId="A", + ), + AlertCondition( + evaluator=LowerThan(1), + # To have the alert fire when either of the triggers are met in the rule, set both AlertCondition operators to OP_OR. 
+ operator=OP_OR, + reducerType=RTYPE_LAST + ) + ), + ( + Target( + expr='sum by (app) (count_over_time({app="database"}[5m]))', + # Set datasource to name of your datasource + datasource="Loki", + refId="B", + ), + AlertCondition( + evaluator=LowerThan(1000), + operator=OP_OR, + reducerType=RTYPE_LAST + ) + ) + ], + annotations={ + "summary": "The database is down", + "runbook_url": "runbook-for-this-scenario.com/foo", + }, + labels={ + "environment": "prod", + "slack": "prod-alerts", + }, + evaluateInterval="1m", + evaluateFor="3m", + ), + + # Second alert + AlertRulev8( + title="Service API blackbox failure", + triggers=[ + ( + Target( + expr='probe_success{instance="my-service.foo.com/ready"}', + # Set datasource to name of your datasource + datasource="VictoriaMetrics", + refId="A", + ), + AlertCondition( + evaluator=LowerThan(1), + operator=OP_AND, + reducerType=RTYPE_LAST, + ) + ) + ], + annotations={ + "summary": "Service API has been unavailable for 3 minutes", + "runbook_url": "runbook-for-this-scenario.com/foo", + }, + labels={ + "environment": "prod", + "slack": "prod-alerts", + }, + evaluateInterval="1m", + evaluateFor="3m", + ) + ] +) diff --git a/grafanalib/tests/examples/example.alertsv9.alertfilebasedprovisioning.py b/grafanalib/tests/examples/example.alertsv9.alertfilebasedprovisioning.py new file mode 100644 index 00000000..5644e41c --- /dev/null +++ b/grafanalib/tests/examples/example.alertsv9.alertfilebasedprovisioning.py @@ -0,0 +1,103 @@ +"""Example grafana 9.x+ Alert""" + + +from grafanalib.core import ( + AlertGroup, + AlertRulev9, + Target, + AlertCondition, + AlertExpression, + AlertFileBasedProvisioning, + GreaterThan, + OP_AND, + RTYPE_LAST, + EXP_TYPE_CLASSIC, + EXP_TYPE_REDUCE, + EXP_TYPE_MATH +) + +# An AlertGroup is one group contained in an alert folder. +alertgroup = AlertGroup( + name="Production Alerts", + # Each AlertRule forms a separate alert. + rules=[ + # Alert rule using classic condition > 3 + AlertRulev9( + # Each rule must have a unique title + title="Alert for something 3", + uid='alert3', + # Several triggers can be used per alert + condition='B', + triggers=[ + # A target refId must be assigned, and exist only once per AlertRule. + Target( + expr="from(bucket: \"sensors\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"remote_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_system\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")", + # Set datasource to name of your datasource + datasource="influxdb", + refId="A", + ), + AlertExpression( + refId="B", + expressionType=EXP_TYPE_CLASSIC, + expression='A', + conditions=[ + AlertCondition( + evaluator=GreaterThan(3), + operator=OP_AND, + reducerType=RTYPE_LAST + ) + ] + ) + ], + annotations={ + "summary": "The database is down", + "runbook_url": "runbook-for-this-scenario.com/foo", + }, + labels={ + "environment": "prod", + "slack": "prod-alerts", + }, + evaluateFor="3m", + ), + # Alert rule using reduce and Math + AlertRulev9( + # Each rule must have a unique title + title="Alert for something 4", + uid='alert4', + condition='C', + # Several triggers can be used per alert + triggers=[ + # A target refId must be assigned, and exist only once per AlertRule. 
+ Target( + expr="from(bucket: \"sensors\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"remote_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_system\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")", + # Set datasource to name of your datasource + datasource="influxdb", + refId="A", + ), + AlertExpression( + refId="B", + expressionType=EXP_TYPE_REDUCE, + expression='A', + reduceFunction='mean', + reduceMode='dropNN' + ), + AlertExpression( + refId="C", + expressionType=EXP_TYPE_MATH, + expression='$B < 3' + ) + ], + annotations={ + "summary": "The database is down", + "runbook_url": "runbook-for-this-scenario.com/foo", + }, + labels={ + "environment": "prod", + "slack": "prod-alerts", + }, + evaluateFor="3m", + ) + ] +) + +alertfilebasedprovisioning = AlertFileBasedProvisioning([alertgroup]) diff --git a/grafanalib/tests/examples/example.alertsv9.alertgroup.py b/grafanalib/tests/examples/example.alertsv9.alertgroup.py new file mode 100644 index 00000000..383471f9 --- /dev/null +++ b/grafanalib/tests/examples/example.alertsv9.alertgroup.py @@ -0,0 +1,100 @@ +"""Example grafana 9.x+ Alert""" + + +from grafanalib.core import ( + AlertGroup, + AlertRulev9, + Target, + AlertCondition, + AlertExpression, + GreaterThan, + OP_AND, + RTYPE_LAST, + EXP_TYPE_CLASSIC, + EXP_TYPE_REDUCE, + EXP_TYPE_MATH +) + +# An AlertGroup is one group contained in an alert folder. +alertgroup = AlertGroup( + name="Production Alerts", + # Each AlertRule forms a separate alert. + rules=[ + # Alert rule using classic condition > 3 + AlertRulev9( + # Each rule must have a unique title + title="Alert for something 1", + uid='alert1', + # Several triggers can be used per alert + condition='B', + triggers=[ + # A target refId must be assigned, and exist only once per AlertRule. + Target( + expr="from(bucket: \"sensors\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"remote_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_system\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")", + # Set datasource to name of your datasource + datasource="influxdb", + refId="A", + ), + AlertExpression( + refId="B", + expressionType=EXP_TYPE_CLASSIC, + expression='A', + conditions=[ + AlertCondition( + evaluator=GreaterThan(3), + operator=OP_AND, + reducerType=RTYPE_LAST + ) + ] + ) + ], + annotations={ + "summary": "The database is down", + "runbook_url": "runbook-for-this-scenario.com/foo", + }, + labels={ + "environment": "prod", + "slack": "prod-alerts", + }, + evaluateFor="3m", + ), + # Alert rule using reduce and Math + AlertRulev9( + # Each rule must have a unique title + title="Alert for something 2", + uid='alert2', + condition='C', + # Several triggers can be used per alert + triggers=[ + # A target refId must be assigned, and exist only once per AlertRule. 
+ Target( + expr="from(bucket: \"sensors\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"remote_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_system\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")", + # Set datasource to name of your datasource + datasource="influxdb", + refId="A", + ), + AlertExpression( + refId="B", + expressionType=EXP_TYPE_REDUCE, + expression='A', + reduceFunction='mean', + reduceMode='dropNN' + ), + AlertExpression( + refId="C", + expressionType=EXP_TYPE_MATH, + expression='$B < 3' + ) + ], + annotations={ + "summary": "The database is down", + "runbook_url": "runbook-for-this-scenario.com/foo", + }, + labels={ + "environment": "prod", + "slack": "prod-alerts", + }, + evaluateFor="3m", + ) + ] +) diff --git a/grafanalib/tests/examples/example.dashboard-with-sql.py b/grafanalib/tests/examples/example.dashboard-with-sql.py new file mode 100644 index 00000000..e18a4180 --- /dev/null +++ b/grafanalib/tests/examples/example.dashboard-with-sql.py @@ -0,0 +1,34 @@ +from grafanalib.core import ( + Dashboard, + Graph, + GridPos, + OPS_FORMAT, + RowPanel, + SHORT_FORMAT, + SqlTarget, + YAxes, + YAxis, +) + + +dashboard = Dashboard( + title="Random stats from SQL DB", + panels=[ + RowPanel(title="New row", gridPos=GridPos(h=1, w=24, x=0, y=8)), + Graph( + title="Some SQL Queries", + dataSource="Your SQL Source", + targets=[ + SqlTarget( + rawSql='SELECT date as "time", metric FROM example WHERE $__timeFilter("time")', + refId="A", + ), + ], + yAxes=YAxes( + YAxis(format=OPS_FORMAT), + YAxis(format=SHORT_FORMAT), + ), + gridPos=GridPos(h=8, w=24, x=0, y=9), + ), + ], +).auto_panel_ids() diff --git a/grafanalib/tests/examples/example.dashboard.py b/grafanalib/tests/examples/example.dashboard.py new file mode 100644 index 00000000..ccc88dc2 --- /dev/null +++ b/grafanalib/tests/examples/example.dashboard.py @@ -0,0 +1,52 @@ + +from grafanalib.core import ( + Dashboard, TimeSeries, GaugePanel, + Target, GridPos, + OPS_FORMAT +) + +dashboard = Dashboard( + title="Python generated example dashboard", + description="Example dashboard using the Random Walk and default Prometheus datasource", + tags=[ + 'example' + ], + timezone="browser", + panels=[ + TimeSeries( + title="Random Walk", + dataSource='default', + targets=[ + Target( + datasource='grafana', + expr='example', + ), + ], + gridPos=GridPos(h=8, w=16, x=0, y=0), + ), + GaugePanel( + title="Random Walk", + dataSource='default', + targets=[ + Target( + datasource='grafana', + expr='example', + ), + ], + gridPos=GridPos(h=4, w=4, x=17, y=0), + ), + TimeSeries( + title="Prometheus http requests", + dataSource='prometheus', + targets=[ + Target( + expr='rate(prometheus_http_requests_total[5m])', + legendFormat="{{ handler }}", + refId='A', + ), + ], + unit=OPS_FORMAT, + gridPos=GridPos(h=8, w=16, x=0, y=10), + ), + ], +).auto_panel_ids() diff --git a/grafanalib/tests/examples/example.upload-alerts.py b/grafanalib/tests/examples/example.upload-alerts.py new file mode 100644 index 00000000..79081f01 --- /dev/null +++ b/grafanalib/tests/examples/example.upload-alerts.py @@ -0,0 +1,61 @@ +from grafanalib.core import AlertGroup +from grafanalib._gen import DashboardEncoder, loader +import json +import requests +from os import getenv + + +def get_alert_json(alert: AlertGroup): + ''' + get_alert_json generates JSON from grafanalib AlertGroup object + + :param 
alert - AlertGroup created via grafanalib
+    '''
+
+    return json.dumps(alert.to_json_data(), sort_keys=True, indent=4, cls=DashboardEncoder)
+
+
+def upload_to_grafana(alertjson, folder, server, api_key, session_cookie, verify=True):
+    '''
+    upload_to_grafana tries to upload the AlertGroup to grafana and prints the response
+    WARNING: This will first delete all alerts in the AlertGroup before replacing them with the provided AlertGroup.
+
+    :param alertjson - AlertGroup json generated by grafanalib
+    :param folder - Folder to upload the AlertGroup into
+    :param server - grafana server name
+    :param api_key - grafana api key with read and write privileges
+    '''
+    groupName = json.loads(alertjson)['name']
+
+    headers = {}
+    if api_key:
+        print("using bearer auth")
+        headers['Authorization'] = f"Bearer {api_key}"
+
+    if session_cookie:
+        print("using session cookie")
+        headers['Cookie'] = session_cookie
+
+    print(f"deleting AlertGroup {groupName} in folder {folder}")
+    r = requests.delete(f"https://{server}/api/ruler/grafana/api/v1/rules/{folder}/{groupName}", headers=headers, verify=verify)
+    print(f"{r.status_code} - {r.content}")
+
+    headers['Content-Type'] = 'application/json'
+
+    print(f"ensuring folder {folder} exists")
+    r = requests.post(f"https://{server}/api/folders", data={"title": folder}, headers=headers)
+    print(f"{r.status_code} - {r.content}")
+
+    print(f"uploading AlertGroup {groupName} to folder {folder}")
+    r = requests.post(f"https://{server}/api/ruler/grafana/api/v1/rules/{folder}", data=alertjson, headers=headers, verify=verify)
+    # TODO: add error handling
+    print(f"{r.status_code} - {r.content}")
+
+
+grafana_api_key = getenv("GRAFANA_API_KEY")
+grafana_server = getenv("GRAFANA_SERVER")
+grafana_cookie = getenv("GRAFANA_COOKIE")
+
+# Generate an alert from the example
+my_alertgroup_json = get_alert_json(loader("./grafanalib/tests/examples/example.alertgroup.py"))
+upload_to_grafana(my_alertgroup_json, "testfolder", grafana_server, grafana_api_key, grafana_cookie)
diff --git a/grafanalib/tests/examples/example.upload-dashboard.py b/grafanalib/tests/examples/example.upload-dashboard.py
new file mode 100644
index 00000000..588379b8
--- /dev/null
+++ b/grafanalib/tests/examples/example.upload-dashboard.py
@@ -0,0 +1,44 @@
+from grafanalib.core import Dashboard
+from grafanalib._gen import DashboardEncoder
+import json
+import requests
+from os import getenv
+
+
+def get_dashboard_json(dashboard, overwrite=False, message="Updated by grafanalib"):
+    '''
+    get_dashboard_json generates JSON from grafanalib Dashboard object
+
+    :param dashboard - Dashboard() created via grafanalib
+    '''
+
+    # grafanalib generates JSON which needs to be wrapped in a "dashboard" root element
+    return json.dumps(
+        {
+            "dashboard": dashboard.to_json_data(),
+            "overwrite": overwrite,
+            "message": message
+        }, sort_keys=True, indent=2, cls=DashboardEncoder)
+
+
+def upload_to_grafana(json, server, api_key, verify=True):
+    '''
+    upload_to_grafana tries to upload dashboard to grafana and prints the response
+
+    :param json - dashboard json generated by grafanalib
+    :param server - grafana server name
+    :param api_key - grafana api key with read and write privileges
+    '''
+
+    headers = {'Authorization': f"Bearer {api_key}", 'Content-Type': 'application/json'}
+    r = requests.post(f"https://{server}/api/dashboards/db", data=json, headers=headers, verify=verify)
+    # TODO: add error handling
+    print(f"{r.status_code} - {r.content}")
+
+
+grafana_api_key = getenv("GRAFANA_API_KEY")
+grafana_server = getenv("GRAFANA_SERVER")
+
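+# Build a minimal dashboard and upload it; the server and API key are taken
+# from the GRAFANA_SERVER and GRAFANA_API_KEY environment variables above.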
+my_dashboard = Dashboard(title="My awesome dashboard", uid='abifsd') +my_dashboard_json = get_dashboard_json(my_dashboard, overwrite=True) +upload_to_grafana(my_dashboard_json, grafana_server, grafana_api_key) diff --git a/grafanalib/tests/examples/sqltarget_example_files/example.sql b/grafanalib/tests/examples/sqltarget_example_files/example.sql new file mode 100644 index 00000000..22a4747b --- /dev/null +++ b/grafanalib/tests/examples/sqltarget_example_files/example.sql @@ -0,0 +1,3 @@ +SELECT example, count(id) +FROM test +GROUP BY example; diff --git a/grafanalib/tests/examples/sqltarget_example_files/example_with_params.sql b/grafanalib/tests/examples/sqltarget_example_files/example_with_params.sql new file mode 100644 index 00000000..cbc1242b --- /dev/null +++ b/grafanalib/tests/examples/sqltarget_example_files/example_with_params.sql @@ -0,0 +1,3 @@ +SELECT example +FROM test +WHERE example='{example}' AND example_date BETWEEN '{starting_date}' AND '{ending_date}'; diff --git a/grafanalib/tests/examples/table-example-dashboard.py b/grafanalib/tests/examples/table-example-dashboard.py new file mode 100755 index 00000000..d067476c --- /dev/null +++ b/grafanalib/tests/examples/table-example-dashboard.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +""" +NAME: + table-example-dashboard.py + +DESCRIPTION: + This script creates Grafana dashboards using Grafanalib, and a static table + which defines metrics/dashboards. + + The resulting dashboard can be easily uploaded to Grafana with associated script: + + upload_grafana_dashboard.sh + +USAGE: + Create and upload the dashboard: + + ./table-example-dashboard.py --title "My python dashboard" > dash.json + ./upload_grafana_dashboard.sh dash.json + +""" + +import textwrap +import argparse +import sys +import io +import grafanalib.core as G +from grafanalib._gen import write_dashboard + +DEFAULT_TITLE = "Python Example Dashboard" + +# Simple example of table drive - good to enhance with Grid position, Legend etc. +metrics = [ + {'section': 'Monitor Tracking'}, + {'row': 1}, + {'title': 'Monitor Processes (by cmd)', + 'expr': ['monitor_by_cmd{serverid="$serverid"}', + 'sum(monitor_by_cmd{serverid="$serverid"})']}, + {'title': 'Monitor Processes (by user)', + 'expr': ['monitor_by_user{serverid="$serverid"}', + 'sum(monitor_by_user{serverid="$serverid"})']}, +] + + +class CreateDashboard(): + "See module doc string for details" + + def __init__(self, *args, **kwargs): + self.parse_args(__doc__, args) + + def parse_args(self, doc, args): + "Common parsing and setting up of args" + desc = textwrap.dedent(doc) + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=desc) + parser.add_argument('-t', '--title', default=DEFAULT_TITLE, + help="Dashboard title. Default: " + DEFAULT_TITLE) + self.options = parser.parse_args(args=args) + + def run(self): + templateList = [G.Template(default="", + dataSource="default", + name="serverid", + label="ServerID", + query="label_values(serverid)")] + + dashboard = G.Dashboard(title=self.options.title, + templating=G.Templating(list=templateList)) + + # Simple table processing - could be enhanced to use GridPos etc. 
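+        # Each entry in `metrics` is interpreted as follows: a 'section' key
+        # starts a new titled row, a 'row' key starts an untitled row, and any
+        # other entry becomes a Graph panel with one Target per item in 'expr'.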
+        for metric in metrics:
+            if 'section' in metric:
+                dashboard.rows.append(G.Row(title=metric['section'], showTitle=True))
+                continue
+            if 'row' in metric:
+                dashboard.rows.append(G.Row(title='', showTitle=False))
+                continue
+            graph = G.Graph(title=metric['title'],
+                            dataSource='default',
+                            maxDataPoints=1000,
+                            legend=G.Legend(show=True, alignAsTable=True,
+                                            min=True, max=True, avg=True, current=True, total=True,
+                                            sort='max', sortDesc=True),
+                            yAxes=G.single_y_axis())
+            ref_id = 'A'
+            for texp in metric['expr']:
+                graph.targets.append(G.Target(expr=texp,
+                                              refId=ref_id))
+                ref_id = chr(ord(ref_id) + 1)
+            dashboard.rows[-1].panels.append(graph)
+
+        # Auto-number panels - returns new dashboard
+        dashboard = dashboard.auto_panel_ids()
+
+        s = io.StringIO()
+        write_dashboard(dashboard, s)
+        print("""{
+          "dashboard": %s
+        }
+        """ % s.getvalue())
+
+
+if __name__ == '__main__':
+    """ Main Program"""
+    obj = CreateDashboard(*sys.argv[1:])
+    obj.run()
diff --git a/grafanalib/tests/examples/upload_grafana_dashboard.sh b/grafanalib/tests/examples/upload_grafana_dashboard.sh
new file mode 100755
index 00000000..4d95a2f4
--- /dev/null
+++ b/grafanalib/tests/examples/upload_grafana_dashboard.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+show_help_info () {
+echo -e "\n\tERROR: $1"
+
+cat <<HELPINFO
+
+Example:
+
+    ./upload_grafana_dashboard.sh dash.json
+
+HELPINFO
+}
+
+function msg () { echo -e "$*"; }
+function bail () { msg "\nError: ${1:-Unknown Error}\n"; exit ${2:-1}; }
+
+# -------------------------------------------------------------------------
+if [ -z "$1" ];then
+    show_help_info "No dashboard parameter received"
+    exit 1
+fi
+
+GRAFANA_API_KEY=${GRAFANA_API_KEY:-Unset}
+if [[ $GRAFANA_API_KEY == Unset ]]; then
+    echo -e "\\nError: GRAFANA_API_KEY environment variable not defined.\\n"
+    exit 1
+fi
+GRAFANA_SERVER=${GRAFANA_SERVER:-Unset}
+if [[ $GRAFANA_SERVER == Unset ]]; then
+    echo -e "\\nError: GRAFANA_SERVER environment variable not defined.\\n"
+    exit 1
+fi
+logfile="grafana_upload.log"
+
+# Get path/file param
+DASHBOARD=$1
+
+# Pull through jq to validate json
+payload="$(jq . 
${DASHBOARD}) >> $logfile" + +# Upload the JSON to Grafana +curl -X POST \ + -H 'Content-Type: application/json' \ + -d "${payload}" \ + "http://api_key:$GRAFANA_API_KEY@$GRAFANA_SERVER/api/dashboards/db" -w "\n" | tee -a "$logfile" diff --git a/grafanalib/tests/test_azuremonitor.py b/grafanalib/tests/test_azuremonitor.py new file mode 100644 index 00000000..42dfec21 --- /dev/null +++ b/grafanalib/tests/test_azuremonitor.py @@ -0,0 +1,79 @@ +"""Tests for Azure Monitor Datasource""" + +import grafanalib.core as G +import grafanalib.azuremonitor as A +from grafanalib import _gen +from io import StringIO + + +def test_serialization_azure_metrics_target(): + """Serializing a graph doesn't explode.""" + graph = G.TimeSeries( + title="Test Azure Monitor", + dataSource="default", + targets=[ + A.AzureMonitorMetricsTarget( + aggregation="Total", + metricDefinition="Microsoft.Web/sites", + metricName="Requests", + metricNamespace="Microsoft.Web/sites", + resourceGroup="test-grafana", + resourceName="test-grafana", + subscription="3a680d1a-9310-4667-9e6a-9fcd2ecddd86", + refId="Requests", + ), + ], + ) + stream = StringIO() + _gen.write_dashboard(graph, stream) + assert stream.getvalue() != "" + + +def test_serialization_azure_logs_target(): + """Serializing a graph doesn't explode.""" + + logs_query = """AzureMetrics +| where TimeGenerated > ago(30d) +| extend tail_latency = Maximum / Average +| where MetricName == "Http5xx" or (MetricName == "HttpResponseTime" and Average >= 3) or (MetricName == "HttpResponseTime" and tail_latency >= 10 and Average >= 0.5) +| summarize dcount(TimeGenerated) by Resource +| order by dcount_TimeGenerated""" + + graph = G.GaugePanel( + title="Test Logs", + dataSource="default", + targets=[ + A.AzureLogsTarget( + query=logs_query, + resource="/subscriptions/3a680d1a-9310-4667-9e6a-9fcd2ecddd86", + subscription="3a680d1a-9310-4667-9e6a-9fcd2ecddd86", + refId="Bad Minutes", + ), + ], + ) + stream = StringIO() + _gen.write_dashboard(graph, stream) + assert stream.getvalue() != "" + + +def test_serialization_azure_graph_target(): + """Serializing a graph doesn't explode.""" + + graph_query = """Resources +| project name, type, location +| order by name asc""" + + graph = G.GaugePanel( + title="Test Logs", + dataSource="default", + targets=[ + A.AzureLogsTarget( + query=graph_query, + subscription="3a680d1a-9310-4667-9e6a-9fcd2ecddd86", + refId="Resources", + ), + ], + ) + stream = StringIO() + _gen.write_dashboard(graph, stream) + assert stream.getvalue() != "" diff --git a/grafanalib/tests/test_cloudwatch.py b/grafanalib/tests/test_cloudwatch.py new file mode 100644 index 00000000..b35cdf6c --- /dev/null +++ b/grafanalib/tests/test_cloudwatch.py @@ -0,0 +1,66 @@ +"""Tests for Cloudwatch Datasource""" + +import grafanalib.core as G +import grafanalib.cloudwatch as C +from grafanalib import _gen +from io import StringIO + + +def test_serialization_cloudwatch_metrics_target(): + """Serializing a graph doesn't explode.""" + graph = G.Graph( + title="Lambda Duration", + dataSource="Cloudwatch data source", + targets=[ + C.CloudwatchMetricsTarget(), + ], + id=1, + yAxes=G.YAxes( + G.YAxis(format=G.SHORT_FORMAT, label="ms"), + G.YAxis(format=G.SHORT_FORMAT), + ), + ) + stream = StringIO() + _gen.write_dashboard(graph, stream) + assert stream.getvalue() != '' + + +def test_serialization_cloudwatch_logs_insights_target(): + """Serializing a graph doesn't explode.""" + graph = G.Logs( + title="Lambda Duration", + dataSource="Cloudwatch data source", + targets=[ + 
C.CloudwatchLogsInsightsTarget(), + ], + id=1, + wrapLogMessages=True + ) + stream = StringIO() + _gen.write_dashboard(graph, stream) + assert stream.getvalue() != '' + + +def test_cloudwatch_logs_insights_target(): + """Test Cloudwatch Logs Insights target""" + cloudwatch_logs_insights_expression = "fields @timestamp, @xrayTraceId, @message | filter @message like /^(?!.*(START|END|REPORT|LOGS|EXTENSION)).*$/ | sort @timestamp desc" + ref_id = "A" + log_group_names = ["/aws/lambda/foo", "/aws/lambda/bar"] + + target = C.CloudwatchLogsInsightsTarget( + expression=cloudwatch_logs_insights_expression, + logGroupNames=log_group_names, + refId=ref_id + ) + + data = target.to_json_data() + + assert data["expression"] == cloudwatch_logs_insights_expression + assert data["id"] == "" + assert data["logGroupNames"] == log_group_names + assert data["namespace"] == "" + assert data["queryMode"] == "Logs" + assert data["refId"] == ref_id + assert data["region"] == "default" + assert data["statsGroups"] == [] + assert data["hide"] is False diff --git a/grafanalib/tests/test_core.py b/grafanalib/tests/test_core.py index 6d72db87..6b640645 100644 --- a/grafanalib/tests/test_core.py +++ b/grafanalib/tests/test_core.py @@ -1,10 +1,47 @@ """Tests for core.""" +import random import pytest import grafanalib.core as G +def dummy_grid_pos() -> G.GridPos: + return G.GridPos(h=1, w=2, x=3, y=4) + + +def dummy_data_link() -> G.DataLink: + return G.DataLink( + title='dummy title', + linkUrl='https://www.dummy-link-url.com', + isNewTab=True + ) + + +def dummy_evaluator() -> G.Evaluator: + return G.Evaluator( + type=G.EVAL_GT, + params=42 + ) + + +def dummy_alert_condition() -> G.AlertCondition: + return G.AlertCondition( + target=G.Target( + refId="A", + ), + evaluator=G.Evaluator( + type=G.EVAL_GT, + params=42), + timeRange=G.TimeRange( + from_time='5m', + to_time='now' + ), + operator=G.OP_AND, + reducerType=G.RTYPE_AVG, + ) + + def test_template_defaults(): t = G.Template( name='test', @@ -60,26 +97,367 @@ def test_custom_template_dont_override_options(): assert t.to_json_data()['current']['value'] == '1' -def test_table_styled_columns(): - t = G.Table.with_styled_columns( - columns=[ - (G.Column('Foo', 'foo'), G.ColumnStyle()), - (G.Column('Bar', 'bar'), None), - ], - dataSource='some data source', +def test_stat_no_repeat(): + t = G.Stat( + title='dummy', + dataSource='data source', targets=[ - G.Target(expr='some expr'), - ], - title='table title', + G.Target(expr='some expr') + ] ) - assert t.columns == [ - G.Column('Foo', 'foo'), - G.Column('Bar', 'bar'), + + assert t.to_json_data()['repeat'] is None + assert t.to_json_data()['repeatDirection'] is None + assert t.to_json_data()['maxPerRow'] is None + + +def test_Text_exception_checks(): + with pytest.raises(TypeError): + G.Text(content=123) + + with pytest.raises(TypeError): + G.Text(error=123) + + with pytest.raises(ValueError): + G.Text(mode=123) + + +def test_ePictBox(): + t = G.ePictBox() + json_data = t.to_json_data() + + assert json_data['angle'] == 0 + assert json_data['backgroundColor'] == "#000" + assert json_data['blinkHigh'] is False + assert json_data['blinkLow'] is False + assert json_data['color'] == "#000" + assert json_data['colorHigh'] == "#000" + assert json_data['colorLow'] == "#000" + assert json_data['colorMedium'] == "#000" + assert json_data['colorSymbol'] is False + assert json_data['customSymbol'] == "" + assert json_data['decimal'] == 0 + assert json_data['fontSize'] == 12 + assert json_data['hasBackground'] is False + assert 
json_data['hasOrb'] is False + assert json_data['hasSymbol'] is False + assert json_data['isUsingThresholds'] is False + assert json_data['orbHideText'] is False + assert json_data['orbLocation'] == "Left" + assert json_data['orbSize'] == 13 + assert json_data['prefix'] == "" + assert json_data['prefixSize'] == 10 + assert json_data['selected'] is False + assert json_data['serie'] == "" + assert json_data['suffix'] == "" + assert json_data['suffixSize'] == 10 + assert json_data['symbol'] == "" + assert json_data['symbolDefHeight'] == 32 + assert json_data['symbolDefWidth'] == 32 + assert json_data['symbolHeight'] == 32 + assert json_data['symbolHideText'] is False + assert json_data['symbolWidth'] == 32 + assert json_data['text'] == "N/A" + assert json_data['thresholds'] == "" + assert json_data['url'] == "" + assert json_data['xpos'] == 0 + assert json_data['ypos'] == 0 + + t = G.ePictBox( + angle=1, + backgroundColor="#100", + blinkHigh=True, + blinkLow=True, + color="#200", + colorHigh="#300", + colorLow="#400", + colorMedium="#500", + colorSymbol=True, + decimal=2, + fontSize=9, + hasBackground=True, + hasOrb=True, + hasSymbol=True, + orbHideText=True, + orbLocation="Right", + orbSize=10, + prefix="prefix", + prefixSize=11, + selected=True, + serie="series", + suffix="suffix", + suffixSize=12, + symbol="data:image/svg+xml;base64,...", + symbolDefHeight=13, + symbolDefWidth=14, + symbolHeight=15, + symbolHideText=True, + symbolWidth=17, + text="text", + thresholds="40,50", + url="https://google.de", + xpos=18, + ypos=19, + ) + + json_data = t.to_json_data() + + assert json_data['angle'] == 1 + assert json_data['backgroundColor'] == "#100" + assert json_data['blinkHigh'] is True + assert json_data['blinkLow'] is True + assert json_data['color'] == "#200" + assert json_data['colorHigh'] == "#300" + assert json_data['colorLow'] == "#400" + assert json_data['colorMedium'] == "#500" + assert json_data['colorSymbol'] is True + assert json_data['decimal'] == 2 + assert json_data['fontSize'] == 9 + assert json_data['hasBackground'] is True + assert json_data['hasOrb'] is True + assert json_data['hasSymbol'] is True + assert json_data['isUsingThresholds'] is True + assert json_data['orbHideText'] is True + assert json_data['orbLocation'] == "Right" + assert json_data['orbSize'] == 10 + assert json_data['prefix'] == "prefix" + assert json_data['prefixSize'] == 11 + assert json_data['selected'] is True + assert json_data['serie'] == "series" + assert json_data['suffix'] == "suffix" + assert json_data['suffixSize'] == 12 + assert json_data['symbol'] == "data:image/svg+xml;base64,..." 
+ assert json_data['symbolDefHeight'] == 13 + assert json_data['symbolDefWidth'] == 14 + assert json_data['symbolHeight'] == 15 + assert json_data['symbolHideText'] is True + assert json_data['symbolWidth'] == 17 + assert json_data['text'] == "text" + assert json_data['thresholds'] == "40,50" + assert json_data['url'] == "https://google.de" + assert json_data['xpos'] == 18 + assert json_data['ypos'] == 19 + + +def test_ePictBox_custom_symbole_logic(): + t = G.ePictBox( + customSymbol="https://foo.bar/foo.jpg", + symbol="will be overiden", + ) + + json_data = t.to_json_data() + + assert json_data['customSymbol'] == "https://foo.bar/foo.jpg" + assert json_data['symbol'] == "custom" + + +def test_ePict(): + t = G.ePict() + json_data = t.to_json_data() + + assert json_data['type'] == G.EPICT_TYPE + assert json_data['options']['autoScale'] is True + assert json_data['options']['bgURL'] == '' + assert json_data['options']['boxes'] == [] + + t = G.ePict( + autoScale=False, + bgURL='https://example.com/img.jpg', + boxes=[ + G.ePictBox(), + G.ePictBox(angle=123), + ] + ) + json_data = t.to_json_data() + + print(json_data) + + assert json_data['type'] == G.EPICT_TYPE + assert json_data['options']['autoScale'] is False + assert json_data['options']['bgURL'] == 'https://example.com/img.jpg' + assert json_data['options']['boxes'] == [ + G.ePictBox(), + G.ePictBox(angle=123), ] - assert t.styles == [ - G.ColumnStyle(pattern='Foo'), + + +def test_Text(): + t = G.Text() + + json_data = t.to_json_data() + assert json_data['error'] is False + assert json_data['options']['content'] == "" + assert json_data['options']['mode'] == G.TEXT_MODE_MARKDOWN + + t = G.Text(content='foo', error=True, mode=G.TEXT_MODE_HTML) + + json_data = t.to_json_data() + assert json_data['error'] is True + assert json_data['options']['content'] == "foo" + assert json_data['options']['mode'] == G.TEXT_MODE_HTML + + +def test_DiscreteColorMappingItem_exception_checks(): + with pytest.raises(TypeError): + G.DiscreteColorMappingItem(123) + + with pytest.raises(TypeError): + G.DiscreteColorMappingItem("foo", color=123) + + +def test_DiscreteColorMappingItem(): + t = G.DiscreteColorMappingItem('foo') + + json_data = t.to_json_data() + assert json_data['text'] == 'foo' + assert json_data['color'] == G.GREY1 + + t = G.DiscreteColorMappingItem('foo', color='bar') + + json_data = t.to_json_data() + assert json_data['text'] == 'foo' + assert json_data['color'] == 'bar' + + +def test_Discrete_exceptions(): + with pytest.raises(ValueError): + G.Discrete(legendSortBy='foo') + + with pytest.raises(TypeError): + G.Discrete(rangeMaps=[123, 456]) + + with pytest.raises(TypeError): + G.Discrete(valueMaps=['foo', 'bar']) + + with pytest.raises(TypeError): + G.Discrete(lineColor=123) + + with pytest.raises(TypeError): + G.Discrete(highlightOnMouseover=123) + + +def test_Discrete(): + colorMap = [ + G.DiscreteColorMappingItem('bar', color='baz'), + G.DiscreteColorMappingItem('foz', color='faz') ] + t = G.Discrete( + title='foo', + colorMaps=colorMap, + lineColor='#aabbcc', + metricNameColor=G.RGBA(1, 2, 3, .5), + decimals=123, + highlightOnMouseover=False, + showDistinctCount=True, + showLegendCounts=False, + ) + + json_data = t.to_json_data() + assert json_data['colorMaps'] == colorMap + assert json_data['title'] == 'foo' + assert json_data['type'] == G.DISCRETE_TYPE + assert json_data['rangeMaps'] == [] + assert json_data['valueMaps'] == [] + + assert json_data['backgroundColor'] == G.RGBA(128, 128, 128, 0.1) + assert json_data['lineColor'] == 
'#aabbcc' + assert json_data['metricNameColor'] == G.RGBA(1, 2, 3, .5) + assert json_data['timeTextColor'] == "#d8d9da" + assert json_data['valueTextColor'] == "#000000" + + assert json_data['decimals'] == 123 + assert json_data['legendPercentDecimals'] == 0 + assert json_data['rowHeight'] == 50 + assert json_data['textSize'] == 24 + assert json_data['textSizeTime'] == 12 + + assert json_data['highlightOnMouseover'] is False + assert json_data['showLegend'] is True + assert json_data['showLegendPercent'] is True + assert json_data['showLegendNames'] is True + assert json_data['showLegendValues'] is True + assert json_data['showTimeAxis'] is True + assert json_data['use12HourClock'] is False + assert json_data['writeMetricNames'] is False + assert json_data['writeLastValue'] is True + assert json_data['writeAllValues'] is False + + assert json_data['showDistinctCount'] is True + assert json_data['showLegendCounts'] is False + assert json_data['showLegendTime'] is None + assert json_data['showTransitionCount'] is None + + +def test_StatValueMappings_exception_checks(): + with pytest.raises(TypeError): + G.StatValueMappings( + G.StatValueMappingItem('foo', '0', 'dark-red'), + "not of type StatValueMappingItem", + ) + + +def test_StatValueMappings(): + t = G.StatValueMappings( + G.StatValueMappingItem('foo', '0', 'dark-red'), # Value must a string + G.StatValueMappingItem('bar', '1', 'purple'), + ) + + json_data = t.to_json_data() + assert json_data['type'] == 'value' + assert json_data['options']['0']['text'] == 'foo' + assert json_data['options']['0']['color'] == 'dark-red' + assert json_data['options']['1']['text'] == 'bar' + assert json_data['options']['1']['color'] == 'purple' + + +def test_StatRangeMappings(): + t = G.StatRangeMappings( + 'dummy_text', + startValue=10, + endValue=20, + color='dark-red' + ) + + json_data = t.to_json_data() + assert json_data['type'] == 'range' + assert json_data['options']['from'] == 10 + assert json_data['options']['to'] == 20 + assert json_data['options']['result']['text'] == 'dummy_text' + assert json_data['options']['result']['color'] == 'dark-red' + + +def test_StatMapping(): + t = G.StatMapping( + 'dummy_text', + startValue='foo', + endValue='bar', + ) + + json_data = t.to_json_data() + assert json_data['text'] == 'dummy_text' + assert json_data['from'] == 'foo' + assert json_data['to'] == 'bar' + + +def test_stat_with_repeat(): + t = G.Stat( + title='dummy', + dataSource='data source', + targets=[ + G.Target(expr='some expr') + ], + repeat=G.Repeat( + variable="repetitionVariable", + direction='h', + maxPerRow=10 + ) + ) + + assert t.to_json_data()['repeat'] == 'repetitionVariable' + assert t.to_json_data()['repeatDirection'] == 'h' + assert t.to_json_data()['maxPerRow'] == 10 + def test_single_stat(): data_source = 'dummy data source' @@ -92,60 +470,694 @@ def test_single_stat(): assert data['title'] == title -CW_TESTDATA = [ - pytest.param( - {}, - {'region': '', - 'namespace': '', - 'metricName': '', - 'statistics': [], - 'dimensions': {}, - 'id': '', - 'expression': '', - 'period': '', - 'alias': '', - 'highResolution': False, - 'refId': '', - 'datasource': '', - 'hide': False}, - id='defaults', - ), - pytest.param( - { - 'region': 'us-east-1', - 'namespace': 'AWS/RDS', - 'metricName': 'CPUUtilization', - 'statistics': ['Average'], - 'dimensions': {'DBInstanceIdentifier': 'foo'}, - 'id': 'id', - 'expression': 'expr', - 'period': 'period', - 'alias': 'alias', - 'highResolution': True, - 'refId': 'A', - 'datasource': 'CloudWatch', - 'hide': True, 
- }, +def test_dashboard_list(): + title = 'dummy title' + dashboard_list = G.DashboardList(title=title) + data = dashboard_list.to_json_data() + assert data['targets'] == [] + assert data['datasource'] is None + assert data['title'] == title + assert data['starred'] is True + + +def test_logs_panel(): + data_source = 'dummy data source' + targets = ['dummy_prom_query'] + title = 'dummy title' + logs = G.Logs(data_source, targets, title) + data = logs.to_json_data() + assert data['targets'] == targets + assert data['datasource'] == data_source + assert data['title'] == title + assert data['options']['showLabels'] is False + assert data['options']['showCommonLabels'] is False + assert data['options']['showTime'] is False + assert data['options']['wrapLogMessage'] is False + assert data['options']['sortOrder'] == 'Descending' + assert data['options']['dedupStrategy'] == 'none' + assert data['options']['enableLogDetails'] is False + assert data['options']['prettifyLogMessage'] is False + + +def test_notification(): + uid = 'notification_channel' + notification = G.Notification(uid) + data = notification.to_json_data() + assert data['uid'] == uid + + +def test_graph_panel(): + data_source = 'dummy data source' + targets = ['dummy_prom_query'] + title = 'dummy title' + graph = G.Graph(data_source, targets, title) + data = graph.to_json_data() + assert data['targets'] == targets + assert data['datasource'] == data_source + assert data['title'] == title + assert 'alert' not in data + + +def test_panel_extra_json(): + data_source = 'dummy data source' + targets = ['dummy_prom_query'] + title = 'dummy title' + extraJson = { + 'fillGradient': 6, + 'yaxis': {'align': True}, + 'legend': {'avg': True}, + } + graph = G.Graph(data_source, targets, title, extraJson=extraJson) + data = graph.to_json_data() + assert data['targets'] == targets + assert data['datasource'] == data_source + assert data['title'] == title + assert 'alert' not in data + assert data['fillGradient'] == 6 + assert data['yaxis']['align'] is True + # Nested non-dict object should also be deep-updated + assert data['legend']['max'] is False + assert data['legend']['avg'] is True + + +def test_graph_panel_threshold(): + data_source = 'dummy data source' + targets = ['dummy_prom_query'] + title = 'dummy title' + thresholds = [ + G.GraphThreshold(20.0), + G.GraphThreshold(40.2, colorMode="ok") + ] + graph = G.Graph(data_source, targets, title, thresholds=thresholds) + data = graph.to_json_data() + assert data['targets'] == targets + assert data['datasource'] == data_source + assert data['title'] == title + assert 'alert' not in data + assert data['thresholds'] == thresholds + + +def test_graph_panel_alert(): + data_source = 'dummy data source' + targets = ['dummy_prom_query'] + title = 'dummy title' + alert = [ + G.AlertCondition(G.Target(refId="A"), G.Evaluator('a', 'b'), G.TimeRange('5', '6'), 'd', 'e') + ] + thresholds = [ + G.GraphThreshold(20.0), + G.GraphThreshold(40.2, colorMode="ok") + ] + graph = G.Graph(data_source, targets, title, thresholds=thresholds, alert=alert) + data = graph.to_json_data() + assert data['targets'] == targets + assert data['datasource'] == data_source + assert data['title'] == title + assert data['alert'] == alert + assert data['thresholds'] == [] + + +def test_graph_threshold(): + value = 20.0 + colorMode = "ok" + threshold = G.GraphThreshold(value, colorMode=colorMode) + data = threshold.to_json_data() + + assert data['value'] == value + assert data['colorMode'] == colorMode + assert data['fill'] is True 
+ assert data['line'] is True + assert data['op'] == G.EVAL_GT + assert 'fillColor' not in data + assert 'lineColor' not in data + + +def test_graph_threshold_custom(): + value = 20.0 + colorMode = "custom" + color = G.GREEN + threshold = G.GraphThreshold(value, colorMode=colorMode, fillColor=color) + data = threshold.to_json_data() + + assert data['value'] == value + assert data['colorMode'] == colorMode + assert data['fill'] is True + assert data['line'] is True + assert data['op'] == G.EVAL_GT + assert data['fillColor'] == color + assert data['lineColor'] == G.RED + + +def test_alert_list(): + alert_list = G.AlertList( + dashboardTags=['dummy tag'], + description='dummy description', + gridPos=dummy_grid_pos(), + id=random.randint(1, 10), + links=[dummy_data_link(), dummy_data_link()], + nameFilter='dummy name filter', + stateFilter=[G.ALERTLIST_STATE_ALERTING, G.ALERTLIST_STATE_OK], + title='dummy title' + ) + alert_list.to_json_data() + + +def test_SeriesOverride_exception_checks(): + with pytest.raises(TypeError): + G.SeriesOverride() + + with pytest.raises(TypeError): + G.SeriesOverride(123) + + with pytest.raises(TypeError): + G.SeriesOverride('alias', bars=123) + + with pytest.raises(TypeError): + G.SeriesOverride('alias', lines=123) + + with pytest.raises(ValueError): + G.SeriesOverride('alias', yaxis=123) + with pytest.raises(ValueError): + G.SeriesOverride('alias', yaxis='abc') + + with pytest.raises(TypeError): + G.SeriesOverride('alias', fillBelowTo=123) + + with pytest.raises(ValueError): + G.SeriesOverride('alias', fill="foo") + with pytest.raises(ValueError): + G.SeriesOverride('alias', fill=123) + with pytest.raises(ValueError): + G.SeriesOverride('alias', fill=-2) + with pytest.raises(ValueError): + G.SeriesOverride('alias', zindex=5) + + with pytest.raises(TypeError): + G.SeriesOverride('alias', dashes="foo") + + with pytest.raises(ValueError): + G.SeriesOverride('alias', dashLength=-2) + with pytest.raises(ValueError): + G.SeriesOverride('alias', dashLength=25) + with pytest.raises(ValueError): + G.SeriesOverride('alias', spaceLength=-2) + with pytest.raises(ValueError): + G.SeriesOverride('alias', spaceLength=25) + + with pytest.raises(ValueError): + G.SeriesOverride('alias', dashLength="foo") + with pytest.raises(ValueError): + G.SeriesOverride('alias', spaceLength="foo") + + +def test_SeriesOverride(): + t = G.SeriesOverride('alias').to_json_data() + + assert t['alias'] == 'alias' + assert t['bars'] is False + assert t['lines'] is True + assert t['yaxis'] == 1 + assert t['fill'] == 1 + assert t['color'] is None + assert t['fillBelowTo'] is None + assert t['dashes'] is False + assert t['dashLength'] is None + assert t['spaceLength'] is None + assert t['zindex'] == 0 + + t = G.SeriesOverride( + 'alias', + bars=True, + lines=False, + yaxis=2, + fill=7, + color='#abc', + fillBelowTo='other_alias', + dashes=True, + dashLength=12, + spaceLength=17, + zindex=-2, + ).to_json_data() + + assert t['alias'] == 'alias' + assert t['bars'] is True + assert t['lines'] is False + assert t['yaxis'] == 2 + assert t['fill'] == 7 + assert t['color'] == '#abc' + assert t['fillBelowTo'] == 'other_alias' + assert t['dashes'] is True + assert t['dashLength'] == 12 + assert t['spaceLength'] == 17 + assert t['zindex'] == -2 + + +def test_alert(): + alert = G.Alert( + name='dummy name', + message='dummy message', + alertConditions=dummy_alert_condition(), + alertRuleTags=dict(alert_rul_dummy_key='alert rul dummy value') + ) + alert.to_json_data() + + +def test_alertgroup(): + name = "Example 
Alert Group" + group = G.AlertGroup( + name=name, + rules=[ + G.AlertRulev8( + title="My Important Alert!", + triggers=[ + ( + G.Target(refId="A"), + G.AlertCondition( + evaluator=G.LowerThan(1), + operator=G.OP_OR, + ), + ), + ( + G.Target(refId="B"), + G.AlertCondition( + evaluator=G.GreaterThan(1), + operator=G.OP_OR, + ) + ) + ] + ) + ] + ) + + output = group.to_json_data() + + assert output["name"] == name + assert output["rules"][0]["grafana_alert"]["rule_group"] == name + + +def test_alertrulev8(): + title = "My Important Alert!" + annotations = {"summary": "this alert fires when prod is down!!!"} + labels = {"severity": "serious"} + rule = G.AlertRulev8( + title=title, + triggers=[ + ( + G.Target( + refId="A", + datasource="Prometheus", + ), + G.AlertCondition( + evaluator=G.LowerThan(1), + operator=G.OP_OR, + ), + ), + ( + G.Target( + refId="B", + datasource="Prometheus", + ), + G.AlertCondition( + evaluator=G.GreaterThan(1), + operator=G.OP_OR, + ) + ) + ], + annotations=annotations, + labels=labels, + evaluateFor="3m", + ) + + data = rule.to_json_data() + assert data['grafana_alert']['title'] == title + assert data['annotations'] == annotations + assert data['labels'] == labels + assert data['for'] == "3m" + + +def test_alertrule_invalid_triggers(): + # test that triggers is a list of [(Target, AlertCondition)] + + with pytest.raises(ValueError): + G.AlertRulev8( + title="Invalid rule", + triggers=[ + G.Target( + refId="A", + datasource="Prometheus", + ), + ], + ) + + with pytest.raises(ValueError): + G.AlertRulev8( + title="Invalid rule", + triggers=[ + ( + "foo", + G.AlertCondition( + evaluator=G.GreaterThan(1), + operator=G.OP_OR, + ) + ), + ], + ) + + with pytest.raises(ValueError): + G.AlertRulev8( + title="Invalid rule", + triggers=[ + ( + G.Target( + refId="A", + datasource="Prometheus", + ), + "bar" + ), + ], + ) + + +def test_alertrulev9(): + title = "My Important Alert!" 
+ annotations = {"summary": "this alert fires when prod is down!!!"} + labels = {"severity": "serious"} + condition = 'C' + rule = G.AlertRulev9( + title=title, + uid='alert1', + condition=condition, + triggers=[ + G.Target( + expr='query', + refId='A', + datasource='Prometheus', + ), + G.AlertExpression( + refId='B', + expressionType=G.EXP_TYPE_CLASSIC, + expression='A', + conditions=[ + G.AlertCondition( + evaluator=G.GreaterThan(3), + operator=G.OP_AND, + reducerType=G.RTYPE_LAST + ) + ] + ), + ], + annotations=annotations, + labels=labels, + evaluateFor="3m", + ) + + data = rule.to_json_data() + assert data['annotations'] == annotations + assert data['labels'] == labels + assert data['for'] == "3m" + assert data['grafana_alert']['title'] == title + assert data['grafana_alert']['condition'] == condition + + +def test_alertexpression(): + refId = 'D' + expression = 'C' + expressionType = G.EXP_TYPE_REDUCE + reduceFunction = G.EXP_REDUCER_FUNC_MAX + reduceMode = G.EXP_REDUCER_FUNC_DROP_NN + + alert_exp = G.AlertExpression( + refId=refId, + expression=expression, + expressionType=expressionType, + reduceFunction=reduceFunction, + reduceMode=reduceMode + ) + + data = alert_exp.to_json_data() + + assert data['refId'] == refId + assert data['datasourceUid'] == '-100' + assert data['model']['conditions'] == [] + assert data['model']['datasource'] == { + 'type': '__expr__', + 'uid': '-100' + } + assert data['model']['expression'] == expression + assert data['model']['refId'] == refId + assert data['model']['type'] == expressionType + assert data['model']['reducer'] == reduceFunction + assert data['model']['settings']['mode'] == reduceMode + + +def test_alertfilefasedfrovisioning(): + groups = [{ + 'foo': 'bar' + }] + + rules = G.AlertFileBasedProvisioning( + groups=groups + ) + + data = rules.to_json_data() + + assert data['apiVersion'] == 1 + assert data['groups'] == groups + + +def test_alertCondition_useNewAlerts_default(): + alert_condition = G.AlertCondition( + G.Target(refId="A"), + G.Evaluator('a', 'b'), + G.TimeRange('5', '6'), + 'd', + 'e' + ) + data = alert_condition.to_json_data() + assert data['query']['model'] is not None + assert len(data['query']['params']) == 3 + + +def test_alertCondition_useNewAlerts_true(): + alert_condition = G.AlertCondition( + G.Target(refId="A"), + G.Evaluator('a', 'b'), + G.TimeRange('5', '6'), + 'd', + 'e', + useNewAlerts=True + ) + data = alert_condition.to_json_data() + assert 'model' not in data['query'] + assert len(data['query']['params']) == 1 + + +def test_worldmap(): + data_source = 'dummy data source' + targets = ['dummy_prom_query'] + title = 'dummy title' + worldmap = G.Worldmap(data_source, targets, title, circleMaxSize=11) + data = worldmap.to_json_data() + assert data['targets'] == targets + assert data['datasource'] == data_source + assert data['title'] == title + assert data['circleMaxSize'] == 11 + + +def test_stateTimeline(): + data_source = 'dummy data source' + targets = ['dummy_prom_query'] + title = 'dummy title' + stateTimeline = G.StateTimeline(data_source, targets, title, rowHeight=0.7) + data = stateTimeline.to_json_data() + assert data['targets'] == targets + assert data['datasource'] == data_source + assert data['title'] == title + assert data['options']['rowHeight'] == 0.7 + + +def test_timeseries(): + data_source = 'dummy data source' + targets = ['dummy_prom_query'] + title = 'dummy title' + timeseries = G.TimeSeries(data_source, targets, title) + data = timeseries.to_json_data() + assert data['targets'] == targets + 
assert data['datasource'] == data_source + assert data['title'] == title + assert data['fieldConfig']['overrides'] == [] + + +def test_timeseries_with_overrides(): + data_source = 'dummy data source' + targets = ['dummy_prom_query'] + title = 'dummy title' + overrides = [ { - 'region': 'us-east-1', - 'namespace': 'AWS/RDS', - 'metricName': 'CPUUtilization', - 'statistics': ['Average'], - 'dimensions': {'DBInstanceIdentifier': 'foo'}, - 'id': 'id', - 'expression': 'expr', - 'period': 'period', - 'alias': 'alias', - 'highResolution': True, - 'refId': 'A', - 'datasource': 'CloudWatch', - 'hide': True, + "matcher": { + "id": "byName", + "options": "min" + }, + "properties": [ + { + "id": "custom.fillBelowTo", + "value": "min" + }, + { + "id": "custom.lineWidth", + "value": 0 + } + ] + } + ] + timeseries = G.TimeSeries( + dataSource=data_source, + targets=targets, + title=title, + overrides=overrides, + ) + data = timeseries.to_json_data() + assert data["targets"] == targets + assert data["datasource"] == data_source + assert data["title"] == title + assert data["fieldConfig"]["overrides"] == overrides + + +def test_news(): + title = "dummy title" + feedUrl = "www.example.com" + news = G.News(title=title, feedUrl=feedUrl) + data = news.to_json_data() + assert data["options"]["feedUrl"] == feedUrl + assert data["title"] == title + + +def test_pieChartv2(): + data_source = "dummy data source" + targets = ["dummy_prom_query"] + title = "dummy title" + pie = G.PieChartv2(data_source, targets, title) + data = pie.to_json_data() + assert data["targets"] == targets + assert data["datasource"] == data_source + assert data["title"] == title + + +def test_histogram(): + data_source = "dummy data source" + targets = ["dummy_prom_query"] + title = "dummy title" + panel = G.Histogram(data_source, targets, title) + data = panel.to_json_data() + assert data["targets"] == targets + assert data["datasource"] == data_source + assert data["title"] == title + assert 'bucketSize' not in data['options'] + + bucketSize = 5 + panel = G.Histogram(data_source, targets, title, bucketSize=bucketSize) + data = panel.to_json_data() + assert data['options']['bucketSize'] == bucketSize + + +def test_ae3e_plotly(): + data_source = "dummy data source" + targets = ["dummy_prom_query"] + title = "dummy title" + panel = G.Ae3ePlotly(data_source, targets, title) + data = panel.to_json_data() + assert data["targets"] == targets + assert data["datasource"] == data_source + assert data["title"] == title + assert bool(data["options"]["configuration"]) is False + assert bool(data["options"]["layout"]) is False + + config = { + "displayModeBar": False + } + layout = { + "font": { + "color": "darkgrey" }, - id='custom', + } + panel = G.Ae3ePlotly(data_source, targets, title, configuration=config, layout=layout) + data = panel.to_json_data() + assert data["options"]["configuration"] == config + assert data["options"]["layout"] == layout + + +def test_barchart(): + data_source = "dummy data source" + targets = ["dummy_prom_query"] + title = "dummy title" + panel = G.BarChart(data_source, targets, title) + data = panel.to_json_data() + assert data["targets"] == targets + assert data["datasource"] == data_source + assert data["title"] == title + assert data["options"] is not None + assert data["fieldConfig"] is not None + assert data["options"]["orientation"] == 'auto' + assert data["fieldConfig"]["defaults"]["color"]["mode"] == 'palette-classic' + + panel = G.BarChart(data_source, targets, title, orientation='horizontal', 
axisCenteredZero=True, showLegend=False) + data = panel.to_json_data() + assert data["options"]["orientation"] == 'horizontal' + assert data["fieldConfig"]["defaults"]["custom"]["axisCenteredZero"] + assert not data["options"]["legend"]["showLegend"] + + +def test_target_invalid(): + with pytest.raises(ValueError, match=r"target should have non-empty 'refId' attribute"): + return G.AlertCondition( + target=G.Target(), + evaluator=G.Evaluator( + type=G.EVAL_GT, + params=42), + timeRange=G.TimeRange( + from_time='5m', + to_time='now' + ), + operator=G.OP_AND, + reducerType=G.RTYPE_AVG, + ) + + +def test_sql_target(): + t = G.Table( + dataSource="some data source", + targets=[ + G.SqlTarget(rawSql="SELECT * FROM example"), + ], + title="table title", ) -] + assert t.to_json_data()["targets"][0].rawQuery is True + assert t.to_json_data()["targets"][0].rawSql == "SELECT * FROM example" + +def test_sql_target_with_source_files(): + t = G.Table( + dataSource="some data source", + targets=[ + G.SqlTarget(srcFilePath="grafanalib/tests/examples/sqltarget_example_files/example.sql"), + ], + title="table title", + ) + assert t.to_json_data()["targets"][0].rawQuery is True + assert t.to_json_data()["targets"][0].rawSql == "SELECT example, count(id)\nFROM test\nGROUP BY example;\n" + print(t.to_json_data()["targets"][0]) -@pytest.mark.parametrize("attrs,expected", CW_TESTDATA) -def test_cloud_watch_target_json_data(attrs, expected): - assert G.CloudWatchTarget(**attrs).to_json_data() == expected + t = G.Table( + dataSource="some data source", + targets=[ + G.SqlTarget(srcFilePath="grafanalib/tests/examples/sqltarget_example_files/example_with_params.sql", sqlParams={ + "example": "example", + "starting_date": "1970-01-01", + "ending_date": "1971-01-01", + },), + ], + title="table title", + ) + assert t.to_json_data()["targets"][0].rawQuery is True + assert t.to_json_data()["targets"][0].rawSql == "SELECT example\nFROM test\nWHERE example='example' AND example_date BETWEEN '1970-01-01' AND '1971-01-01';\n" + print(t.to_json_data()["targets"][0]) diff --git a/grafanalib/tests/test_elasticsearch.py b/grafanalib/tests/test_elasticsearch.py new file mode 100644 index 00000000..61361c5f --- /dev/null +++ b/grafanalib/tests/test_elasticsearch.py @@ -0,0 +1,41 @@ +"""Tests for elasticsearch.""" + +import grafanalib.elasticsearch as E +import pytest + + +def test_rate_metric_agg(): + t = E.RateMetricAgg() + json_data = t.to_json_data() + + assert json_data["id"] == "0" + assert json_data["hide"] is False + assert json_data["field"] == "" + assert len(json_data["settings"]) == 0 + assert json_data["type"] == "rate" + assert len(json_data) == 5 + + t = E.RateMetricAgg( + field="some-field", + hide=True, + id=2, + unit="minute", + mode="sum", + script="some script" + ) + json_data = t.to_json_data() + + assert json_data["id"] == "2" + assert json_data["hide"] is True + assert json_data["field"] == "some-field" + assert len(json_data["settings"]) == 3 + assert json_data["settings"]["unit"] == "minute" + assert json_data["settings"]["mode"] == "sum" + assert json_data["settings"]["script"] == "some script" + assert json_data["type"] == "rate" + assert len(json_data) == 5 + + with pytest.raises(ValueError): + t = E.RateMetricAgg( + mode="invalid mode" + ) diff --git a/grafanalib/tests/test_examples.py b/grafanalib/tests/test_examples.py new file mode 100644 index 00000000..87d2bb27 --- /dev/null +++ b/grafanalib/tests/test_examples.py @@ -0,0 +1,46 @@ +'''Run examples.''' + +from contextlib import redirect_stdout 
+import glob +import io +import os + +from grafanalib import _gen + + +def test_examples(): + '''Run examples in ./examples directory.''' + + # Run dashboard examples + examples_dir = os.path.join(os.path.dirname(__file__), 'examples') + dashboards = glob.glob('{}/*.dashboard.py'.format(examples_dir)) + assert len(dashboards) == 2 + + stdout = io.StringIO() + for example in dashboards: + with redirect_stdout(stdout): + ret = _gen.generate_dashboard([example]) + assert ret == 0 + assert stdout.getvalue() != '' + + # Run alertgroup example + alerts = glob.glob('{}/*.alertgroup.py'.format(examples_dir)) + assert len(alerts) == 2 + + stdout = io.StringIO() + for example in alerts: + with redirect_stdout(stdout): + ret = _gen.generate_alertgroup([example]) + assert ret == 0 + assert stdout.getvalue() != '' + + # Run file based provisioning of alerts example + alerts = glob.glob('{}/*.alertfilebasedprovisioning.py'.format(examples_dir)) + assert len(alerts) == 1 + + stdout = io.StringIO() + for example in alerts: + with redirect_stdout(stdout): + ret = _gen.generate_alertgroup([example]) + assert ret == 0 + assert stdout.getvalue() != '' diff --git a/grafanalib/tests/test_grafanalib.py b/grafanalib/tests/test_grafanalib.py index b33b81bf..a8a06cdb 100644 --- a/grafanalib/tests/test_grafanalib.py +++ b/grafanalib/tests/test_grafanalib.py @@ -25,10 +25,10 @@ def test_serialization(): ), ], id=1, - yAxes=[ + yAxes=G.YAxes( G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds / second"), G.YAxis(format=G.SHORT_FORMAT), - ], + ), ) stream = StringIO() _gen.write_dashboard(graph, stream) @@ -56,20 +56,45 @@ def test_auto_id(): hide=True ), ], - yAxes=[ + yAxes=G.YAxes( G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds"), G.YAxis(format=G.SHORT_FORMAT), - ], + ), ) ]), ], ).auto_panel_ids() assert dashboard.rows[0].panels[0].id == 1 + dashboard = G.Dashboard( + title="Test dashboard", + panels=[ + G.RowPanel(gridPos=G.GridPos(h=1, w=24, x=0, y=8)), + G.Graph( + title="CPU Usage by Namespace (rate[5m])", + dataSource="My data source", + targets=[ + G.Target( + expr='whatever', + legendFormat='{{namespace}}', + refId='A', + ), + ], + yAxes=G.YAxes( + G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds"), + G.YAxis(format=G.SHORT_FORMAT), + ), + gridPos=G.GridPos(h=1, w=24, x=0, y=8) + ) + ], + ).auto_panel_ids() + assert dashboard.panels[0].id == 1 + -def test_auto_refids(): +def test_auto_refids_preserves_provided_ids(): """ - auto_ref_ids() provides refIds for all targets without refIds already set. + auto_ref_ids() provides refIds for all targets without refIds already + set. 
""" dashboard = G.Dashboard( title="Test dashboard", @@ -77,7 +102,6 @@ def test_auto_refids(): G.Row(panels=[ G.Graph( title="CPU Usage by Namespace (rate[5m])", - dataSource="My data source", targets=[ G.Target( expr='whatever #Q', @@ -87,16 +111,11 @@ def test_auto_refids(): expr='hidden whatever', legendFormat='{{namespace}}', refId='Q', - hide=True ), G.Target( expr='another target' ), ], - yAxes=[ - G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds"), - G.YAxis(format=G.SHORT_FORMAT), - ], ).auto_ref_ids() ]), ], @@ -105,11 +124,67 @@ def test_auto_refids(): assert dashboard.rows[0].panels[0].targets[1].refId == 'Q' assert dashboard.rows[0].panels[0].targets[2].refId == 'B' + dashboard = G.Dashboard( + title="Test dashboard", + panels=[ + G.Graph( + title="CPU Usage by Namespace (rate[5m])", + dataSource="My data source", + targets=[ + G.Target( + expr='whatever #Q', + legendFormat='{{namespace}}', + ), + G.Target( + expr='hidden whatever', + legendFormat='{{namespace}}', + refId='Q', + ), + G.Target( + expr='another target' + ), + ], + yAxes=G.YAxes( + G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds"), + G.YAxis(format=G.SHORT_FORMAT), + ), + gridPos=G.GridPos(h=1, w=24, x=0, y=8) + ).auto_ref_ids() + ], + ).auto_panel_ids() + assert dashboard.panels[0].targets[0].refId == 'A' + assert dashboard.panels[0].targets[1].refId == 'Q' + assert dashboard.panels[0].targets[2].refId == 'B' + + +def test_auto_refids(): + """ + auto_ref_ids() provides refIds for all targets without refIds already + set. + """ + dashboard = G.Dashboard( + title="Test dashboard", + rows=[ + G.Row(panels=[ + G.Graph( + title="CPU Usage by Namespace (rate[5m])", + targets=[G.Target(expr="metric %d" % i) + for i in range(53)], + ).auto_ref_ids() + ]), + ], + ) + assert dashboard.rows[0].panels[0].targets[0].refId == 'A' + assert dashboard.rows[0].panels[0].targets[25].refId == 'Z' + assert dashboard.rows[0].panels[0].targets[26].refId == 'AA' + assert dashboard.rows[0].panels[0].targets[51].refId == 'AZ' + assert dashboard.rows[0].panels[0].targets[52].refId == 'BA' + def test_row_show_title(): row = G.Row().to_json_data() - assert row['title'] == 'New row' - assert not row['showTitle'] + assert row['title'] == '' + assert row['showTitle'] row = G.Row(title='My title').to_json_data() assert row['title'] == 'My title' @@ -118,3 +193,24 @@ def test_row_show_title(): row = G.Row(title='My title', showTitle=False).to_json_data() assert row['title'] == 'My title' assert not row['showTitle'] + + +def test_row_panel_show_title(): + row = G.RowPanel().to_json_data() + assert row['title'] == '' + assert row['panels'] == [] + + row = G.RowPanel(title='My title').to_json_data() + assert row['title'] == 'My title' + + row = G.RowPanel(title='My title', panels=['a', 'b']).to_json_data() + assert row['title'] == 'My title' + assert row['panels'][0] == 'a' + + +def test_row_panel_collapsed(): + row = G.RowPanel().to_json_data() + assert row['collapsed'] is False + + row = G.RowPanel(collapsed=True).to_json_data() + assert row['collapsed'] is True diff --git a/grafanalib/tests/test_humio.py b/grafanalib/tests/test_humio.py new file mode 100644 index 00000000..2551cfb8 --- /dev/null +++ b/grafanalib/tests/test_humio.py @@ -0,0 +1,25 @@ +"""Tests for Humio Datasource""" + +import grafanalib.core as G +import grafanalib.humio as H +from grafanalib import _gen +from io import StringIO + + +def test_serialization_humio_metrics_target(): + """Serializing a graph doesn't explode.""" + graph = G.Graph( + title="Humio Logs", + 
dataSource="Humio data source", + targets=[ + H.HumioTarget(), + ], + id=1, + yAxes=G.YAxes( + G.YAxis(format=G.SHORT_FORMAT, label="ms"), + G.YAxis(format=G.SHORT_FORMAT), + ), + ) + stream = StringIO() + _gen.write_dashboard(graph, stream) + assert stream.getvalue() != '' diff --git a/grafanalib/tests/test_opentsdb.py b/grafanalib/tests/test_opentsdb.py index f7644a61..7a1db2a6 100644 --- a/grafanalib/tests/test_opentsdb.py +++ b/grafanalib/tests/test_opentsdb.py @@ -29,10 +29,10 @@ def test_serialization_opentsdb_target(): ]), ], id=1, - yAxes=[ + yAxes=G.YAxes( G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds / second"), G.YAxis(format=G.SHORT_FORMAT), - ], + ), ) stream = StringIO() _gen.write_dashboard(graph, stream) diff --git a/grafanalib/tests/test_validators.py b/grafanalib/tests/test_validators.py index af4f4049..e3bf144e 100644 --- a/grafanalib/tests/test_validators.py +++ b/grafanalib/tests/test_validators.py @@ -10,9 +10,12 @@ def create_attribute(): default=None, validator=None, repr=True, - cmp=True, + cmp=None, + eq=True, + order=False, hash=True, - init=True) + init=True, + inherited=False) def test_is_in(): diff --git a/grafanalib/tests/test_zabbix.py b/grafanalib/tests/test_zabbix.py index fb7ebeeb..5874bd87 100644 --- a/grafanalib/tests/test_zabbix.py +++ b/grafanalib/tests/test_zabbix.py @@ -27,10 +27,10 @@ def test_serialization_zabbix_target(): ]), ], id=1, - yAxes=[ + yAxes=G.YAxes( G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds / second"), G.YAxis(format=G.SHORT_FORMAT), - ], + ), ) stream = StringIO() _gen.write_dashboard(graph, stream) diff --git a/grafanalib/validators.py b/grafanalib/validators.py index 16584f35..e7c69a2c 100644 --- a/grafanalib/validators.py +++ b/grafanalib/validators.py @@ -47,7 +47,7 @@ def is_color_code(instance, attribute, value): Value considered as valid color code if it starts with # char followed by hexadecimal. """ - err = "{attr} should be a valid color code".format(attr=attribute.name) + err = "{attr} should be a valid color code (e.g. 
#37872D)".format(attr=attribute.name) if not value.startswith("#"): raise ValueError(err) if len(value) != 7: diff --git a/grafanalib/weave.py b/grafanalib/weave.py index 5a01ad38..8ef4c9dc 100644 --- a/grafanalib/weave.py +++ b/grafanalib/weave.py @@ -10,20 +10,20 @@ from grafanalib import prometheus -YELLOW = "#EAB839" -GREEN = "#7EB26D" -BLUE = "#6ED0E0" -ORANGE = "#EF843C" -RED = "#E24D42" +YELLOW = '#EAB839' +GREEN = '#7EB26D' +BLUE = '#6ED0E0' +ORANGE = '#EF843C' +RED = '#E24D42' ALIAS_COLORS = { - "1xx": YELLOW, - "2xx": GREEN, - "3xx": BLUE, - "4xx": ORANGE, - "5xx": RED, - "success": GREEN, - "error": RED, + '1xx': YELLOW, + '2xx': GREEN, + '3xx': BLUE, + '4xx': ORANGE, + '5xx': RED, + 'success': GREEN, + 'error': RED, } @@ -46,23 +46,24 @@ def QPSGraph(data_source, title, expressions, **kwargs): title=title, expressions=exprs, aliasColors=ALIAS_COLORS, - yAxes=[ + yAxes=G.YAxes( G.YAxis(format=G.OPS_FORMAT), G.YAxis(format=G.SHORT_FORMAT), - ], + ), **kwargs )) def stacked(graph): """Turn a graph into a stacked graph.""" - return attr.assoc( + return attr.evolve( graph, lineWidth=0, nullPointMode=G.NULL_AS_ZERO, stack=True, fill=10, tooltip=G.Tooltip( + sort=G.SORT_DESC, valueType=G.INDIVIDUAL, ), ) diff --git a/grafanalib/zabbix.py b/grafanalib/zabbix.py index 3fb9038c..4aea0494 100644 --- a/grafanalib/zabbix.py +++ b/grafanalib/zabbix.py @@ -7,65 +7,65 @@ RGBA, Percent, Pixels, DashboardLink, DEFAULT_ROW_HEIGHT, BLANK, GREEN) -ZABBIX_TRIGGERS_TYPE = "alexanderzobnin-zabbix-triggers-panel" +ZABBIX_TRIGGERS_TYPE = 'alexanderzobnin-zabbix-triggers-panel' ZABBIX_QMODE_METRICS = 0 ZABBIX_QMODE_SERVICES = 1 ZABBIX_QMODE_TEXT = 2 ZABBIX_SLA_PROP_STATUS = { - "name": "Status", - "property": "status"} + 'name': 'Status', + 'property': 'status'} ZABBIX_SLA_PROP_SLA = { - "name": "SLA", - "property": "sla"} + 'name': 'SLA', + 'property': 'sla'} ZABBIX_SLA_PROP_OKTIME = { - "name": "OK time", - "property": "okTime"} + 'name': 'OK time', + 'property': 'okTime'} ZABBIX_SLA_PROP_PROBTIME = { - "name": "Problem time", - "property": "problemTime"} + 'name': 'Problem time', + 'property': 'problemTime'} ZABBIX_SLA_PROP_DOWNTIME = { - "name": "Down time", - "property": "downtimeTime", + 'name': 'Down time', + 'property': 'downtimeTime', } ZABBIX_EVENT_PROBLEMS = { - "text": "Problems", - "value": [1]} + 'text': 'Problems', + 'value': [1]} ZABBIX_EVENT_OK = { - "text": "OK", - "value": [0]} + 'text': 'OK', + 'value': [0]} ZABBIX_EVENT_ALL = { - "text": "All", - "value": [0, 1]} + 'text': 'All', + 'value': [0, 1]} -ZABBIX_TRIGGERS_SHOW_ALL = "all triggers" -ZABBIX_TRIGGERS_SHOW_ACK = "acknowledged" -ZABBIX_TRIGGERS_SHOW_NACK = "unacknowledged" +ZABBIX_TRIGGERS_SHOW_ALL = 'all triggers' +ZABBIX_TRIGGERS_SHOW_ACK = 'acknowledged' +ZABBIX_TRIGGERS_SHOW_NACK = 'unacknowledged' ZABBIX_SORT_TRIGGERS_BY_CHANGE = { - "text": "last change", - "value": "lastchange", + 'text': 'last change', + 'value': 'lastchange', } ZABBIX_SORT_TRIGGERS_BY_SEVERITY = { - "text": "severity", - "value": "priority", + 'text': 'severity', + 'value': 'priority', } ZABBIX_SEVERITY_COLORS = ( - ("#B7DBAB", "Not classified"), - ("#82B5D8", "Information"), - ("#E5AC0E", "Warning"), - ("#C15C17", "Average"), - ("#BF1B00", "High"), - ("#890F02", "Disaster"), + ('#B7DBAB', 'Not classified'), + ('#82B5D8', 'Information'), + ('#E5AC0E', 'Warning'), + ('#C15C17', 'Average'), + ('#BF1B00', 'High'), + ('#890F02', 'Disaster'), ) @@ -81,7 +81,7 @@ class ZabbixTargetOptions(object): def to_json_data(self): return { - "showDisabledItems": 
self.showDisabledItems + 'showDisabledItems': self.showDisabledItems } @@ -91,7 +91,7 @@ class ZabbixTargetField(object): def to_json_data(self): return { - "filter": self.filter + 'filter': self.filter } @@ -103,7 +103,7 @@ class ZabbixTarget(object): to visualize monitoring data from Zabbix and create dashboards for analyzing metrics and realtime monitoring. - Grafana docs on using Zabbix pluging: http://docs.grafana-zabbix.org/ + Grafana docs on using Zabbix plugin: https://alexanderzobnin.github.io/grafana-zabbix/ :param application: zabbix application name :param expr: zabbix arbitary query @@ -147,23 +147,23 @@ class ZabbixTarget(object): def to_json_data(self): obj = { - "application": ZabbixTargetField(self.application), - "expr": self.expr, - "functions": self.functions, - "group": ZabbixTargetField(self.group), - "host": ZabbixTargetField(self.host), - "intervalFactor": self.intervalFactor, - "item": ZabbixTargetField(self.item), - "mode": self.mode, - "options": self.options, - "refId": self.refId, + 'application': ZabbixTargetField(self.application), + 'expr': self.expr, + 'functions': self.functions, + 'group': ZabbixTargetField(self.group), + 'host': ZabbixTargetField(self.host), + 'intervalFactor': self.intervalFactor, + 'item': ZabbixTargetField(self.item), + 'mode': self.mode, + 'options': self.options, + 'refId': self.refId, } if self.mode == ZABBIX_QMODE_SERVICES: - obj["slaProperty"] = self.slaProperty, - obj["itservice"] = {"name": self.itService} + obj['slaProperty'] = self.slaProperty, + obj['itservice'] = {'name': self.itService} if self.mode == ZABBIX_QMODE_TEXT: - obj["textFilter"] = self.textFilter - obj["useCaptureGroups"] = self.useCaptureGroups + obj['textFilter'] = self.textFilter + obj['useCaptureGroups'] = self.useCaptureGroups return obj @@ -172,22 +172,22 @@ class ZabbixDeltaFunction(object): """ZabbixDeltaFunction Convert absolute values to delta, for example, bits to bits/sec - http://docs.grafana-zabbix.org/reference/functions/#delta + https://alexanderzobnin.github.io/grafana-zabbix/reference/functions/#delta """ added = attr.ib(default=False, validator=instance_of(bool)) def to_json_data(self): text = "delta()" definition = { - "category": "Transform", - "name": "delta", - "defaultParams": [], - "params": []} + 'category': 'Transform', + 'name': 'delta', + 'defaultParams': [], + 'params': []} return { - "added": self.added, - "text": text, - "def": definition, - "params": [], + 'added': self.added, + 'text': text, + 'def': definition, + 'params': [], } @@ -197,12 +197,12 @@ class ZabbixGroupByFunction(object): Takes each timeseries and consolidate its points falled in given interval into one point using function, which can be one of: avg, min, max, median. 
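A minimal sketch of how a transform such as groupBy() is attached to a panel through a ZabbixTarget's functions list. The data source, group, host and item names below are illustrative assumptions, not values taken from this change:

```python
import grafanalib.core as G
from grafanalib.zabbix import (
    ZabbixTarget,
    ZabbixGroupByFunction,
    ZabbixScaleFunction,
)

# Hypothetical panel: 5-minute averages of a CPU metric, rescaled by 100.
graph = G.Graph(
    title='CPU utilisation (Zabbix)',
    dataSource='Zabbix',  # assumed data source name
    targets=[
        ZabbixTarget(
            group='Linux servers',   # illustrative Zabbix host group
            host='web-01',           # illustrative host
            item='CPU utilisation',  # illustrative item
            functions=[
                ZabbixGroupByFunction(interval='5m', function='avg'),
                ZabbixScaleFunction(factor=100),
            ],
        ),
    ],
    yAxes=G.YAxes(
        G.YAxis(format=G.SHORT_FORMAT),
        G.YAxis(format=G.SHORT_FORMAT),
    ),
)
```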
- http://docs.grafana-zabbix.org/reference/functions/#groupBy + https://alexanderzobnin.github.io/grafana-zabbix/reference/functions//#groupBy """ - _options = ("avg", "min", "max", "median") - _default_interval = "1m" - _default_function = "avg" + _options = ('avg', 'min', 'max', 'median') + _default_interval = '1m' + _default_function = 'avg' added = attr.ib(default=False, validator=instance_of(bool)) interval = attr.ib(default=_default_interval, validator=is_interval) @@ -212,24 +212,24 @@ class ZabbixGroupByFunction(object): def to_json_data(self): text = "groupBy({interval}, {function})" definition = { - "category": "Transform", - "name": "groupBy", - "defaultParams": [ + 'category': 'Transform', + 'name': 'groupBy', + 'defaultParams': [ self._default_interval, self._default_function, ], - "params": [ - {"name": "interval", - "type": "string"}, - {"name": "function", - "options": self._options, - "type": "string"}]} + 'params': [ + {'name': 'interval', + 'type': 'string'}, + {'name': 'function', + 'options': self._options, + 'type': 'string'}]} return { - "def": definition, - "text": text.format( + 'def': definition, + 'text': text.format( interval=self.interval, function=self.function), - "params": [self.interval, self.function], - "added": self.added, + 'params': [self.interval, self.function], + 'added': self.added, } @@ -238,7 +238,7 @@ class ZabbixScaleFunction(object): """ZabbixScaleFunction Takes timeseries and multiplies each point by the given factor. - http://docs.grafana-zabbix.org/reference/functions/#scale + https://alexanderzobnin.github.io/grafana-zabbix/reference/functions//#scale """ _default_factor = 100 @@ -249,19 +249,19 @@ class ZabbixScaleFunction(object): def to_json_data(self): text = "scale({factor})" definition = { - "category": "Transform", - "name": "scale", - "defaultParams": [self._default_factor], - "params": [ - {"name": "factor", - "options": [100, 0.01, 10, -1], - "type": "float"}] + 'category': 'Transform', + 'name': 'scale', + 'defaultParams': [self._default_factor], + 'params': [ + {'name': 'factor', + 'options': [100, 0.01, 10, -1], + 'type': 'float'}] } return { - "def": definition, - "text": text.format(factor=self.factor), - "params": [self.factor], - "added": self.added, + 'def': definition, + 'text': text.format(factor=self.factor), + 'params': [self.factor], + 'added': self.added, } @@ -271,13 +271,13 @@ class ZabbixAggregateByFunction(object): Takes all timeseries and consolidate all its points falled in given interval into one point using function, which can be one of: - avg, min, max, median. - http://docs.grafana-zabbix.org/reference/functions/#aggregateBy + avg, min, max, median. 
+ https://alexanderzobnin.github.io/grafana-zabbix/reference/functions/#aggregateBy """ - _options = ("avg", "min", "max", "median") - _default_interval = "1m" - _default_function = "avg" + _options = ('avg', 'min', 'max', 'median') + _default_interval = '1m' + _default_function = 'avg' added = attr.ib(default=False, validator=instance_of(bool)) interval = attr.ib(default=_default_interval, validator=is_interval) @@ -287,24 +287,24 @@ class ZabbixAggregateByFunction(object): def to_json_data(self): text = "aggregateBy({interval}, {function})" definition = { - "category": "Aggregate", - "name": "aggregateBy", - "defaultParams": [ + 'category': 'Aggregate', + 'name': 'aggregateBy', + 'defaultParams': [ self._default_interval, self._default_function, ], - "params": [ - {"name": "interval", - "type": "string"}, - {"name": "function", - "options": self._options, - "type": "string"}]} + 'params': [ + {'name': 'interval', + 'type': 'string'}, + {'name': 'function', + 'options': self._options, + 'type': 'string'}]} return { - "def": definition, - "text": text.format( + 'def': definition, + 'text': text.format( interval=self.interval, function=self.function), - "params": [self.interval, self.function], - "added": self.added, + 'params': [self.interval, self.function], + 'added': self.added, } @@ -313,10 +313,10 @@ class ZabbixAverageFunction(object): """ZabbixAverageFunction Deprecated, use aggregateBy(interval, avg) instead. - http://docs.grafana-zabbix.org/reference/functions/#average + https://alexanderzobnin.github.io/grafana-zabbix/reference/functions/#average """ - _default_interval = "1m" + _default_interval = '1m' added = attr.ib(default=False, validator=instance_of(bool)) interval = attr.ib(default=_default_interval, validator=is_interval) @@ -324,21 +324,21 @@ class ZabbixAverageFunction(object): def to_json_data(self): text = "average({interval})" definition = { - "category": "Aggregate", + 'category': "Aggregate", "name": "average", "defaultParams": [ self._default_interval, ], - "params": [ - {"name": "interval", - "type": "string"}] + 'params': [ + {'name': 'interval', + 'type': 'string'}] } return { - "def": definition, - "text": text.format( + 'def': definition, + 'text': text.format( interval=self.interval), - "params": [self.interval], - "added": self.added, + 'params': [self.interval], + 'added': self.added, } @@ -347,10 +347,10 @@ class ZabbixMaxFunction(object): """ZabbixMaxFunction Deprecated, use aggregateBy(interval, max) instead. 
- http://docs.grafana-zabbix.org/reference/functions/#max + https://alexanderzobnin.github.io/grafana-zabbix/reference/functions/#max """ - _default_interval = "1m" + _default_interval = '1m' added = attr.ib(default=False, validator=instance_of(bool)) interval = attr.ib(default=_default_interval, validator=is_interval) @@ -358,21 +358,21 @@ class ZabbixMaxFunction(object): def to_json_data(self): text = "max({interval})" definition = { - "category": "Aggregate", - "name": "max", - "defaultParams": [ + 'category': 'Aggregate', + 'name': 'max', + 'defaultParams': [ self._default_interval, ], - "params": [ - {"name": "interval", - "type": "string"}] + 'params': [ + {'name': 'interval', + 'type': 'string'}] } return { - "def": definition, - "text": text.format( + 'def': definition, + 'text': text.format( interval=self.interval), - "params": [self.interval], - "added": self.added, + 'params': [self.interval], + 'added': self.added, } @@ -381,32 +381,32 @@ class ZabbixMedianFunction(object): """ZabbixMedianFunction Deprecated, use aggregateBy(interval, median) instead. - http://docs.grafana-zabbix.org/reference/functions/#median + https://alexanderzobnin.github.io/grafana-zabbix/reference/functions/#median """ - _default_interval = "1m" + _default_interval = '1m' added = attr.ib(default=False, validator=instance_of(bool)) - interval = attr.ib(default="1m", validator=is_interval) + interval = attr.ib(default='1m', validator=is_interval) def to_json_data(self): text = "median({interval})" definition = { - "category": "Aggregate", - "name": "median", - "defaultParams": [ + 'category': 'Aggregate', + 'name': 'median', + 'defaultParams': [ self._default_interval, ], - "params": [ - {"name": "interval", - "type": "string"}] + 'params': [ + {'name': 'interval', + 'type': 'string'}] } return { - "def": definition, - "text": text.format( + 'def': definition, + 'text': text.format( interval=self.interval), - "params": [self.interval], - "added": self.added, + 'params': [self.interval], + 'added': self.added, } @@ -415,10 +415,10 @@ class ZabbixMinFunction(object): """ZabbixMinFunction Deprecated, use aggregateBy(interval, min) instead. - http://docs.grafana-zabbix.org/reference/functions/#min + https://alexanderzobnin.github.io/grafana-zabbix/reference/functions/#min """ - _default_interval = "1m" + _default_interval = '1m' added = attr.ib(default=False, validator=instance_of(bool)) interval = attr.ib(default=_default_interval, validator=is_interval) @@ -426,21 +426,21 @@ class ZabbixMinFunction(object): def to_json_data(self): text = "min({interval})" definition = { - "category": "Aggregate", - "name": "min", - "defaultParams": [ + 'category': 'Aggregate', + 'name': 'min', + 'defaultParams': [ self._default_interval, ], - "params": [ - {"name": "interval", - "type": "string"}] + 'params': [ + {'name': 'interval', + 'type': 'string'}] } return { - "def": definition, - "text": text.format( + 'def': definition, + 'text': text.format( interval=self.interval), - "params": [self.interval], - "added": self.added, + 'params': [self.interval], + 'added': self.added, } @@ -452,31 +452,31 @@ class ZabbixSumSeriesFunction(object): This method required interpolation of each timeseries so it may cause high CPU load. Try to combine it with groupBy() function to reduce load. 
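As the note above advises, sumSeries() is typically paired with groupBy() so the interpolation only has to run on consolidated points. A small, hedged sketch; the group, host and item names are made up:

```python
from grafanalib.zabbix import (
    ZabbixTarget,
    ZabbixGroupByFunction,
    ZabbixSumSeriesFunction,
)

# Hypothetical target: consolidate each series to 1-minute averages,
# then sum the matching series into one.
total_traffic = ZabbixTarget(
    group='Linux servers',                    # illustrative host group
    host='/web-.*/',                          # illustrative host regex
    item='Incoming network traffic on eth0',  # illustrative item
    functions=[
        ZabbixGroupByFunction(interval='1m', function='avg'),
        ZabbixSumSeriesFunction(),
    ],
)
```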
- http://docs.grafana-zabbix.org/reference/functions/#sumSeries + https://alexanderzobnin.github.io/grafana-zabbix/reference/functions/#sumSeries """ added = attr.ib(default=False) def to_json_data(self): text = "sumSeries()" definition = { - "category": "Aggregate", - "name": "sumSeries", - "defaultParams": [], - "params": []} + 'category': 'Aggregate', + 'name': 'sumSeries', + 'defaultParams': [], + 'params': []} return { - "added": self.added, - "text": text, - "def": definition, - "params": [], + 'added': self.added, + 'text': text, + 'def': definition, + 'params': [], } @attr.s class ZabbixBottomFunction(object): - _options = ("avg", "min", "max", "median") + _options = ('avg', 'min', 'max', 'median') _default_number = 5 - _default_function = "avg" + _default_function = 'avg' added = attr.ib(default=False, validator=instance_of(bool)) number = attr.ib(default=_default_number, validator=instance_of(int)) @@ -486,33 +486,33 @@ class ZabbixBottomFunction(object): def to_json_data(self): text = "bottom({number}, {function})" definition = { - "category": "Filter", - "name": "bottom", - "defaultParams": [ + 'category': 'Filter', + 'name': 'bottom', + 'defaultParams': [ self._default_number, self._default_function, ], - "params": [ - {"name": "number", - "type": "string"}, - {"name": "function", - "options": self._options, - "type": "string"}]} + 'params': [ + {'name': 'number', + 'type': 'string'}, + {'name': 'function', + 'options': self._options, + 'type': 'string'}]} return { - "def": definition, - "text": text.format( + 'def': definition, + 'text': text.format( number=self.number, function=self.function), - "params": [self.number, self.function], - "added": self.added, + 'params': [self.number, self.function], + 'added': self.added, } @attr.s class ZabbixTopFunction(object): - _options = ("avg", "min", "max", "median") + _options = ('avg', 'min', 'max', 'median') _default_number = 5 - _default_function = "avg" + _default_function = 'avg' added = attr.ib(default=False, validator=instance_of(bool)) number = attr.ib(default=_default_number, validator=instance_of(int)) @@ -522,24 +522,24 @@ class ZabbixTopFunction(object): def to_json_data(self): text = "top({number}, {function})" definition = { - "category": "Filter", - "name": "top", - "defaultParams": [ + 'category': 'Filter', + 'name': 'top', + 'defaultParams': [ self._default_number, self._default_function, ], - "params": [ - {"name": "number", - "type": "string"}, - {"name": "function", - "options": self._options, - "type": "string"}]} + 'params': [ + {'name': 'number', + 'type': 'string'}, + {'name': 'function', + 'options': self._options, + 'type': 'string'}]} return { - "def": definition, - "text": text.format( + 'def': definition, + 'text': text.format( number=self.number, function=self.function), - "params": [self.number, self.function], - "added": self.added, + 'params': [self.number, self.function], + 'added': self.added, } @@ -549,11 +549,11 @@ class ZabbixTrendValueFunction(object): Specifying type of trend value returned by Zabbix when trends are used (avg, min or max). 
- http://docs.grafana-zabbix.org/reference/functions/#trendValue + https://alexanderzobnin.github.io/grafana-zabbix/reference/functions/#trendValue """ _options = ('avg', 'min', 'max') - _default_type = "avg" + _default_type = 'avg' added = attr.ib(default=False, validator=instance_of(bool)) type = attr.ib(default=_default_type, validator=is_in(_options)) @@ -561,21 +561,21 @@ class ZabbixTrendValueFunction(object): def to_json_data(self): text = "trendValue({type})" definition = { - "category": "Trends", - "name": "trendValue", - "defaultParams": [ + 'category': 'Trends', + 'name': 'trendValue', + 'defaultParams': [ self._default_type, ], - "params": [ - {"name": "type", - "options": self._options, - "type": "string"}]} + 'params': [ + {'name': 'type', + 'options': self._options, + 'type': 'string'}]} return { - "def": definition, - "text": text.format( + 'def': definition, + 'text': text.format( type=self.type), - "params": [self.type], - "added": self.added, + 'params': [self.type], + 'added': self.added, } @@ -587,11 +587,11 @@ class ZabbixTimeShiftFunction(object): If no sign is given, a minus sign ( - ) is implied which will shift the metric back in time. If a plus sign ( + ) is given, the metric will be shifted forward in time. - http://docs.grafana-zabbix.org/reference/functions/#timeShift + https://alexanderzobnin.github.io/grafana-zabbix/reference/functions/#timeShift """ - _options = ("24h", "7d", "1M", "+24h", "-24h") - _default_interval = "24h" + _options = ('24h', '7d', '1M', '+24h', '-24h') + _default_interval = '24h' added = attr.ib(default=False, validator=instance_of(bool)) interval = attr.ib(default=_default_interval) @@ -599,21 +599,21 @@ class ZabbixTimeShiftFunction(object): def to_json_data(self): text = "timeShift({interval})" definition = { - "category": "Time", - "name": "timeShift", - "defaultParams": [ + 'category': 'Time', + 'name': 'timeShift', + 'defaultParams': [ self._default_interval, ], - "params": [ - {"name": "interval", - "options": self._options, - "type": "string"}]} + 'params': [ + {'name': 'interval', + 'options': self._options, + 'type': 'string'}]} return { - "def": definition, - "text": text.format( + 'def': definition, + 'text': text.format( interval=self.interval), - "params": [self.interval], - "added": self.added, + 'params': [self.interval], + 'added': self.added, } @@ -622,7 +622,7 @@ class ZabbixSetAliasFunction(object): """ZabbixSetAliasFunction Returns given alias instead of the metric name. - http://docs.grafana-zabbix.org/reference/functions/#setAlias + https://alexanderzobnin.github.io/grafana-zabbix/reference/functions/#setAlias """ alias = attr.ib(validator=instance_of(str)) added = attr.ib(default=False, validator=instance_of(bool)) @@ -630,17 +630,17 @@ class ZabbixSetAliasFunction(object): def to_json_data(self): text = "setAlias({alias})" definition = { - "category": "Alias", - "name": "setAlias", - "defaultParams": [], - "params": [ - {"name": "alias", - "type": "string"}]} + 'category': 'Alias', + 'name': 'setAlias', + 'defaultParams': [], + 'params': [ + {'name': 'alias', + 'type': 'string'}]} return { - "def": definition, - "text": text.format(alias=self.alias), - "params": [self.alias], - "added": self.added, + 'def': definition, + 'text': text.format(alias=self.alias), + 'params': [self.alias], + 'added': self.added, } @@ -649,7 +649,7 @@ class ZabbixSetAliasByRegexFunction(object): """ZabbixSetAliasByRegexFunction Returns part of the metric name matched by regex. 
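For completeness, a hedged sketch of the alias functions in use; the alias text and regular expression below are placeholders rather than values from this change:

```python
from grafanalib.zabbix import (
    ZabbixTarget,
    ZabbixSetAliasFunction,
    ZabbixSetAliasByRegexFunction,
)

# Give one series a fixed legend label.
fixed_label = ZabbixTarget(
    group='Linux servers',
    host='web-01',
    item='CPU load',
    functions=[ZabbixSetAliasFunction(alias='frontend CPU load')],
)

# Keep only the part of the metric name matched by the regex
# (placeholder pattern).
regex_label = ZabbixTarget(
    group='Linux servers',
    host='/web-.*/',
    item='CPU load',
    functions=[ZabbixSetAliasByRegexFunction(regexp='^([^:]+)')],
)
```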
- http://docs.grafana-zabbix.org/reference/functions/#setAliasByRegex + https://alexanderzobnin.github.io/grafana-zabbix/reference/functions/#setAliasByRegex """ regexp = attr.ib(validator=instance_of(str)) @@ -658,17 +658,17 @@ class ZabbixSetAliasByRegexFunction(object): def to_json_data(self): text = "setAliasByRegex({regexp})" definition = { - "category": "Alias", - "name": "setAliasByRegex", - "defaultParams": [], - "params": [ - {"name": "aliasByRegex", - "type": "string"}]} + 'category': 'Alias', + 'name': 'setAliasByRegex', + 'defaultParams': [], + 'params': [ + {'name': 'aliasByRegex', + 'type': 'string'}]} return { - "def": definition, - "text": text.format(regexp=self.regexp), - "params": [self.regexp], - "added": self.added, + 'def': definition, + 'text': text.format(regexp=self.regexp), + 'params': [self.regexp], + 'added': self.added, } @@ -713,10 +713,10 @@ class ZabbixColor(object): def to_json_data(self): return { - "color": self.color, - "priority": self.priority, - "severity": self.severity, - "show": self.show, + 'color': self.color, + 'priority': self.priority, + 'severity': self.severity, + 'show': self.show, } @@ -730,10 +730,10 @@ class ZabbixTrigger(object): def to_json_data(self): return { - "application": ZabbixTargetField(self.application), - "group": ZabbixTargetField(self.group), - "host": ZabbixTargetField(self.host), - "trigger": ZabbixTargetField(self.trigger), + 'application': ZabbixTargetField(self.application), + 'group': ZabbixTargetField(self.group), + 'host': ZabbixTargetField(self.host), + 'trigger': ZabbixTargetField(self.trigger), } @@ -819,10 +819,16 @@ class ZabbixTriggersPanel(object): span = attr.ib(default=None) statusField = attr.ib(default=False, validator=instance_of(bool)) transparent = attr.ib(default=False, validator=instance_of(bool)) - triggerSeverity = attr.ib( - default=ZABBIX_SEVERITY_COLORS, - converter=convertZabbixSeverityColors, - ) + try: + triggerSeverity = attr.ib( + default=ZABBIX_SEVERITY_COLORS, + converter=convertZabbixSeverityColors, + ) + except TypeError: + triggerSeverity = attr.ib( + default=ZABBIX_SEVERITY_COLORS, + convert=convertZabbixSeverityColors, + ) triggers = attr.ib( default=attr.Factory(ZabbixTrigger), validator=instance_of(ZabbixTrigger), @@ -830,37 +836,37 @@ class ZabbixTriggersPanel(object): def to_json_data(self): return { - "type": ZABBIX_TRIGGERS_TYPE, - "datasource": self.dataSource, - "title": self.title, - "ackEventColor": self.ackEventColor, - "ageField": self.ageField, - "customLastChangeFormat": self.customLastChangeFormat, - "description": self.description, - "fontSize": self.fontSize, - "height": self.height, - "hideHostsInMaintenance": self.hideHostsInMaintenance, - "hostField": self.hostField, - "hostTechNameField": self.hostTechNameField, - "id": self.id, - "infoField": self.infoField, - "lastChangeField": self.lastChangeField, - "lastChangeFormat": self.lastChangeFormat, - "limit": self.limit, - "links": self.links, - "markAckEvents": self.markAckEvents, - "minSpan": self.minSpan, - "okEventColor": self.okEventColor, - "pageSize": self.pageSize, - "repeat": self.repeat, - "scroll": self.scroll, - "severityField": self.severityField, - "showEvents": self.showEvents, - "showTriggers": self.showTriggers, - "sortTriggersBy": self.sortTriggersBy, - "span": self.span, - "statusField": self.statusField, - "transparent": self.transparent, - "triggers": self.triggers, - "triggerSeverity": self.triggerSeverity, + 'type': ZABBIX_TRIGGERS_TYPE, + 'datasource': self.dataSource, + 'title': self.title, 
+ 'ackEventColor': self.ackEventColor, + 'ageField': self.ageField, + 'customLastChangeFormat': self.customLastChangeFormat, + 'description': self.description, + 'fontSize': self.fontSize, + 'height': self.height, + 'hideHostsInMaintenance': self.hideHostsInMaintenance, + 'hostField': self.hostField, + 'hostTechNameField': self.hostTechNameField, + 'id': self.id, + 'infoField': self.infoField, + 'lastChangeField': self.lastChangeField, + 'lastChangeFormat': self.lastChangeFormat, + 'limit': self.limit, + 'links': self.links, + 'markAckEvents': self.markAckEvents, + 'minSpan': self.minSpan, + 'okEventColor': self.okEventColor, + 'pageSize': self.pageSize, + 'repeat': self.repeat, + 'scroll': self.scroll, + 'severityField': self.severityField, + 'showEvents': self.showEvents, + 'showTriggers': self.showTriggers, + 'sortTriggersBy': self.sortTriggersBy, + 'span': self.span, + 'statusField': self.statusField, + 'transparent': self.transparent, + 'triggers': self.triggers, + 'triggerSeverity': self.triggerSeverity, } diff --git a/setup.py b/setup.py index caddcf29..0eb10c4e 100644 --- a/setup.py +++ b/setup.py @@ -14,11 +14,15 @@ def local_file(name): # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see - # https://packaging.python.org/en/latest/single_source_version.html - version='0.6.1', + # https://packaging.python.org/en/latest/guides/single-sourcing-package-version/ + version='0.7.0', description='Library for building Grafana dashboards', long_description=open(README).read(), url='https://github.com/weaveworks/grafanalib', + project_urls={ + "Documentation": "https://grafanalib.readthedocs.io", + "Source": "https://github.com/weaveworks/grafanalib", + }, author='Weaveworks', author_email='help+grafanalib@weave.works', license='Apache', @@ -29,8 +33,11 @@ def local_file(name): 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: Apache Software License', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', 'Topic :: System :: Monitoring', ], install_requires=[ @@ -46,6 +53,8 @@ def local_file(name): 'console_scripts': [ 'generate-dashboard=grafanalib._gen:generate_dashboard_script', 'generate-dashboards=grafanalib._gen:generate_dashboards_script', + 'generate-alertgroup=grafanalib._gen:generate_alertgroup_script', + 'generate-alertgroups=grafanalib._gen:generate_alertgroups_script' ], }, ) diff --git a/tools/.gitignore b/tools/.gitignore deleted file mode 100644 index 308ae9d3..00000000 --- a/tools/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -cover/cover -socks/proxy -socks/image.tar -runner/runner -*.pyc -*~ -terraform.tfstate -terraform.tfstate.backup -*.retry -build/**/.uptodate diff --git a/tools/README.md b/tools/README.md deleted file mode 100644 index 9092b8e2..00000000 --- a/tools/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# Weaveworks Build Tools - -Included in this repo are tools shared by weave.git and scope.git. They include - -- ```build```: a set of docker base-images for building weave - projects. These should be used instead of giving each project its - own build image. -- ```provisioning```: a set of Terraform scripts to provision virtual machines in GCP, AWS or Digital Ocean. 
-- ```config_management```: a set of Ansible playbooks to configure virtual machines for development, testing, etc. -- ```cover```: a tool which merges overlapping coverage reports generated by go - test -- ```files-with-type```: a tool to search directories for files of a given - MIME type -- ```lint```: a script to lint go, sh and hcl files; runs various tools like - golint, go vet, errcheck, shellcheck etc -- ```rebuild-image```: a script to rebuild docker images when their input files - change; useful when you using docker images to build your software, but you - don't want to build the image every time. -- ```shell-lint```: a script to lint multiple shell files with - [shellcheck](http://www.shellcheck.net/) -- ```socks```: a simple, dockerised SOCKS proxy for getting your laptop onto - the Weave network -- ```test```: a script to run all go unit tests in subdirectories, gather the - coverage results, and merge them into a single report. -- ```runner```: a tool for running tests in parallel; given each test is - suffixed with the number of hosts it requires, and the hosts available are - contained in the environment variable HOSTS, the tool will run tests in - parallel, on different hosts. -- ```scheduler```: an appengine application that can be used to distribute - tests across different shards in CircleCI. - -## Requirements - -- ```lint``` requires shfmt to lint sh files; get shfmt with - ```go get -u gopkg.in/mvdan/sh.v1/cmd/shfmt``` - -## Using build-tools.git - -To allow you to tie your code to a specific version of build-tools.git, such -that future changes don't break you, we recommendation that you [`git subtree`]() -this repository into your own repository: - -[`git subtree`]: http://blogs.atlassian.com/2013/05/alternatives-to-git-submodule-git-subtree/ - -``` -git subtree add --prefix tools https://github.com/weaveworks/build-tools.git master --squash -```` - -To update the code in build-tools.git, the process is therefore: -- PR into build-tools.git, go through normal review process etc. -- Do `git subtree pull --prefix tools https://github.com/weaveworks/build-tools.git master --squash` - in your repo, and PR that. diff --git a/tools/build/Makefile b/tools/build/Makefile deleted file mode 100644 index cea049be..00000000 --- a/tools/build/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -.PHONY: all clean images -.DEFAULT_GOAL := all - -# Boiler plate for bulding Docker containers. -# All this must go at top of file I'm afraid. -IMAGE_PREFIX := quay.io/weaveworks/build- -IMAGE_TAG := $(shell ../image-tag) -UPTODATE := .uptodate - -# Every directory with a Dockerfile in it builds an image called -# $(IMAGE_PREFIX). Dependencies (i.e. things that go in the image) -# still need to be explicitly declared. -%/$(UPTODATE): %/Dockerfile %/* - $(SUDO) docker build -t $(IMAGE_PREFIX)$(shell basename $(@D)) $(@D)/ - $(SUDO) docker tag $(IMAGE_PREFIX)$(shell basename $(@D)) $(IMAGE_PREFIX)$(shell basename $(@D)):$(IMAGE_TAG) - touch $@ - -# Get a list of directories containing Dockerfiles -DOCKERFILES := $(shell find . 
-name tools -prune -o -name vendor -prune -o -type f -name 'Dockerfile' -print) -UPTODATE_FILES := $(patsubst %/Dockerfile,%/$(UPTODATE),$(DOCKERFILES)) -DOCKER_IMAGE_DIRS := $(patsubst %/Dockerfile,%,$(DOCKERFILES)) -IMAGE_NAMES := $(foreach dir,$(DOCKER_IMAGE_DIRS),$(patsubst %,$(IMAGE_PREFIX)%,$(shell basename $(dir)))) -images: - $(info $(IMAGE_NAMES)) - @echo > /dev/null - -# Define imagetag-golang, etc, for each image, which parses the dockerfile and -# prints an image tag. For example: -# FROM golang:1.8.1-stretch -# in the "foo/Dockerfile" becomes: -# $ make imagetag-foo -# 1.8.1-stretch -define imagetag_dep -.PHONY: imagetag-$(1) -$(patsubst $(IMAGE_PREFIX)%,imagetag-%,$(1)): $(patsubst $(IMAGE_PREFIX)%,%,$(1))/Dockerfile - @cat $$< | grep "^FROM " | head -n1 | sed 's/FROM \(.*\):\(.*\)/\2/' -endef -$(foreach image, $(IMAGE_NAMES), $(eval $(call imagetag_dep, $(image)))) - -all: $(UPTODATE_FILES) - -clean: - $(SUDO) docker rmi $(IMAGE_NAMES) >/dev/null 2>&1 || true - rm -rf $(UPTODATE_FILES) - - diff --git a/tools/circle.yml b/tools/circle.yml deleted file mode 100644 index 976a68cc..00000000 --- a/tools/circle.yml +++ /dev/null @@ -1,55 +0,0 @@ -machine: - services: - - docker - environment: - GOPATH: /home/ubuntu - SRCDIR: /home/ubuntu/src/github.com/weaveworks/tools - PATH: $PATH:$HOME/bin - -dependencies: - post: - - sudo chmod a+wr --recursive /usr/local/go/pkg - - go clean -i net - - go install -tags netgo std - - mkdir -p $(dirname $SRCDIR) - - cp -r $(pwd)/ $SRCDIR - - | - curl -fsSLo shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \ - echo "b1925c2c405458811f0c227266402cf1868b4de529f114722c2e3a5af4ac7bb2 shfmt" | sha256sum -c && \ - chmod +x shfmt && \ - sudo mv shfmt /usr/bin - - | - cd $SRCDIR; - go get \ - github.com/fzipp/gocyclo \ - github.com/golang/lint/golint \ - github.com/kisielk/errcheck \ - github.com/fatih/hclfmt - - pip install yapf==0.16.2 flake8==3.3.0 - -test: - override: - - cd $SRCDIR; ./lint . - - cd $SRCDIR/cover; make - - cd $SRCDIR/socks; make - - cd $SRCDIR/runner; make - - cd $SRCDIR/build; make - -deployment: - snapshot: - branch: master - commands: - - docker login -e "$DOCKER_REGISTRY_EMAIL" -u "$DOCKER_REGISTRY_USER" -p "$DOCKER_REGISTRY_PASS" "$DOCKER_REGISTRY_URL" - - | - cd $SRCDIR/build; - for image in $(make images); do - # Tag the built images with the revision of this repo. - docker push "${image}:${GIT_TAG}" - - # Tag the built images with something derived from the base images in - # their respective Dockerfiles. So "FROM golang:1.8.0-stretch" as a - # base image would lead to a tag of "1.8.0-stretch" - IMG_TAG=$(make "imagetag-${image#quay.io/weaveworks/build-}") - docker tag "${image}:latest" "${image}:${IMG_TAG}" - docker push "${image}:${IMG_TAG}" - done diff --git a/tools/config_management/README.md b/tools/config_management/README.md deleted file mode 100644 index bf1f6f65..00000000 --- a/tools/config_management/README.md +++ /dev/null @@ -1,141 +0,0 @@ -# Weaveworks configuration management - -## Introduction - -This project allows you to configure a machine with: - -* Docker and Weave Net for development: `setup_weave-net_dev.yml` -* Docker and Weave Net for testing: `setup_weave-net_test.yml` -* Docker, Kubernetes and Weave Kube (CNI plugin): `setup_weave-kube.yml` - -You can then use these environments for development, testing and debugging. 
- -## Set up - -You will need [Python](https://www.python.org/downloads/) and [Ansible 2.+](http://docs.ansible.com/ansible/intro_installation.html) installed on your machine and added to your `PATH` in order to be able to configure environments automatically. - -* On any platform, if you have Python installed: `pip install ansible` -* On macOS: `brew install ansible` -* On Linux (via Aptitude): `sudo apt install ansible` -* On Linux (via YUM): `sudo yum install ansible` -* For other platforms or more details, see [here](http://docs.ansible.com/ansible/intro_installation.html) - -Frequent errors during installation are: - -* `fatal error: Python.h: No such file or directory`: install `python-dev` -* `fatal error: ffi.h: No such file or directory`: install `libffi-dev` -* `fatal error: openssl/opensslv.h: No such file or directory`: install `libssl-dev` - -Full steps for a blank Ubuntu/Debian Linux machine: - - sudo apt-get install -qq -y python-pip python-dev libffi-dev libssl-dev - sudo pip install -U cffi - sudo pip install ansible - -## Tags - -These can be used to selectively run (`--tags "tag1,tag2"`) or skip (`--skip-tags "tag1,tag2"`) tasks. - - * `output`: print potentially useful output from hosts (e.g. output of `kubectl get pods --all-namespaces`) - -## Usage - -### Local machine - -``` -ansible-playbook -u -i "localhost", -c local setup_weave-kube.yml -``` - -### Vagrant - -Provision your local VM using Vagrant: - -``` -cd $(mktemp -d -t XXX) -vagrant init ubuntu/xenial64 # or, e.g. centos/7 -vagrant up -``` - -then set the following environment variables by extracting the output of `vagrant ssh-config`: - -``` -eval $(vagrant ssh-config | sed \ --ne 's/\ *HostName /vagrant_ssh_host=/p' \ --ne 's/\ *User /vagrant_ssh_user=/p' \ --ne 's/\ *Port /vagrant_ssh_port=/p' \ --ne 's/\ *IdentityFile /vagrant_ssh_id_file=/p') -``` - -and finally run: - -``` -ansible-playbook --private-key=$vagrant_ssh_id_file -u $vagrant_ssh_user \ ---ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \ --i "$vagrant_ssh_host:$vagrant_ssh_port," setup_weave-kube.yml -``` - -or, for specific versions of Kubernetes and Docker: - -``` -ansible-playbook --private-key=$vagrant_ssh_id_file -u $vagrant_ssh_user \ ---ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \ --i "$vagrant_ssh_host:$vagrant_ssh_port," setup_weave-kube.yml \ ---extra-vars "docker_version=1.12.3 kubernetes_version=1.4.4" -``` - -NOTE: Kubernetes APT repo includes only the latest version, so currently -retrieving an older version will fail. - -### Terraform - -Provision your machine using the Terraform scripts from `../provisioning`, then run: - -``` -terraform output ansible_inventory > /tmp/ansible_inventory -``` - -and - -``` -ansible-playbook \ - --private-key="$(terraform output private_key_path)" \ - -u "$(terraform output username)" \ - -i /tmp/ansible_inventory \ - --ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \ - ../../config_management/setup_weave-kube.yml - -``` - -To specify versions of Kubernetes and Docker see Vagrant examples above. - -N.B.: `--ssh-extra-args` is used to provide: - -* `StrictHostKeyChecking=no`: as VMs come and go, the same IP can be used by a different machine, so checking the host's SSH key may fail. Note that this introduces a risk of a man-in-the-middle attack. 
-* `UserKnownHostsFile=/dev/null`: if you previously connected a VM with the same IP but a different public key, and added it to `~/.ssh/known_hosts`, SSH may still fail to connect, hence we use `/dev/null` instead of `~/.ssh/known_hosts`. - - -### Docker installation role - -Various ways to install Docker are provided: - -- `docker-from-docker-ce-repo` -- `docker-from-docker-repo` -- `docker-from-get.docker.com` -- `docker-from-tarball` - -each producing a slightly different outcome, which can be useful for testing various setup scenarios. - -The `docker-install` role selects one of the above ways to install Docker based on the `docker_install_role` variable. -The default value for this variable is configured in `group_vars/all`. -You can however override it with whichever role you would want to run by passing the name of the role as a key-value pair in `extra-vars`, e.g.: - -``` -ansible-playbook .yml \ - --extra-vars "docker_install_role=docker-from-docker-ce-repo" -``` - - -## Resources - -* [https://www.vagrantup.com/docs/provisioning/ansible.html](https://www.vagrantup.com/docs/provisioning/ansible.html) -* [http://docs.ansible.com/ansible/guide_vagrant.html](http://docs.ansible.com/ansible/guide_vagrant.html) diff --git a/tools/config_management/group_vars/all b/tools/config_management/group_vars/all deleted file mode 100644 index d728cce8..00000000 --- a/tools/config_management/group_vars/all +++ /dev/null @@ -1,11 +0,0 @@ ---- -go_version: 1.8.1 -terraform_version: 0.8.5 -docker_version: 17.06 -docker_install_role: 'docker-from-docker-ce-repo' -kubernetes_version: 1.6.1 -kubernetes_cni_version: 0.5.1 -kubernetes_token: '123456.0123456789123456' -etcd_container_version: 2.2.5 -kube_discovery_container_version: 1.0 -pause_container_version: 3.0 diff --git a/tools/config_management/library/setup_ansible_dependencies.yml b/tools/config_management/library/setup_ansible_dependencies.yml deleted file mode 100644 index 50263369..00000000 --- a/tools/config_management/library/setup_ansible_dependencies.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -################################################################################ -# Install Ansible's dependencies: python and lsb_release, required respectively -# to run Ansible modules and gather Ansible facts. -# -# See also: -# - http://docs.ansible.com/ansible/intro_installation.html#managed-node-requirements -# - http://docs.ansible.com/ansible/setup_module.html -################################################################################ - -- name: check if python is installed (as required by ansible modules) - raw: test -e /usr/bin/python - register: is_python_installed - failed_when: is_python_installed.rc not in [0, 1] - changed_when: false # never mutates state. - -- name: install python if missing (as required by ansible modules) - when: is_python_installed|failed # skip otherwise - raw: (test -e /usr/bin/apt-get && apt-get update && apt-get install -y python-minimal) || (test -e /usr/bin/yum && yum update && yum install -y python) - changed_when: is_python_installed.rc == 1 - -- name: check if lsb_release is installed (as required for ansible facts) - raw: test -e /usr/bin/lsb_release - register: is_lsb_release_installed - failed_when: is_lsb_release_installed.rc not in [0, 1] - changed_when: false # never mutates state. 
- -- name: install lsb_release if missing (as required for ansible facts) - when: is_lsb_release_installed|failed # skip otherwise - raw: (test -e /usr/bin/apt-get && apt-get install -y lsb_release) || (test -e /usr/bin/yum && yum install -y redhat-lsb-core) - changed_when: is_lsb_release_installed.rc == 1 - -- setup: # gather 'facts', i.e. compensates for 'gather_facts: false' in calling playbook. diff --git a/tools/config_management/roles/dev-tools/files/apt-daily.timer.conf b/tools/config_management/roles/dev-tools/files/apt-daily.timer.conf deleted file mode 100644 index bd19c61f..00000000 --- a/tools/config_management/roles/dev-tools/files/apt-daily.timer.conf +++ /dev/null @@ -1,2 +0,0 @@ -[Timer] -Persistent=false diff --git a/tools/config_management/roles/dev-tools/tasks/main.yml b/tools/config_management/roles/dev-tools/tasks/main.yml deleted file mode 100644 index 96ac3a21..00000000 --- a/tools/config_management/roles/dev-tools/tasks/main.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -# Set up Development Environment. - -- name: install development tools - package: - name: "{{ item }}" - state: present - with_items: - # weave net dependencies - - make - - vagrant - # ansible dependencies - - python-pip - - python-dev - - libffi-dev - - libssl-dev - # terraform dependencies - - unzip - # other potentially useful tools: - - aufs-tools - - ethtool - - iputils-arping - - libpcap-dev - - git - - mercurial - - bc - - jq - -- name: install ansible - pip: - name: ansible - state: present - -- name: install terraform - unarchive: - src: 'https://releases.hashicorp.com/terraform/{{ terraform_version }}/terraform_{{ terraform_version }}_linux_{{ {"x86_64": "amd64", "i386": "386"}[ansible_architecture] }}.zip' - remote_src: yes - dest: /usr/bin - mode: 0555 - creates: /usr/bin/terraform - -# Ubuntu runs an apt update process that will run on first boot from image. -# This is of questionable value when the machines are only going to live for a few minutes. -# If you leave them on they will run the process daily. -# Also we have seen the update process create a 'defunct' process which then throws off Weave Net smoke-test checks. -# So, we override the 'persistent' setting so it will still run at the scheduled time but will not try to catch up on first boot. 
-- name: copy apt daily override - copy: src=apt-daily.timer.conf dest=/etc/systemd/system/apt-daily.timer.d/ diff --git a/tools/config_management/roles/docker-configuration/files/docker.conf b/tools/config_management/roles/docker-configuration/files/docker.conf deleted file mode 100644 index 626d8022..00000000 --- a/tools/config_management/roles/docker-configuration/files/docker.conf +++ /dev/null @@ -1,3 +0,0 @@ -[Service] -ExecStart= -ExecStart=/usr/bin/dockerd -H fd:// -H unix:///var/run/alt-docker.sock -H tcp://0.0.0.0:2375 -s overlay --insecure-registry "weave-ci-registry:5000" diff --git a/tools/config_management/roles/docker-configuration/tasks/main.yml b/tools/config_management/roles/docker-configuration/tasks/main.yml deleted file mode 100644 index d6736968..00000000 --- a/tools/config_management/roles/docker-configuration/tasks/main.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -# Configure Docker -# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install - -- name: ensure docker group is present (or create it) - group: - name: docker - state: present - -- name: add user to docker group (avoids sudo-ing) - user: - name: "{{ ansible_user }}" - group: docker - state: present - -- name: ensure docker's systemd directory exists - file: - path: /etc/systemd/system/docker.service.d - state: directory - recurse: yes - when: ansible_os_family != "RedHat" - -- name: enable docker remote api over tcp - copy: - src: "{{ role_path }}/files/docker.conf" - dest: /etc/systemd/system/docker.service.d/docker.conf - register: docker_conf - when: ansible_os_family != "RedHat" - -- name: restart docker service - systemd: - name: docker - state: restarted - daemon_reload: yes # ensure docker.conf is picked up. - enabled: yes - when: docker_conf.changed or ansible_os_family == "RedHat" diff --git a/tools/config_management/roles/docker-from-docker-ce-repo/tasks/debian.yml b/tools/config_management/roles/docker-from-docker-ce-repo/tasks/debian.yml deleted file mode 100644 index 3e2ae127..00000000 --- a/tools/config_management/roles/docker-from-docker-ce-repo/tasks/debian.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -# Debian / Ubuntu specific: - -- name: install dependencies for docker repository - package: - name: "{{ item }}" - state: present - with_items: - - apt-transport-https - - ca-certificates - -- name: add apt key for the docker repository - apt_key: - keyserver: hkp://ha.pool.sks-keyservers.net:80 - id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 - state: present - register: apt_key_docker_repo - -- name: add docker's apt repository ({{ ansible_distribution | lower }}-{{ ansible_distribution_release }}) - apt_repository: - repo: deb https://download.docker.com/linux/ubuntu {{ ansible_lsb.codename|lower }} stable - state: present - register: apt_docker_repo - -- name: update apt's cache - apt: - update_cache: yes - when: apt_key_docker_repo.changed or apt_docker_repo.changed - -- name: install docker-engine - package: - name: "{{ item }}" - state: present - with_items: - - docker-ce={{ docker_version }}* diff --git a/tools/config_management/roles/docker-from-docker-ce-repo/tasks/main.yml b/tools/config_management/roles/docker-from-docker-ce-repo/tasks/main.yml deleted file mode 100644 index 0acb6d8c..00000000 --- a/tools/config_management/roles/docker-from-docker-ce-repo/tasks/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# Set up Docker -# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install - -# Distribution-specific tasks: -- include: debian.yml - 
when: ansible_os_family == "Debian" - -- include: redhat.yml - when: ansible_os_family == "RedHat" diff --git a/tools/config_management/roles/docker-from-docker-ce-repo/tasks/redhat.yml b/tools/config_management/roles/docker-from-docker-ce-repo/tasks/redhat.yml deleted file mode 100644 index ea9a3fa4..00000000 --- a/tools/config_management/roles/docker-from-docker-ce-repo/tasks/redhat.yml +++ /dev/null @@ -1,29 +0,0 @@ -# Docker installation from Docker's CentOS Community Edition -# See also: https://docs.docker.com/engine/installation/linux/centos/ - -- name: remove all potentially pre existing packages - yum: - name: '{{ item }}' - state: absent - with_items: - - docker - - docker-common - - container-selinux - - docker-selinux - - docker-engine - -- name: install yum-utils - yum: - name: yum-utils - state: present - -- name: add docker ce repo - command: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo - -# Note that Docker CE versions do not follow regular Docker versions, but look -# like, for example: "17.03.0.el7" -- name: install docker - yum: - name: 'docker-ce-{{ docker_version }}' - update_cache: yes - state: present diff --git a/tools/config_management/roles/docker-from-docker-repo/tasks/debian.yml b/tools/config_management/roles/docker-from-docker-repo/tasks/debian.yml deleted file mode 100644 index cc33c2c9..00000000 --- a/tools/config_management/roles/docker-from-docker-repo/tasks/debian.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -# Debian / Ubuntu specific: - -- name: install dependencies for docker repository - package: - name: "{{ item }}" - state: present - with_items: - - apt-transport-https - - ca-certificates - -- name: add apt key for the docker repository - apt_key: - keyserver: hkp://ha.pool.sks-keyservers.net:80 - id: 58118E89F3A912897C070ADBF76221572C52609D - state: present - register: apt_key_docker_repo - -- name: add docker's apt repository ({{ ansible_distribution | lower }}-{{ ansible_distribution_release }}) - apt_repository: - repo: deb https://apt.dockerproject.org/repo {{ ansible_distribution | lower }}-{{ ansible_distribution_release }} main - state: present - register: apt_docker_repo - -- name: update apt's cache - apt: - update_cache: yes - when: apt_key_docker_repo.changed or apt_docker_repo.changed - -- name: install docker-engine - package: - name: "{{ item }}" - state: present - with_items: - - docker-engine={{ docker_version }}* diff --git a/tools/config_management/roles/docker-from-docker-repo/tasks/main.yml b/tools/config_management/roles/docker-from-docker-repo/tasks/main.yml deleted file mode 100644 index 0acb6d8c..00000000 --- a/tools/config_management/roles/docker-from-docker-repo/tasks/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# Set up Docker -# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install - -# Distribution-specific tasks: -- include: debian.yml - when: ansible_os_family == "Debian" - -- include: redhat.yml - when: ansible_os_family == "RedHat" diff --git a/tools/config_management/roles/docker-from-docker-repo/tasks/redhat.yml b/tools/config_management/roles/docker-from-docker-repo/tasks/redhat.yml deleted file mode 100644 index d29964e1..00000000 --- a/tools/config_management/roles/docker-from-docker-repo/tasks/redhat.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -# RedHat / CentOS specific: - -- name: add docker' yum repository (centos/{{ ansible_lsb.major_release }}) - yum_repository: - name: docker - description: Docker YUM repo - file: external_repos - baseurl: 
https://yum.dockerproject.org/repo/main/centos/{{ ansible_lsb.major_release }} - enabled: yes - gpgkey: https://yum.dockerproject.org/gpg - gpgcheck: yes - state: present - -- name: update yum's cache - yum: - name: "*" - update_cache: yes - -- name: install docker-engine - package: - name: "{{ item }}" - state: present - with_items: - - docker-engine-{{ docker_version }} diff --git a/tools/config_management/roles/docker-from-get.docker.com/tasks/debian.yml b/tools/config_management/roles/docker-from-get.docker.com/tasks/debian.yml deleted file mode 100644 index 7444194e..00000000 --- a/tools/config_management/roles/docker-from-get.docker.com/tasks/debian.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -# Debian / Ubuntu specific: - -- name: apt-import gpg key for the docker repository - shell: curl -sSL https://get.docker.com/gpg | sudo apt-key add - - -- name: install docker - shell: 'curl -sSL https://get.docker.com/ | sed -e s/docker-engine/docker-engine={{ docker_version }}*/ -e s/docker-ce/docker-ce={{ docker_version }}*/ | sh' diff --git a/tools/config_management/roles/docker-from-get.docker.com/tasks/main.yml b/tools/config_management/roles/docker-from-get.docker.com/tasks/main.yml deleted file mode 100644 index 92c497b7..00000000 --- a/tools/config_management/roles/docker-from-get.docker.com/tasks/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# Set up Docker -# See also: legacy gce.sh script - -# Distribution-specific tasks: -- include: debian.yml - when: ansible_os_family == "Debian" - -- include: redhat.yml - when: ansible_os_family == "RedHat" diff --git a/tools/config_management/roles/docker-from-get.docker.com/tasks/redhat.yml b/tools/config_management/roles/docker-from-get.docker.com/tasks/redhat.yml deleted file mode 100644 index ea7cbfc4..00000000 --- a/tools/config_management/roles/docker-from-get.docker.com/tasks/redhat.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# RedHat / CentOS specific: - -- name: rpm-import gpg key for the docker repository - shell: curl -sSLo /tmp/docker.gpg https://get.docker.com/gpg && sudo rpm --import /tmp/docker.gpg - -- name: install docker - shell: 'curl -sSL https://get.docker.com/ | sed -e s/docker-engine/docker-engine-{{ docker_version }}*/ | sh' - -- name: wait for docker installation to complete - shell: yum install -y yum-utils && yum-complete-transaction diff --git a/tools/config_management/roles/docker-from-tarball/tasks/main.yml b/tools/config_management/roles/docker-from-tarball/tasks/main.yml deleted file mode 100644 index a233d10a..00000000 --- a/tools/config_management/roles/docker-from-tarball/tasks/main.yml +++ /dev/null @@ -1,61 +0,0 @@ ---- -# Set up Docker -# See also: -# - https://docs.docker.com/engine/installation/linux/ubuntulinux/#install -# - https://github.com/docker/docker/releases - -- include_role: - name: docker-prerequisites - -- name: install daemon - package: - name: daemon - state: present - -- name: 'create directory {{ docker_dir }}/{{ docker_version }}' - file: - path: '{{ docker_dir }}/{{ docker_version }}' - state: directory - mode: 0755 - -- name: download and extract docker - unarchive: - src: 'https://get.docker.com/builds/Linux/x86_64/docker-{{ docker_version }}.tgz' - remote_src: yes - dest: '{{ docker_dir }}/{{ docker_version }}' - extra_opts: '--strip-components=1' - mode: 0555 - creates: '{{ docker_dir }}/{{ docker_version }}/docker' - -- name: create symlink to current version - file: - src: '{{ docker_dir }}/{{ docker_version }}' - dest: '{{ docker_dir }}/current' - state: link - mode: 0555 - -- name: list 
all files to symlink - find: - paths: '{{ docker_dir }}/current' - file_type: file - register: binaries - changed_when: false - -- name: create symlinks to all binaries - file: - src: '{{ item }}' - dest: /usr/bin/{{ item | basename }} - state: link - with_items: "{{ binaries.files | map(attribute='path') | list }}" - -- name: killall docker - command: killall docker - register: killall - failed_when: false - changed_when: killall.rc == 0 - -- name: start dockerd - command: daemon -- /usr/bin/dockerd - -- include_role: - name: docker-configuration diff --git a/tools/config_management/roles/docker-from-tarball/vars/main.yml b/tools/config_management/roles/docker-from-tarball/vars/main.yml deleted file mode 100644 index d4106684..00000000 --- a/tools/config_management/roles/docker-from-tarball/vars/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -docker_dir: '/opt/docker' -docker_url: '{{ "rc" in {{ docker_version }} | ternary( > - "https://test.docker.com/builds/Linux/x86_64/docker-{{ docker_version }}.tgz", > - "https://get.docker.com/builds/Linux/x86_64/docker-{{ docker_version }}.tgz") }}' diff --git a/tools/config_management/roles/docker-install/tasks/main.yml b/tools/config_management/roles/docker-install/tasks/main.yml deleted file mode 100644 index cc803cab..00000000 --- a/tools/config_management/roles/docker-install/tasks/main.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -# Set up Docker - -- include_role: - name: docker-prerequisites - -# Dynamically include docker installation role using 'when' as Ansible does not -# allow for include_role's name to be set to a variable. Indeed: -# - include_role: -# name: '{{ docker_install_role }}' -# fails with: -# ERROR! 'docker_install_role' is undefined -- include_role: - name: docker-from-docker-repo - when: docker_install_role == 'docker-from-docker-repo' - -- include_role: - name: docker-from-docker-ce-repo - when: docker_install_role == 'docker-from-docker-ce-repo' - -- include_role: - name: docker-from-get.docker.com - when: docker_install_role == 'docker-from-get.docker.com' - -- include_role: - name: docker-from-tarball - when: docker_install_role == 'docker-from-tarball' - -- include_role: - name: docker-configuration diff --git a/tools/config_management/roles/docker-prerequisites/tasks/debian.yml b/tools/config_management/roles/docker-prerequisites/tasks/debian.yml deleted file mode 100644 index 48b0c2e3..00000000 --- a/tools/config_management/roles/docker-prerequisites/tasks/debian.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# Install Docker's dependencies -# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install - -- name: install linux-image-extra-*/virtual - package: - name: "{{ item }}" - state: present - with_items: - - linux-image-extra-{{ ansible_kernel }} - - linux-image-extra-virtual diff --git a/tools/config_management/roles/docker-prerequisites/tasks/main.yml b/tools/config_management/roles/docker-prerequisites/tasks/main.yml deleted file mode 100644 index a8177372..00000000 --- a/tools/config_management/roles/docker-prerequisites/tasks/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -# Distribution-specific tasks: -- include: debian.yml - when: ansible_os_family == "Debian" diff --git a/tools/config_management/roles/golang-from-tarball/tasks/main.yml b/tools/config_management/roles/golang-from-tarball/tasks/main.yml deleted file mode 100644 index 55476bf6..00000000 --- a/tools/config_management/roles/golang-from-tarball/tasks/main.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -# Set up Go. 
- -- name: install go - unarchive: - src: 'https://storage.googleapis.com/golang/go{{ go_version }}.linux-{{ {"x86_64": "amd64", "i386": "386"}[ansible_architecture] }}.tar.gz' - remote_src: yes - dest: /usr/local - mode: 0777 - creates: /usr/local/go/bin/go - -- name: set go env. vars. and add go to path - blockinfile: - dest: '$HOME/.bashrc' - block: | - export PATH=$PATH:/usr/local/go/bin - export GOPATH=$HOME - state: present - create: yes - mode: 0644 - become: '{{ item }}' - with_items: - - true # Run as root - - false # Run as SSH user - -- name: source ~/.bashrc from ~/.bash_profile - lineinfile: - dest: '$HOME/.bash_profile' - line: '[ -r $HOME/.bashrc ] && source $HOME/.bashrc' - state: present - create: yes - mode: 0644 - become: '{{ item }}' - with_items: - - true # Run as root - - false # Run as SSH user diff --git a/tools/config_management/roles/kubelet-stop/tasks/main.yml b/tools/config_management/roles/kubelet-stop/tasks/main.yml deleted file mode 100644 index 6e5f3148..00000000 --- a/tools/config_management/roles/kubelet-stop/tasks/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- - -- name: check if kubelet service exists - stat: - path: /etc/init.d/kubelet - register: kubelet - -# avoids having weave-net and weave-kube conflict in some test cases (e.g. 130_expose_test.sh) -- name: stop kubelet service - systemd: - name: kubelet - state: stopped - enabled: no - when: kubelet.stat.exists diff --git a/tools/config_management/roles/kubernetes-docker-images/tasks/main.yml b/tools/config_management/roles/kubernetes-docker-images/tasks/main.yml deleted file mode 100644 index 801c4637..00000000 --- a/tools/config_management/roles/kubernetes-docker-images/tasks/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- - -- name: docker pull images used by k8s tests - docker_image: - name: '{{ item }}' - state: present - with_items: - - gcr.io/google_containers/etcd-amd64:{{ etcd_container_version }} - - gcr.io/google_containers/kube-apiserver-amd64:v{{ kubernetes_version }} - - gcr.io/google_containers/kube-controller-manager-amd64:v{{ kubernetes_version }} - - gcr.io/google_containers/kube-proxy-amd64:v{{ kubernetes_version }} - - gcr.io/google_containers/kube-scheduler-amd64:v{{ kubernetes_version }} - - gcr.io/google_containers/kube-discovery-amd64:{{ kube_discovery_container_version }} - - gcr.io/google_containers/pause-amd64:{{ pause_container_version }} diff --git a/tools/config_management/roles/kubernetes-install/tasks/debian.yml b/tools/config_management/roles/kubernetes-install/tasks/debian.yml deleted file mode 100644 index 9f16edfd..00000000 --- a/tools/config_management/roles/kubernetes-install/tasks/debian.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -# Debian / Ubuntu specific: - -- name: add apt key for the kubernetes repository - apt_key: - url: https://packages.cloud.google.com/apt/doc/apt-key.gpg - state: present - register: apt_key_k8s_repo - -- name: add kubernetes' apt repository (kubernetes-{{ ansible_distribution_release }}) - apt_repository: - repo: deb http://apt.kubernetes.io/ kubernetes-{{ ansible_distribution_release }} main - state: present - register: apt_k8s_repo - when: '"alpha" not in kubernetes_version and "beta" not in kubernetes_version' - -- name: add kubernetes' apt repository (kubernetes-{{ ansible_distribution_release }}-unstable) - apt_repository: - repo: deb http://apt.kubernetes.io/ kubernetes-{{ ansible_distribution_release }}-unstable main - state: present - register: apt_k8s_repo - when: '"alpha" in kubernetes_version or "beta" in kubernetes_version' - -- 
name: update apt's cache - apt: - update_cache: yes - when: apt_key_k8s_repo.changed or apt_k8s_repo.changed - -- name: install kubelet and kubectl - package: - name: "{{ item }}" - state: present - with_items: - - kubelet={{ kubernetes_version }}* - - kubectl={{ kubernetes_version }}* - - kubeadm={{ kubernetes_version }}* - - kubernetes-cni={{ kubernetes_cni_version }}* diff --git a/tools/config_management/roles/kubernetes-install/tasks/main.yml b/tools/config_management/roles/kubernetes-install/tasks/main.yml deleted file mode 100644 index 50dcddaf..00000000 --- a/tools/config_management/roles/kubernetes-install/tasks/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -# Install Kubernetes - -# Distribution-specific tasks: -- include: debian.yml - when: ansible_os_family == "Debian" - -- include: redhat.yml - when: ansible_os_family == "RedHat" - -- name: install ebtables - package: - name: "{{ item }}" - state: present - with_items: - - ebtables diff --git a/tools/config_management/roles/kubernetes-install/tasks/redhat.yml b/tools/config_management/roles/kubernetes-install/tasks/redhat.yml deleted file mode 100644 index 293729dc..00000000 --- a/tools/config_management/roles/kubernetes-install/tasks/redhat.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -# RedHat / CentOS specific: - -- name: add kubernetes' yum repository (kubernetes-el{{ ansible_lsb.major_release }}-x86-64) - yum_repository: - name: kubernetes - description: Kubernetes YUM repo - file: external_repos - baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el{{ ansible_lsb.major_release }}-x86_64 - enabled: yes - gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg - gpgcheck: yes - state: present - register: yum_k8s_repo - -- name: update yum's cache - yum: - name: "*" - update_cache: yes - when: yum_k8s_repo.changed - -- name: install kubelet and kubectl - package: - name: "{{ item }}" - state: present - with_items: - - kubelet-{{ kubernetes_version }}* - - kubectl-{{ kubernetes_version }}* - - kubeadm-{{ kubernetes_version }}* - - kubernetes-cni-{{ kubernetes_cni_version }}* diff --git a/tools/config_management/roles/kubernetes-start/tasks/main.yml b/tools/config_management/roles/kubernetes-start/tasks/main.yml deleted file mode 100644 index d343b21c..00000000 --- a/tools/config_management/roles/kubernetes-start/tasks/main.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Start Kubernetes - -- name: kubeadm reset - command: kubeadm reset - -- name: restart kubelet service - systemd: - name: kubelet - state: restarted - enabled: yes - -- name: optionally set kubeconfig option - set_fact: - kubeconfig: '{{ (kubernetes_version >= "1.5.4") | ternary("--kubeconfig /etc/kubernetes/admin.conf", "") }}' - kubernetes_version_option: '{{ (kubernetes_version >= "1.6") | ternary("kubernetes_version", "use-kubernetes-version") }}' - -- name: kubeadm init on the master - command: 'kubeadm init --{{ kubernetes_version_option }}=v{{ kubernetes_version }} --token={{ kubernetes_token }}' - when: ' {{ play_hosts[0] == inventory_hostname }}' - -- name: allow pods to be run on the master (if only node) - command: 'kubectl {{ kubeconfig }} taint nodes --all {{ (kubernetes_version < "1.6") | ternary("dedicated-", "node-role.kubernetes.io/master:NoSchedule-") }}' - when: '{{ play_hosts | length }} == 1' - -- name: kubeadm join on workers - command: 'kubeadm join --token={{ kubernetes_token }} {{ hostvars[play_hosts[0]].private_ip }}{{ (kubernetes_version > "1.6") | ternary(":6443", 
"") }}' - when: ' {{ play_hosts[0] != inventory_hostname }}' - -- name: list kubernetes' pods - command: kubectl {{ kubeconfig }} get pods --all-namespaces - when: ' {{ play_hosts[0] == inventory_hostname }}' - changed_when: false - register: kubectl_get_pods - tags: - - output - -- name: print outpout of `kubectl get pods --all-namespaces` - debug: msg="{{ kubectl_get_pods.stdout_lines }}" - when: ' {{ play_hosts[0] == inventory_hostname }}' - tags: - - output diff --git a/tools/config_management/roles/setup-ansible/pre_tasks/main.yml b/tools/config_management/roles/setup-ansible/pre_tasks/main.yml deleted file mode 100644 index efb15491..00000000 --- a/tools/config_management/roles/setup-ansible/pre_tasks/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Set machine up to be able to run ansible playbooks. - -- name: check if python is installed (as required by ansible modules) - raw: test -e /usr/bin/python - register: is_python_installed - failed_when: is_python_installed.rc not in [0, 1] - changed_when: false # never mutates state. - -- name: install python if missing (as required by ansible modules) - when: is_python_installed|failed # skip otherwise - raw: (test -e /usr/bin/apt-get && apt-get install -y python-minimal) || (test -e /usr/bin/yum && yum install -y python) - changed_when: is_python_installed.rc == 1 - -- name: check if lsb_release is installed (as required for ansible facts) - raw: test -e /usr/bin/lsb_release - register: is_lsb_release_installed - failed_when: is_lsb_release_installed.rc not in [0, 1] - changed_when: false # never mutates state. - -- name: install lsb_release if missing (as required for ansible facts) - when: is_lsb_release_installed|failed # skip otherwise - raw: (test -e /usr/bin/apt-get && apt-get install -y lsb_release) || (test -e /usr/bin/yum && yum install -y lsb_release) - changed_when: is_lsb_release_installed.rc == 1 - -- setup: # gather 'facts', i.e. compensates for the above 'gather_facts: false'. diff --git a/tools/config_management/roles/sock-shop/tasks/tasks.yml b/tools/config_management/roles/sock-shop/tasks/tasks.yml deleted file mode 100644 index 9667ab04..00000000 --- a/tools/config_management/roles/sock-shop/tasks/tasks.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -# Set up sock-shop on top of Kubernetes. 
-# Dependencies on other roles: -# - kubernetes - -- name: create sock-shop namespace in k8s - command: kubectl --kubeconfig /etc/kubernetes/admin.conf create namespace sock-shop - -- name: create sock-shop in k8s - command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -n sock-shop -f "https://github.com/microservices-demo/microservices-demo/blob/master/deploy/kubernetes/complete-demo.yaml?raw=true" - -- name: describe front-end service - command: kubectl --kubeconfig /etc/kubernetes/admin.conf describe svc front-end -n sock-shop - changed_when: false - register: kubectl_describe_svc_frontend - tags: - - output - -- name: print output of `kubectl describe svc front-end -n sock-shop` - debug: msg="{{ kubectl_describe_svc_frontend.stdout_lines }}" - tags: - - output - -- name: list sock-shop k8s' pods - command: kubectl --kubeconfig /etc/kubernetes/admin.conf get pods -n sock-shop - changed_when: false - register: kubectl_get_pods - tags: - - output - -- name: print output of `kubectl get pods -n sock-shop` - debug: msg="{{ kubectl_get_pods.stdout_lines }}" - tags: - - output diff --git a/tools/config_management/roles/weave-kube/tasks/main.yml b/tools/config_management/roles/weave-kube/tasks/main.yml deleted file mode 100644 index e2025eef..00000000 --- a/tools/config_management/roles/weave-kube/tasks/main.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# Set up Weave Kube on top of Kubernetes. - -- name: set url for weave-kube daemonset - set_fact: - weave_kube_url: '{{ (kubernetes_version < "1.6") | ternary("https://git.io/weave-kube", "https://git.io/weave-kube-1.6") }}' - -- name: configure weave net's cni plugin - command: 'kubectl {{ kubeconfig }} apply -f {{ weave_kube_url }}' - when: '{{ play_hosts[0] == inventory_hostname }}' - -- name: list kubernetes' pods - command: 'kubectl {{ kubeconfig }} get pods --all-namespaces' - when: '{{ play_hosts[0] == inventory_hostname }}' - changed_when: false - register: kubectl_get_pods - tags: - - output - -- name: print output of `kubectl get pods --all-namespaces` - debug: msg="{{ kubectl_get_pods.stdout_lines }}" - when: '{{ play_hosts[0] == inventory_hostname }}' - tags: - - output diff --git a/tools/config_management/roles/weave-net-sources/tasks/main.yml b/tools/config_management/roles/weave-net-sources/tasks/main.yml deleted file mode 100644 index b0a7815c..00000000 --- a/tools/config_management/roles/weave-net-sources/tasks/main.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# Set up Development Environment for Weave Net.
- -- name: check if weave net has been checked out - become: false # Run as SSH-user - stat: - path: $HOME/src/github.com/weaveworks/weave - register: weave - failed_when: false - changed_when: false - -- name: git clone weave net - become: false # Run as SSH-user - git: - repo: https://github.com/weaveworks/weave.git - dest: $HOME/src/github.com/weaveworks/weave - when: not weave.stat.exists - -- name: create a convenience symlink to $HOME/src/github.com/weaveworks/weave - become: false # Run as SSH-user - file: - src: $HOME/src/github.com/weaveworks/weave - dest: $HOME/weave - state: link diff --git a/tools/config_management/roles/weave-net-utilities/tasks/main.yml b/tools/config_management/roles/weave-net-utilities/tasks/main.yml deleted file mode 100644 index 6883d23a..00000000 --- a/tools/config_management/roles/weave-net-utilities/tasks/main.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- - -- name: install epel-release - package: - name: "{{ item }}" - state: present - with_items: - - epel-release - when: ansible_os_family == "RedHat" - -- name: install jq - package: - name: "{{ item }}" - state: present - with_items: - - jq - -- name: install ethtool (used by the weave script) - package: - name: "{{ item }}" - state: present - with_items: - - ethtool - -- name: install nsenter (used by the weave script) - command: docker run --rm -v /usr/local/bin:/target jpetazzo/nsenter - -- name: install pip (for docker-py) - package: - name: "{{ item }}" - state: present - with_items: - - python-pip - -- name: install docker-py (for docker_image) - pip: - name: docker-py - state: present - -- name: docker pull images used by tests - docker_image: - name: '{{ item }}' - state: present - with_items: - - alpine - - aanand/docker-dnsutils - - weaveworks/hello-world - -- name: docker pull docker-py which is used by tests - docker_image: - name: joffrey/docker-py - tag: '{{ item }}' - state: present - with_items: - - '1.8.1' - - '1.9.0-rc2' diff --git a/tools/config_management/roles/weave-net/tasks/main.yml b/tools/config_management/roles/weave-net/tasks/main.yml deleted file mode 100644 index 0ef5e351..00000000 --- a/tools/config_management/roles/weave-net/tasks/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Set up Weave Net. - -- name: install weave net - get_url: - url: https://git.io/weave - dest: /usr/local/bin/weave - mode: 0555 - -- name: stop weave net - command: /usr/local/bin/weave stop - -- name: start weave net - command: /usr/local/bin/weave launch - -- name: get weave net's status - command: /usr/local/bin/weave status - changed_when: false - register: weave_status - tags: - - output - -- name: print output of `weave status` - debug: msg="{{ weave_status.stdout_lines }}" - tags: - - output diff --git a/tools/config_management/setup_weave-kube.yml b/tools/config_management/setup_weave-kube.yml deleted file mode 100644 index 5c68c978..00000000 --- a/tools/config_management/setup_weave-kube.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -################################################################################ -# Install Docker and Kubernetes, and configure Kubernetes to -# use Weave Net's CNI plugin (a.k.a. Weave Kube).
-# -# See also: -# - http://kubernetes.io/docs/getting-started-guides/kubeadm/ -# - https://github.com/weaveworks/weave-kube -################################################################################ - -- name: install docker, kubernetes and weave-kube - hosts: all - gather_facts: false # required in case Python is not available on the host - become: true - become_user: root - - pre_tasks: - - include: library/setup_ansible_dependencies.yml - - roles: - - docker-install - - weave-net-utilities - - kubernetes-install - - kubernetes-docker-images - - kubelet-stop - - kubernetes-start - - weave-kube diff --git a/tools/config_management/setup_weave-net_debug.yml b/tools/config_management/setup_weave-net_debug.yml deleted file mode 100644 index ff73a527..00000000 --- a/tools/config_management/setup_weave-net_debug.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -################################################################################ -# Install Docker from Docker's official repository and Weave Net. -################################################################################ - -- name: install docker and weave net for development - hosts: all - gather_facts: false # required in case Python is not available on the host - become: true - become_user: root - - pre_tasks: - - include: library/setup_ansible_dependencies.yml - - roles: - - docker-install - - weave-net-utilities - - weave-net diff --git a/tools/config_management/setup_weave-net_dev.yml b/tools/config_management/setup_weave-net_dev.yml deleted file mode 100644 index bdfa08e9..00000000 --- a/tools/config_management/setup_weave-net_dev.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -################################################################################ -# Install Docker from Docker's official repository and Weave Net. -################################################################################ - -- name: install docker and weave net for development - hosts: all - gather_facts: false # required in case Python is not available on the host - become: true - become_user: root - - pre_tasks: - - include: library/setup_ansible_dependencies.yml - - roles: - - dev-tools - - golang-from-tarball - - docker-install - # Do not run this role when building with Vagrant, as sources have been already checked out: - - { role: weave-net-sources, when: "ansible_user != 'vagrant'" } diff --git a/tools/config_management/setup_weave-net_test.yml b/tools/config_management/setup_weave-net_test.yml deleted file mode 100644 index fbd155df..00000000 --- a/tools/config_management/setup_weave-net_test.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -################################################################################ -# Install Docker from Docker's official repository and Weave Net. 
-################################################################################ - -- name: install docker and weave net for testing - hosts: all - gather_facts: false # required in case Python is not available on the host - become: true - become_user: root - - pre_tasks: - - include: library/setup_ansible_dependencies.yml - - roles: - - docker-install - - weave-net-utilities - - kubernetes-install - - kubernetes-docker-images - - kubelet-stop diff --git a/tools/cover/Makefile b/tools/cover/Makefile deleted file mode 100644 index 1453e63e..00000000 --- a/tools/cover/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -.PHONY: all clean - -all: cover - -cover: *.go - go get -tags netgo ./$(@D) - go build -ldflags "-extldflags \"-static\" -linkmode=external" -tags netgo -o $@ ./$(@D) - -clean: - rm -rf cover - go clean ./... diff --git a/tools/cover/cover.go b/tools/cover/cover.go deleted file mode 100644 index 4c5fcfd7..00000000 --- a/tools/cover/cover.go +++ /dev/null @@ -1,97 +0,0 @@ -package main - -import ( - "fmt" - "os" - "sort" - - "golang.org/x/tools/cover" -) - -func merge(p1, p2 *cover.Profile) *cover.Profile { - output := cover.Profile{ - FileName: p1.FileName, - Mode: p1.Mode, - } - - i, j := 0, 0 - for i < len(p1.Blocks) && j < len(p2.Blocks) { - bi, bj := p1.Blocks[i], p2.Blocks[j] - if bi.StartLine == bj.StartLine && bi.StartCol == bj.StartCol { - - if bi.EndLine != bj.EndLine || - bi.EndCol != bj.EndCol || - bi.NumStmt != bj.NumStmt { - panic("Not run on same source!") - } - - output.Blocks = append(output.Blocks, cover.ProfileBlock{ - StartLine: bi.StartLine, - StartCol: bi.StartCol, - EndLine: bi.EndLine, - EndCol: bi.EndCol, - NumStmt: bi.NumStmt, - Count: bi.Count + bj.Count, - }) - i++ - j++ - } else if bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol { - output.Blocks = append(output.Blocks, bi) - i++ - } else { - output.Blocks = append(output.Blocks, bj) - j++ - } - } - - for ; i < len(p1.Blocks); i++ { - output.Blocks = append(output.Blocks, p1.Blocks[i]) - } - - for ; j < len(p2.Blocks); j++ { - output.Blocks = append(output.Blocks, p2.Blocks[j]) - } - - return &output -} - -func print(profiles []*cover.Profile) { - fmt.Println("mode: atomic") - for _, profile := range profiles { - for _, block := range profile.Blocks { - fmt.Printf("%s:%d.%d,%d.%d %d %d\n", profile.FileName, block.StartLine, block.StartCol, - block.EndLine, block.EndCol, block.NumStmt, block.Count) - } - } -} - -// Copied from https://github.com/golang/tools/blob/master/cover/profile.go -type byFileName []*cover.Profile - -func (p byFileName) Len() int { return len(p) } -func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } -func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func main() { - outputProfiles := map[string]*cover.Profile{} - for _, input := range os.Args[1:] { - inputProfiles, err := cover.ParseProfiles(input) - if err != nil { - panic(fmt.Sprintf("Error parsing %s: %v", input, err)) - } - for _, ip := range inputProfiles { - op := outputProfiles[ip.FileName] - if op == nil { - outputProfiles[ip.FileName] = ip - } else { - outputProfiles[ip.FileName] = merge(op, ip) - } - } - } - profiles := make([]*cover.Profile, 0, len(outputProfiles)) - for _, profile := range outputProfiles { - profiles = append(profiles, profile) - } - sort.Sort(byFileName(profiles)) - print(profiles) -} diff --git a/tools/cover/gather_coverage.sh b/tools/cover/gather_coverage.sh deleted file mode 100755 index 271ac7d4..00000000 --- 
a/tools/cover/gather_coverage.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# This scripts copies all the coverage reports from various circle shards, -# merges them and produces a complete report. - -set -ex -DESTINATION=$1 -FROMDIR=$2 -mkdir -p "$DESTINATION" - -if [ -n "$CIRCLECI" ]; then - for i in $(seq 1 $((CIRCLE_NODE_TOTAL - 1))); do - scp "node$i:$FROMDIR"/* "$DESTINATION" || true - done -fi - -go get github.com/weaveworks/build-tools/cover -cover "$DESTINATION"/* >profile.cov -go tool cover -html=profile.cov -o coverage.html -go tool cover -func=profile.cov -o coverage.txt -tar czf coverage.tar.gz "$DESTINATION" diff --git a/tools/dependencies/cross_versions.py b/tools/dependencies/cross_versions.py deleted file mode 100755 index dd920f0e..00000000 --- a/tools/dependencies/cross_versions.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python - -# Generate the cross product of latest versions of Weave Net's dependencies: -# - Go -# - Docker -# - Kubernetes -# -# Dependencies: -# - python -# - git -# - list_versions.py -# -# Testing: -# $ python -m doctest -v cross_versions.py - -from os import linesep -from sys import argv, exit, stdout, stderr -from getopt import getopt, GetoptError -from list_versions import DEPS, get_versions_from, filter_versions -from itertools import product - -# See also: /usr/include/sysexits.h -_ERROR_RUNTIME = 1 -_ERROR_ILLEGAL_ARGS = 64 - - -def _usage(error_message=None): - if error_message: - stderr.write('ERROR: ' + error_message + linesep) - stdout.write( - linesep.join([ - 'Usage:', ' cross_versions.py [OPTION]...', 'Examples:', - ' cross_versions.py', ' cross_versions.py -r', - ' cross_versions.py --rc', ' cross_versions.py -l', - ' cross_versions.py --latest', 'Options:', - '-l/--latest Include only the latest version of each major and' - ' minor versions sub-tree.', - '-r/--rc Include release candidate versions.', - '-h/--help Prints this!', '' - ])) - - -def _validate_input(argv): - try: - config = {'rc': False, 'latest': False} - opts, args = getopt(argv, 'hlr', ['help', 'latest', 'rc']) - for opt, value in opts: - if opt in ('-h', '--help'): - _usage() - exit() - if opt in ('-l', '--latest'): - config['latest'] = True - if opt in ('-r', '--rc'): - config['rc'] = True - if len(args) != 0: - raise ValueError('Unsupported argument(s): %s.' % args) - return config - except GetoptError as e: - _usage(str(e)) - exit(_ERROR_ILLEGAL_ARGS) - except ValueError as e: - _usage(str(e)) - exit(_ERROR_ILLEGAL_ARGS) - - -def _versions(dependency, config): - return map(str, - filter_versions( - get_versions_from(DEPS[dependency]['url'], - DEPS[dependency]['re']), - DEPS[dependency]['min'], **config)) - - -def cross_versions(config): - docker_versions = _versions('docker', config) - k8s_versions = _versions('kubernetes', config) - return product(docker_versions, k8s_versions) - - -def main(argv): - try: - config = _validate_input(argv) - print(linesep.join('\t'.join(triple) - for triple in cross_versions(config))) - except Exception as e: - print(str(e)) - exit(_ERROR_RUNTIME) - - -if __name__ == '__main__': - main(argv[1:]) diff --git a/tools/dependencies/list_os_images.sh b/tools/dependencies/list_os_images.sh deleted file mode 100755 index 00db0d06..00000000 --- a/tools/dependencies/list_os_images.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash - -function usage() { - cat <&2 "No AWS owner ID for $1." - exit 1 -} - -if [ -z "$1" ]; then - echo >&2 "No specified provider." 
- usage - exit 1 -fi - -if [ -z "$2" ]; then - if [ "$1" == "help" ]; then - usage - exit 0 - else - echo >&2 "No specified operating system." - usage - exit 1 - fi -fi - -case "$1" in - 'gcp') - gcloud compute images list --standard-images --regexp=".*?$2.*" \ - --format="csv[no-heading][separator=/](selfLink.map().scope(projects).segment(0),family)" \ - | sort -d - ;; - 'aws') - aws --region "${3:-us-east-1}" ec2 describe-images \ - --owners "$(find_aws_owner_id "$2")" \ - --filters "Name=name,Values=$2*" \ - --query 'Images[*].{name:Name,id:ImageId}' - # Other examples: - # - CentOS: aws --region us-east-1 ec2 describe-images --owners aws-marketplace --filters Name=product-code,Values=aw0evgkw8e5c1q413zgy5pjce - # - Debian: aws --region us-east-1 ec2 describe-images --owners 379101102735 --filters "Name=architecture,Values=x86_64" "Name=name,Values=debian-jessie-*" "Name=root-device-type,Values=ebs" "Name=virtualization-type,Values=hvm" - ;; - 'do') - curl -s -X GET \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" \ - "https://api.digitalocean.com/v2/images?page=1&per_page=999999" \ - | jq --raw-output ".images | .[] | .slug" | grep "$2" | sort -d - ;; - *) - echo >&2 "Unknown provider [$1]." - usage - exit 1 - ;; -esac diff --git a/tools/dependencies/list_versions.py b/tools/dependencies/list_versions.py deleted file mode 100755 index e008ecfe..00000000 --- a/tools/dependencies/list_versions.py +++ /dev/null @@ -1,343 +0,0 @@ -#!/usr/bin/env python - -# List all available versions of Weave Net's dependencies: -# - Go -# - Docker -# - Kubernetes -# -# Depending on the parameters passed, it can gather the equivalent of the below -# bash one-liners: -# git ls-remote --tags https://github.com/golang/go \ -# | grep -oP '(?<=refs/tags/go)[\.\d]+$' \ -# | sort --version-sort -# git ls-remote --tags https://github.com/golang/go \ -# | grep -oP '(?<=refs/tags/go)[\.\d]+rc\d+$' \ -# | sort --version-sort \ -# | tail -n 1 -# git ls-remote --tags https://github.com/docker/docker \ -# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+$' \ -# | sort --version-sort -# git ls-remote --tags https://github.com/docker/docker \ -# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+\-rc\d*$' \ -# | sort --version-sort \ -# | tail -n 1 -# git ls-remote --tags https://github.com/kubernetes/kubernetes \ -# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+$' \ -# | sort --version-sort -# git ls-remote --tags https://github.com/kubernetes/kubernetes \ -# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+\-beta\.\d+$' \ -# | sort --version-sort | tail -n 1 -# -# Dependencies: -# - python -# - git -# -# Testing: -# $ python -m doctest -v list_versions.py - -from os import linesep, path -from sys import argv, exit, stdout, stderr -from getopt import getopt, GetoptError -from subprocess import Popen, PIPE -from pkg_resources import parse_version -from itertools import groupby -from six.moves import filter -import shlex -import re - -# See also: /usr/include/sysexits.h -_ERROR_RUNTIME = 1 -_ERROR_ILLEGAL_ARGS = 64 - -_TAG_REGEX = '^[0-9a-f]{40}\s+refs/tags/%s$' -_VERSION = 'version' -DEPS = { - 'go': { - 'url': 'https://github.com/golang/go', - 're': 'go(?P<%s>[\d\.]+(?:rc\d)*)' % _VERSION, - 'min': None - }, - 'docker': { - 'url': 'https://github.com/docker/docker', - 're': 'v(?P<%s>\d+\.\d+\.\d+(?:\-rc\d)*)' % _VERSION, - # Weave Net only works with Docker from 1.10.0 onwards, so we ignore - # all previous versions: - 'min': '1.10.0', - }, - 'kubernetes': { - 'url': 
'https://github.com/kubernetes/kubernetes', - 're': 'v(?P<%s>\d+\.\d+\.\d+(?:\-beta\.\d)*)' % _VERSION, - # Weave Kube requires Kubernetes 1.4.2+, so we ignore all previous - # versions: - 'min': '1.4.2', - } -} - - -class Version(object): - ''' Helper class to parse and manipulate (sort, filter, group) software - versions. ''' - - def __init__(self, version): - self.version = version - self.digits = [ - int(x) if x else 0 - for x in re.match('(\d*)\.?(\d*)\.?(\d*).*?', version).groups() - ] - self.major, self.minor, self.patch = self.digits - self.__parsed = parse_version(version) - self.is_rc = self.__parsed.is_prerelease - - def __lt__(self, other): - return self.__parsed.__lt__(other.__parsed) - - def __gt__(self, other): - return self.__parsed.__gt__(other.__parsed) - - def __le__(self, other): - return self.__parsed.__le__(other.__parsed) - - def __ge__(self, other): - return self.__parsed.__ge__(other.__parsed) - - def __eq__(self, other): - return self.__parsed.__eq__(other.__parsed) - - def __ne__(self, other): - return self.__parsed.__ne__(other.__parsed) - - def __str__(self): - return self.version - - def __repr__(self): - return self.version - - -def _read_go_version_from_dockerfile(): - # Read Go version from weave/build/Dockerfile - dockerfile_path = path.join( - path.dirname(path.dirname(path.dirname(path.realpath(__file__)))), - 'build', 'Dockerfile') - with open(dockerfile_path, 'r') as f: - for line in f: - m = re.match('^FROM golang:(\S*)$', line) - if m: - return m.group(1) - raise RuntimeError( - "Failed to read Go version from weave/build/Dockerfile." - " You may be running this script from somewhere else than weave/tools." - ) - - -def _try_set_min_go_version(): - ''' Set the current version of Go used to build Weave Net's containers as - the minimum version. ''' - try: - DEPS['go']['min'] = _read_go_version_from_dockerfile() - except IOError as e: - stderr.write('WARNING: No minimum Go version set. Root cause: %s%s' % - (e, linesep)) - - -def _sanitize(out): - return out.decode('ascii').strip().split(linesep) - - -def _parse_tag(tag, version_pattern, debug=False): - ''' Parse Git tag output's line using the provided `version_pattern`, e.g.: - >>> _parse_tag( - '915b77eb4efd68916427caf8c7f0b53218c5ea4a refs/tags/v1.4.6', - 'v(?P\d+\.\d+\.\d+(?:\-beta\.\d)*)') - '1.4.6' - ''' - pattern = _TAG_REGEX % version_pattern - m = re.match(pattern, tag) - if m: - return m.group(_VERSION) - elif debug: - stderr.write( - 'ERROR: Failed to parse version out of tag [%s] using [%s].%s' % - (tag, pattern, linesep)) - - -def get_versions_from(git_repo_url, version_pattern): - ''' Get release and release candidates' versions from the provided Git - repository. ''' - git = Popen( - shlex.split('git ls-remote --tags %s' % git_repo_url), stdout=PIPE) - out, err = git.communicate() - status_code = git.returncode - if status_code != 0: - raise RuntimeError('Failed to retrieve git tags from %s. ' - 'Status code: %s. Output: %s. Error: %s' % - (git_repo_url, status_code, out, err)) - return list( - filter(None, (_parse_tag(line, version_pattern) - for line in _sanitize(out)))) - - -def _tree(versions, level=0): - ''' Group versions by major, minor and patch version digits. ''' - if not versions or level >= len(versions[0].digits): - return # Empty versions or no more digits to group by. 
- versions_tree = [] - for _, versions_group in groupby(versions, lambda v: v.digits[level]): - subtree = _tree(list(versions_group), level + 1) - if subtree: - versions_tree.append(subtree) - # Return the current subtree if non-empty, or the list of "leaf" versions: - return versions_tree if versions_tree else versions - - -def _is_iterable(obj): - ''' - Check if the provided object is an iterable collection, i.e. not a string, - e.g. a list, a generator: - >>> _is_iterable('string') - False - >>> _is_iterable([1, 2, 3]) - True - >>> _is_iterable((x for x in [1, 2, 3])) - True - ''' - return hasattr(obj, '__iter__') and not isinstance(obj, str) - - -def _leaf_versions(tree, rc): - ''' - Recursively traverse the versions tree in a depth-first fashion, - and collect the last node of each branch, i.e. leaf versions. - ''' - versions = [] - if _is_iterable(tree): - for subtree in tree: - versions.extend(_leaf_versions(subtree, rc)) - if not versions: - if rc: - last_rc = next(filter(lambda v: v.is_rc, reversed(tree)), None) - last_prod = next( - filter(lambda v: not v.is_rc, reversed(tree)), None) - if last_rc and last_prod and (last_prod < last_rc): - versions.extend([last_prod, last_rc]) - elif not last_prod: - versions.append(last_rc) - else: - # Either there is no RC, or we ignore the RC as older than - # the latest production version: - versions.append(last_prod) - else: - versions.append(tree[-1]) - return versions - - -def filter_versions(versions, min_version=None, rc=False, latest=False): - ''' Filter provided versions - - >>> filter_versions( - ['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], - min_version=None, latest=False, rc=False) - [1.0.0, 1.0.1, 1.1.1, 2.0.0] - - >>> filter_versions( - ['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], - min_version=None, latest=True, rc=False) - [1.0.1, 1.1.1, 2.0.0] - - >>> filter_versions( - ['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], - min_version=None, latest=False, rc=True) - [1.0.0-beta.1, 1.0.0, 1.0.1, 1.1.1, 1.1.2-rc1, 2.0.0] - - >>> filter_versions( - ['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], - min_version='1.1.0', latest=False, rc=True) - [1.1.1, 1.1.2-rc1, 2.0.0] - - >>> filter_versions( - ['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], - min_version=None, latest=True, rc=True) - [1.0.1, 1.1.1, 1.1.2-rc1, 2.0.0] - - >>> filter_versions( - ['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], - min_version='1.1.0', latest=True, rc=True) - [1.1.1, 1.1.2-rc1, 2.0.0] - ''' - versions = sorted([Version(v) for v in versions]) - if min_version: - min_version = Version(min_version) - versions = [v for v in versions if v >= min_version] - if not rc: - versions = [v for v in versions if not v.is_rc] - if latest: - versions_tree = _tree(versions) - return _leaf_versions(versions_tree, rc) - else: - return versions - - -def _usage(error_message=None): - if error_message: - stderr.write('ERROR: ' + error_message + linesep) - stdout.write( - linesep.join([ - 'Usage:', ' list_versions.py [OPTION]... 
[DEPENDENCY]', - 'Examples:', ' list_versions.py go', - ' list_versions.py -r docker', - ' list_versions.py --rc docker', - ' list_versions.py -l kubernetes', - ' list_versions.py --latest kubernetes', 'Options:', - '-l/--latest Include only the latest version of each major and' - ' minor versions sub-tree.', - '-r/--rc Include release candidate versions.', - '-h/--help Prints this!', '' - ])) - - -def _validate_input(argv): - try: - config = {'rc': False, 'latest': False} - opts, args = getopt(argv, 'hlr', ['help', 'latest', 'rc']) - for opt, value in opts: - if opt in ('-h', '--help'): - _usage() - exit() - if opt in ('-l', '--latest'): - config['latest'] = True - if opt in ('-r', '--rc'): - config['rc'] = True - if len(args) != 1: - raise ValueError('Please provide a dependency to get versions of.' - ' Expected 1 argument but got %s: %s.' % - (len(args), args)) - dependency = args[0].lower() - if dependency not in DEPS.keys(): - raise ValueError( - 'Please provide a valid dependency.' - ' Supported one dependency among {%s} but got: %s.' % - (', '.join(DEPS.keys()), dependency)) - return dependency, config - except GetoptError as e: - _usage(str(e)) - exit(_ERROR_ILLEGAL_ARGS) - except ValueError as e: - _usage(str(e)) - exit(_ERROR_ILLEGAL_ARGS) - - -def main(argv): - try: - dependency, config = _validate_input(argv) - if dependency == 'go': - _try_set_min_go_version() - versions = get_versions_from(DEPS[dependency]['url'], - DEPS[dependency]['re']) - versions = filter_versions(versions, DEPS[dependency]['min'], **config) - print(linesep.join(map(str, versions))) - except Exception as e: - print(str(e)) - exit(_ERROR_RUNTIME) - - -if __name__ == '__main__': - main(argv[1:]) diff --git a/tools/files-with-type b/tools/files-with-type deleted file mode 100755 index 8238980c..00000000 --- a/tools/files-with-type +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -# -# Find all files with a given MIME type. -# -# e.g. -# $ files-with-type text/x-shellscript k8s infra - -mime_type=$1 -shift - -git ls-files "$@" | grep -vE '^vendor/' | xargs file --mime-type | grep "${mime_type}" | sed -e 's/:.*$//' diff --git a/tools/image-tag b/tools/image-tag deleted file mode 100755 index 31f023da..00000000 --- a/tools/image-tag +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail - -WORKING_SUFFIX=$(if git status --porcelain | grep -qE '^(?:[^?][^ ]|[^ ][^?])\s'; then echo "-WIP"; else echo ""; fi) -BRANCH_PREFIX=$(git rev-parse --abbrev-ref HEAD) -echo "${BRANCH_PREFIX//\//-}-$(git rev-parse --short HEAD)$WORKING_SUFFIX" diff --git a/tools/integration/assert.sh b/tools/integration/assert.sh deleted file mode 100755 index 200b393b..00000000 --- a/tools/integration/assert.sh +++ /dev/null @@ -1,193 +0,0 @@ -#!/bin/bash -# assert.sh 1.1 - bash unit testing framework -# Copyright (C) 2009-2015 Robert Lehmann -# -# http://github.com/lehmannro/assert.sh -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published -# by the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see . - -export DISCOVERONLY=${DISCOVERONLY:-} -export DEBUG=${DEBUG:-} -export STOP=${STOP:-} -export INVARIANT=${INVARIANT:-} -export CONTINUE=${CONTINUE:-} - -args="$(getopt -n "$0" -l \ - verbose,help,stop,discover,invariant,continue vhxdic "$@")" \ - || exit -1 -for arg in $args; do - case "$arg" in - -h) - echo "$0 [-vxidc]" \ - "[--verbose] [--stop] [--invariant] [--discover] [--continue]" - echo "$(sed 's/./ /g' <<<"$0") [-h] [--help]" - exit 0 - ;; - --help) - cat < [stdin] - ((tests_ran++)) || : - [[ -z "$DISCOVERONLY" ]] || return - expected=$(echo -ne "${2:-}") - result="$(eval "$1" 2>/dev/null <<<"${3:-}")" || true - if [[ "$result" == "$expected" ]]; then - [[ -z "$DEBUG" ]] || echo -n . - return - fi - result="$(sed -e :a -e '$!N;s/\n/\\n/;ta' <<<"$result")" - [[ -z "$result" ]] && result="nothing" || result="\"$result\"" - [[ -z "$2" ]] && expected="nothing" || expected="\"$2\"" - _assert_fail "expected $expected${_indent}got $result" "$1" "$3" -} - -assert_raises() { - # assert_raises [stdin] - ((tests_ran++)) || : - [[ -z "$DISCOVERONLY" ]] || return - status=0 - (eval "$1" <<<"${3:-}") >/dev/null 2>&1 || status=$? - expected=${2:-0} - if [[ "$status" -eq "$expected" ]]; then - [[ -z "$DEBUG" ]] || echo -n . - return - fi - _assert_fail "program terminated with code $status instead of $expected" "$1" "$3" -} - -_assert_fail() { - # _assert_fail - [[ -n "$DEBUG" ]] && echo -n X - report="test #$tests_ran \"$2${3:+ <<< $3}\" failed:${_indent}$1" - if [[ -n "$STOP" ]]; then - [[ -n "$DEBUG" ]] && echo - echo "$report" - exit 1 - fi - tests_errors[$tests_failed]="$report" - ((tests_failed++)) || : -} - -skip_if() { - # skip_if - (eval "$@") >/dev/null 2>&1 && status=0 || status=$? - [[ "$status" -eq 0 ]] || return - skip -} - -skip() { - # skip (no arguments) - shopt -q extdebug && tests_extdebug=0 || tests_extdebug=1 - shopt -q -o errexit && tests_errexit=0 || tests_errexit=1 - # enable extdebug so returning 1 in a DEBUG trap handler skips next command - shopt -s extdebug - # disable errexit (set -e) so we can safely return 1 without causing exit - set +o errexit - tests_trapped=0 - trap _skip DEBUG -} -_skip() { - if [[ $tests_trapped -eq 0 ]]; then - # DEBUG trap for command we want to skip. Do not remove the handler - # yet because *after* the command we need to reset extdebug/errexit (in - # another DEBUG trap.) - tests_trapped=1 - [[ -z "$DEBUG" ]] || echo -n s - return 1 - else - trap - DEBUG - [[ $tests_extdebug -eq 0 ]] || shopt -u extdebug - [[ $tests_errexit -eq 1 ]] || set -o errexit - return 0 - fi -} - -_assert_reset -: ${tests_suite_status:=0} # remember if any of the tests failed so far -_assert_cleanup() { - local status=$? - # modify exit code if it's not already non-zero - [[ $status -eq 0 && -z $CONTINUE ]] && exit $tests_suite_status -} -trap _assert_cleanup EXIT diff --git a/tools/integration/config.sh b/tools/integration/config.sh deleted file mode 100644 index 54192192..00000000 --- a/tools/integration/config.sh +++ /dev/null @@ -1,130 +0,0 @@ -#!/bin/bash -# NB only to be sourced - -set -e - -DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -# Protect against being sourced multiple times to prevent -# overwriting assert.sh global state -if ! 
[ -z "$SOURCED_CONFIG_SH" ]; then - return -fi -SOURCED_CONFIG_SH=true - -# these ought to match what is in Vagrantfile -N_MACHINES=${N_MACHINES:-3} -IP_PREFIX=${IP_PREFIX:-192.168.48} -IP_SUFFIX_BASE=${IP_SUFFIX_BASE:-10} - -if [ -z "$HOSTS" ]; then - for i in $(seq 1 "$N_MACHINES"); do - IP="${IP_PREFIX}.$((IP_SUFFIX_BASE + i))" - HOSTS="$HOSTS $IP" - done -fi - -# these are used by the tests -# shellcheck disable=SC2034 -HOST1=$(echo "$HOSTS" | cut -f 1 -d ' ') -# shellcheck disable=SC2034 -HOST2=$(echo "$HOSTS" | cut -f 2 -d ' ') -# shellcheck disable=SC2034 -HOST3=$(echo "$HOSTS" | cut -f 3 -d ' ') - -# shellcheck disable=SC1090 -. "$DIR/assert.sh" - -SSH_DIR=${SSH_DIR:-$DIR} -SSH=${SSH:-ssh -l vagrant -i \"$SSH_DIR/insecure_private_key\" -o \"UserKnownHostsFile=$SSH_DIR/.ssh_known_hosts\" -o CheckHostIP=no -o StrictHostKeyChecking=no} - -SMALL_IMAGE="alpine" -# shellcheck disable=SC2034 -TEST_IMAGES="$SMALL_IMAGE" - -# shellcheck disable=SC2034 -PING="ping -nq -W 1 -c 1" -DOCKER_PORT=2375 - -remote() { - rem=$1 - shift 1 - "$@" > >(while read -r line; do echo -e $'\e[0;34m'"$rem>"$'\e[0m'" $line"; done) -} - -colourise() { - ([ -t 0 ] && echo -ne $'\e['"$1"'m') || true - shift - # It's important that we don't do this in a subshell, as some - # commands we execute need to modify global state - "$@" - ([ -t 0 ] && echo -ne $'\e[0m') || true -} - -whitely() { - colourise '1;37' "$@" -} - -greyly() { - colourise '0;37' "$@" -} - -redly() { - colourise '1;31' "$@" -} - -greenly() { - colourise '1;32' "$@" -} - -run_on() { - host=$1 - shift 1 - [ -z "$DEBUG" ] || greyly echo "Running on $host:" "$@" >&2 - # shellcheck disable=SC2086 - remote "$host" $SSH "$host" "$@" -} - -docker_on() { - host=$1 - shift 1 - [ -z "$DEBUG" ] || greyly echo "Docker on $host:$DOCKER_PORT:" "$@" >&2 - docker -H "tcp://$host:$DOCKER_PORT" "$@" -} - -weave_on() { - host=$1 - shift 1 - [ -z "$DEBUG" ] || greyly echo "Weave on $host:$DOCKER_PORT:" "$@" >&2 - DOCKER_HOST=tcp://$host:$DOCKER_PORT $WEAVE "$@" -} - -exec_on() { - host=$1 - container=$2 - shift 2 - docker -H "tcp://$host:$DOCKER_PORT" exec "$container" "$@" -} - -rm_containers() { - host=$1 - shift - [ $# -eq 0 ] || docker_on "$host" rm -f "$@" >/dev/null -} - -start_suite() { - for host in $HOSTS; do - [ -z "$DEBUG" ] || echo "Cleaning up on $host: removing all containers and resetting weave" - # shellcheck disable=SC2046 - rm_containers "$host" $(docker_on "$host" ps -aq 2>/dev/null) - run_on "$host" "docker network ls | grep -q ' weave ' && docker network rm weave" || true - weave_on "$host" reset 2>/dev/null - done - whitely echo "$@" -} - -end_suite() { - whitely assert_end -} - -WEAVE=$DIR/../../integration/weave diff --git a/tools/integration/gce.sh b/tools/integration/gce.sh deleted file mode 100755 index 5c394018..00000000 --- a/tools/integration/gce.sh +++ /dev/null @@ -1,202 +0,0 @@ -#!/bin/bash -# This script has a bunch of GCE-related functions: -# ./gce.sh setup - starts two VMs on GCE and configures them to run our integration tests -# . ./gce.sh; ./run_all.sh - set a bunch of environment variables for the tests -# ./gce.sh destroy - tear down the VMs -# ./gce.sh make_template - make a fresh VM template; update TEMPLATE_NAME first! 
- -set -e - -: "${KEY_FILE:=/tmp/gce_private_key.json}" -: "${SSH_KEY_FILE:=$HOME/.ssh/gce_ssh_key}" -: "${IMAGE_FAMILY:=ubuntu-1404-lts}" -: "${IMAGE_PROJECT:=ubuntu-os-cloud}" -: "${USER_ACCOUNT:=ubuntu}" -: "${ZONE:=us-central1-a}" -: "${PROJECT:=}" -: "${TEMPLATE_NAME:=}" -: "${NUM_HOSTS:=}" - -if [ -z "${PROJECT}" ] || [ -z "${NUM_HOSTS}" ] || [ -z "${TEMPLATE_NAME}" ]; then - echo "Must specify PROJECT, NUM_HOSTS and TEMPLATE_NAME" - exit 1 -fi - -SUFFIX="" -if [ -n "$CIRCLECI" ]; then - SUFFIX="-${CIRCLE_PROJECT_USERNAME}-${CIRCLE_PROJECT_REPONAME}-${CIRCLE_BUILD_NUM}-$CIRCLE_NODE_INDEX" -else - SUFFIX="-${USER}" -fi - -# Setup authentication -gcloud auth activate-service-account --key-file "$KEY_FILE" 1>/dev/null -gcloud config set project "$PROJECT" - -function vm_names() { - local names= - for i in $(seq 1 "$NUM_HOSTS"); do - names=("host$i$SUFFIX" "${names[@]}") - done - echo "${names[@]}" -} - -# Delete all vms in this account -function destroy() { - local names - # shellcheck disable=SC2046 - if [ $(gcloud compute firewall-rules list "test-allow-docker$SUFFIX" 2>/dev/null | wc -l) -gt 0 ]; then - gcloud compute firewall-rules delete "test-allow-docker$SUFFIX" - fi - names="$(vm_names)" - # shellcheck disable=SC2086 - if [ "$(gcloud compute instances list --zones "$ZONE" -q $names | wc -l)" -le 1 ]; then - return 0 - fi - for i in {0..10}; do - # gcloud instances delete can sometimes hang. - case $( - set +e - timeout 60s /bin/bash -c "gcloud compute instances delete --zone $ZONE -q $names >/dev/null 2>&1" - echo $? - ) in - 0) - return 0 - ;; - 124) - # 124 means it timed out - break - ;; - *) - return 1 - ;; - esac - done -} - -function internal_ip() { - jq -r ".[] | select(.name == \"$2\") | .networkInterfaces[0].networkIP" "$1" -} - -function external_ip() { - jq -r ".[] | select(.name == \"$2\") | .networkInterfaces[0].accessConfigs[0].natIP" "$1" -} - -function try_connect() { - for i in {0..10}; do - ssh -t "$1" true && return - sleep 2 - done -} - -function install_docker_on() { - name=$1 - echo "Installing Docker on $name for user ${USER_ACCOUNT}" - # shellcheck disable=SC2087 - ssh -t "$name" sudo bash -x -s <> /etc/default/docker; -service docker restart -EOF - # It seems we need a short delay for docker to start up, so I put this in - # a separate ssh connection. This installs nsenter. 
- ssh -t "$name" sudo docker run --rm -v /usr/local/bin:/target jpetazzo/nsenter -} - -function copy_hosts() { - hostname=$1 - hosts=$2 - ssh -t "$hostname" "sudo -- sh -c \"cat >>/etc/hosts\"" <"$hosts" -} - -# Create new set of VMs -function setup() { - destroy - - names=($(vm_names)) - gcloud compute instances create "${names[@]}" --image "$TEMPLATE_NAME" --zone "$ZONE" --tags "test$SUFFIX" --network=test - my_ip="$(curl -s http://ipinfo.io/ip)" - gcloud compute firewall-rules create "test-allow-docker$SUFFIX" --network=test --allow tcp:2375,tcp:12375,tcp:4040,tcp:80 --target-tags "test$SUFFIX" --source-ranges "$my_ip" - - gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE" - sed -i '/UserKnownHostsFile=\/dev\/null/d' ~/.ssh/config - - # build an /etc/hosts file for these vms - hosts=$(mktemp hosts.XXXXXXXXXX) - json=$(mktemp json.XXXXXXXXXX) - gcloud compute instances list --format=json >"$json" - for name in "${names[@]}"; do - echo "$(internal_ip "$json" "$name") $name.$ZONE.$PROJECT" >>"$hosts" - done - - for name in "${names[@]}"; do - hostname="$name.$ZONE.$PROJECT" - - # Add the remote ip to the local /etc/hosts - sudo sed -i "/$hostname/d" /etc/hosts - sudo sh -c "echo \"$(external_ip "$json" "$name") $hostname\" >>/etc/hosts" - try_connect "$hostname" - - copy_hosts "$hostname" "$hosts" & - done - - wait - - rm "$hosts" "$json" -} - -function make_template() { - gcloud compute instances create "$TEMPLATE_NAME" --image-family "$IMAGE_FAMILY" --image-project "$IMAGE_PROJECT" --zone "$ZONE" - gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE" - name="$TEMPLATE_NAME.$ZONE.$PROJECT" - try_connect "$name" - install_docker_on "$name" - gcloud -q compute instances delete "$TEMPLATE_NAME" --keep-disks boot --zone "$ZONE" - gcloud compute images create "$TEMPLATE_NAME" --source-disk "$TEMPLATE_NAME" --source-disk-zone "$ZONE" -} - -function hosts() { - hosts= - args= - json=$(mktemp json.XXXXXXXXXX) - gcloud compute instances list --format=json >"$json" - for name in $(vm_names); do - hostname="$name.$ZONE.$PROJECT" - hosts=($hostname "${hosts[@]}") - args=("--add-host=$hostname:$(internal_ip "$json" "$name")" "${args[@]}") - done - echo export SSH=\"ssh -l "${USER_ACCOUNT}"\" - echo "export HOSTS=\"${hosts[*]}\"" - echo "export ADD_HOST_ARGS=\"${args[*]}\"" - rm "$json" -} - -case "$1" in - setup) - setup - ;; - - hosts) - hosts - ;; - - destroy) - destroy - ;; - - make_template) - # see if template exists - if ! gcloud compute images list | grep "$PROJECT" | grep "$TEMPLATE_NAME"; then - make_template - else - echo "Reusing existing template:" - gcloud compute images describe "$TEMPLATE_NAME" | grep "^creationTimestamp" - fi - ;; -esac diff --git a/tools/integration/run_all.sh b/tools/integration/run_all.sh deleted file mode 100755 index 837e0d49..00000000 --- a/tools/integration/run_all.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -set -ex - -DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -# shellcheck disable=SC1090 -. "$DIR/config.sh" - -whitely echo Sanity checks -if ! bash "$DIR/sanity_check.sh"; then - whitely echo ...failed - exit 1 -fi -whitely echo ...ok - -# shellcheck disable=SC2068 -TESTS=(${@:-$(find . 
-name '*_test.sh')}) -RUNNER_ARGS=() - -# If running on circle, use the scheduler to work out what tests to run -if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ]; then - RUNNER_ARGS=("${RUNNER_ARGS[@]}" -scheduler) -fi - -# If running on circle or PARALLEL is not empty, run tests in parallel -if [ -n "$CIRCLECI" ] || [ -n "$PARALLEL" ]; then - RUNNER_ARGS=("${RUNNER_ARGS[@]}" -parallel) -fi - -make -C "${DIR}/../runner" -HOSTS="$HOSTS" "${DIR}/../runner/runner" "${RUNNER_ARGS[@]}" "${TESTS[@]}" diff --git a/tools/integration/sanity_check.sh b/tools/integration/sanity_check.sh deleted file mode 100755 index 192112de..00000000 --- a/tools/integration/sanity_check.sh +++ /dev/null @@ -1,26 +0,0 @@ -#! /bin/bash -# shellcheck disable=SC1090,SC1091 -. "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/config.sh" - -set -e - -whitely echo Ping each host from the other -for host in $HOSTS; do - for other in $HOSTS; do - [ "$host" = "$other" ] || run_on "$host" "$PING" "$other" - done -done - -whitely echo Check we can reach docker - -for host in $HOSTS; do - echo - echo "Host Version Info: $host" - echo "=====================================" - echo "# docker version" - docker_on "$host" version - echo "# docker info" - docker_on "$host" info - echo "# weave version" - weave_on "$host" version -done diff --git a/tools/lint b/tools/lint deleted file mode 100755 index 63c50661..00000000 --- a/tools/lint +++ /dev/null @@ -1,255 +0,0 @@ -#!/bin/bash -# This scipt lints files for common errors. -# -# For go files, it runs gofmt and go vet, and optionally golint and -# gocyclo, if they are installed. -# -# For shell files, it runs shfmt. If you don't have that installed, you can get -# it with: -# go get -u gopkg.in/mvdan/sh.v1/cmd/shfmt -# -# With no arguments, it lints the current files staged -# for git commit. Or you can pass it explicit filenames -# (or directories) and it will lint them. -# -# To use this script automatically, run: -# ln -s ../../bin/lint .git/hooks/pre-commit - -set -e - -LINT_IGNORE_FILE=${LINT_IGNORE_FILE:-".lintignore"} - -IGNORE_LINT_COMMENT= -IGNORE_SPELLINGS= -PARALLEL= -while true; do - case "$1" in - -nocomment) - IGNORE_LINT_COMMENT=1 - shift 1 - ;; - -notestpackage) - # NOOP, still accepted for backwards compatibility - shift 1 - ;; - -ignorespelling) - IGNORE_SPELLINGS="$2,$IGNORE_SPELLINGS" - shift 2 - ;; - -p) - PARALLEL=1 - shift 1 - ;; - *) - break - ;; - esac -done - -spell_check() { - local filename="$1" - local lint_result=0 - - # we don't want to spell check tar balls, binaries, Makefile and json files - if file "$filename" | grep executable >/dev/null 2>&1; then - return $lint_result - fi - if [[ $filename == *".tar" || $filename == *".gz" || $filename == *".json" || $(basename "$filename") == "Makefile" ]]; then - return $lint_result - fi - - # misspell is completely optional. If you don't like it - # don't have it installed. - if ! type misspell >/dev/null 2>&1; then - return $lint_result - fi - - if ! misspell -error -i "$IGNORE_SPELLINGS" "${filename}"; then - lint_result=1 - fi - - return $lint_result -} - -lint_go() { - local filename="$1" - local lint_result=0 - - if [ -n "$(gofmt -s -l "${filename}")" ]; then - lint_result=1 - echo "${filename}: run gofmt -s -w ${filename}" - fi - - go tool vet "${filename}" || lint_result=$? - - # golint is completely optional. If you don't like it - # don't have it installed. 
- if type golint >/dev/null 2>&1; then - # golint doesn't set an exit code it seems - if [ -z "$IGNORE_LINT_COMMENT" ]; then - lintoutput=$(golint "${filename}") - else - lintoutput=$(golint "${filename}" | grep -vE 'comment|dot imports|ALL_CAPS') - fi - if [ -n "$lintoutput" ]; then - lint_result=1 - echo "$lintoutput" - fi - fi - - # gocyclo is completely optional. If you don't like it - # don't have it installed. Also never blocks a commit, - # it just warns. - if type gocyclo >/dev/null 2>&1; then - gocyclo -over 25 "${filename}" | while read -r line; do - echo "${filename}": higher than 25 cyclomatic complexity - "${line}" - done - fi - - return $lint_result -} - -lint_sh() { - local filename="$1" - local lint_result=0 - - if ! diff -u "${filename}" <(shfmt -i 4 "${filename}"); then - lint_result=1 - echo "${filename}: run shfmt -i 4 -w ${filename}" - fi - - # the shellcheck is completely optional. If you don't like it - # don't have it installed. - if type shellcheck >/dev/null 2>&1; then - shellcheck "${filename}" || lint_result=1 - fi - - return $lint_result -} - -lint_tf() { - local filename="$1" - local lint_result=0 - - if ! diff -u <(hclfmt "${filename}") "${filename}"; then - lint_result=1 - echo "${filename}: run hclfmt -w ${filename}" - fi - - return $lint_result -} - -lint_md() { - local filename="$1" - local lint_result=0 - - for i in '=======' '>>>>>>>'; do - if grep -q "${i}" "${filename}"; then - lint_result=1 - echo "${filename}: bad merge/rebase!" - fi - done - - return $lint_result -} - -lint_py() { - local filename="$1" - local lint_result=0 - - if yapf --diff "${filename}" | grep -qE '^[+-]'; then - lint_result=1 - echo "${filename}: run yapf --in-place ${filename}" - else - # Only run flake8 if yapf passes, since they pick up a lot of similar issues - flake8 "${filename}" || lint_result=1 - fi - - return $lint_result -} - -lint() { - filename="$1" - ext="${filename##*\.}" - local lint_result=0 - - # Don't lint deleted files - if [ ! -f "$filename" ]; then - return - fi - - # Don't lint specific files - case "$(basename "${filename}")" in - static.go) return ;; - coverage.html) return ;; - *.pb.go) return ;; - esac - - if [[ "$(file --mime-type "${filename}" | awk '{print $2}')" == "text/x-shellscript" ]]; then - ext="sh" - fi - - case "$ext" in - go) lint_go "${filename}" || lint_result=1 ;; - sh) lint_sh "${filename}" || lint_result=1 ;; - tf) lint_tf "${filename}" || lint_result=1 ;; - md) lint_md "${filename}" || lint_result=1 ;; - py) lint_py "${filename}" || lint_result=1 ;; - esac - - spell_check "${filename}" || lint_result=1 - - return $lint_result -} - -lint_files() { - local lint_result=0 - while read -r filename; do - lint "${filename}" || lint_result=1 - done - exit $lint_result -} - -matches_any() { - local filename="$1" - local patterns="$2" - while read -r pattern; do - # shellcheck disable=SC2053 - # Use the [[ operator without quotes on $pattern - # in order to "glob" the provided filename: - [[ "$filename" == $pattern ]] && return 0 - done <<<"$patterns" - return 1 -} - -filter_out() { - local patterns_file="$1" - if [ -n "$patterns_file" ] && [ -r "$patterns_file" ]; then - local patterns - patterns=$(sed '/^#.*$/d ; /^\s*$/d' "$patterns_file") # Remove blank lines and comments before we start iterating. 
- [ -n "$DEBUG" ] && echo >&2 "> Filters:" && echo >&2 "$patterns" - local filtered_out=() - while read -r filename; do - matches_any "$filename" "$patterns" && filtered_out+=("$filename") || echo "$filename" - done - [ -n "$DEBUG" ] && echo >&2 "> Files filtered out (i.e. NOT linted):" && printf >&2 '%s\n' "${filtered_out[@]}" - else - cat # No patterns provided: simply propagate stdin to stdout. - fi -} - -list_files() { - if [ $# -gt 0 ]; then - find "$@" | grep -vE '(^|/)vendor/' - else - git ls-files --exclude-standard | grep -vE '(^|/)vendor/' - fi -} - -if [ $# = 1 ] && [ -f "$1" ]; then - lint "$1" -elif [ -n "$PARALLEL" ]; then - list_files "$@" | filter_out "$LINT_IGNORE_FILE" | xargs -n1 -P16 "$0" -else - list_files "$@" | filter_out "$LINT_IGNORE_FILE" | lint_files -fi diff --git a/tools/provisioning/README.md b/tools/provisioning/README.md deleted file mode 100755 index 627bb42e..00000000 --- a/tools/provisioning/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# Weaveworks provisioning - -## Introduction - -This project allows you to get hold of some machine either locally or on one of the below cloud providers: - -* Amazon Web Services -* Digital Ocean -* Google Cloud Platform - -You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc. - -## Set up - -* You will need [Vagrant](https://www.vagrantup.com) installed on your machine and added to your `PATH` in order to be able to provision local (virtual) machines automatically. - - * On macOS: `brew install vagrant` - * On Linux (via Aptitude): `sudo apt install vagrant` - * If you need a specific version: - - curl -fsS https://releases.hashicorp.com/terraform/x.y.z/terraform_x.y.z_linux_amd64.zip | gunzip > terraform && chmod +x terraform && sudo mv terraform /usr/bin - - * For other platforms or more details, see [here](https://www.vagrantup.com/docs/installation/) - -* You will need [Terraform](https://www.terraform.io) installed on your machine and added to your `PATH` in order to be able to provision cloud-hosted machines automatically. - - * On macOS: `brew install terraform` - * On Linux (via Aptitude): `sudo apt install terraform` - * For other platforms or more details, see [here](https://www.terraform.io/intro/getting-started/install.html) - -* Depending on the cloud provider, you may have to create an account, manually onboard, create and register SSH keys, etc. - Please refer to the `README.md` in each sub-folder for more details. - -## Usage in scripts - -Source `setup.sh`, set the `SECRET_KEY` environment variable, and depending on the cloud provider you want to use, call either: - -* `gcp_on` / `gcp_off` -* `do_on` / `do_off` -* `aws_on` / `aws_off` - -## Usage in shell - -Source `setup.sh`, set the `SECRET_KEY` environment variable, and depending on the cloud provider you want to use, call either: - -* `gcp_on` / `gcp_off` -* `do_on` / `do_off` -* `aws_on` / `aws_off` - -Indeed, the functions defined in `setup.sh` are also exported as aliases, so you can call them from your shell directly. - -Other aliases are also defined, in order to make your life easier: - -* `tf_ssh`: to ease SSH-ing into the virtual machines, reading the username and IP address to use from Terraform, as well as setting default SSH options. -* `tf_ansi`: to ease applying an Ansible playbook to a set of virtual machines, dynamically creating the inventory, as well as setting default SSH options. 
diff --git a/tools/provisioning/aws/README.md b/tools/provisioning/aws/README.md deleted file mode 100644 index f4f018f9..00000000 --- a/tools/provisioning/aws/README.md +++ /dev/null @@ -1,90 +0,0 @@ -# Amazon Web Services - -## Introduction - -This project allows you to get hold of some machine on Amazon Web Services. -You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc. - -## Setup - -* Log in [weaveworks.signin.aws.amazon.com/console](https://weaveworks.signin.aws.amazon.com/console/) with your account. - -* Go to `Services` > `IAM` > `Users` > Click on your username > `Security credentials` > `Create access key`. - Your access key and secret key will appear on the screen. Set these as environment variables: - -``` -export AWS_ACCESS_KEY_ID= -export AWS_SECRET_ACCESS_KEY= -``` - -* Go to `Services` > `EC2` > Select the availability zone you want to use (see top right corner, e.g. `us-east-1`) > `Import Key Pair`. - Enter your SSH public key and the name for it, and click `Import`. - Set the path to your private key as an environment variable: - -``` -export TF_VAR_aws_public_key_name= -export TF_VAR_aws_private_key_path="$HOME/.ssh/id_rsa" -``` - -* Set your current IP address as an environment variable: - -``` -export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/) -``` - - or pass it as a Terraform variable: - -``` -$ terraform -var 'client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)' -``` - -### Bash aliases - -You can set the above variables temporarily in your current shell, permanently in your `~/.bashrc` file, or define aliases to activate/deactivate them at will with one single command by adding the below to your `~/.bashrc` file: - -``` -function _aws_on() { - export AWS_ACCESS_KEY_ID="" # Replace with appropriate value. - export AWS_SECRET_ACCESS_KEY="" # Replace with appropriate value. - export TF_VAR_aws_public_key_name="" # Replace with appropriate value. - export TF_VAR_aws_private_key_path="$HOME/.ssh/id_rsa" # Replace with appropriate value. -} -alias _aws_on='_aws_on' -function _aws_off() { - unset AWS_ACCESS_KEY_ID - unset AWS_SECRET_ACCESS_KEY - unset TF_VAR_aws_public_key_name - unset TF_VAR_aws_private_key_path -} -alias _aws_off='_aws_off' -``` - -N.B.: - -* sourcing `../setup.sh` defines aliases called `aws_on` and `aws_off`, similarly to the above (however, notice no `_` in front of the name, as opposed to the ones above); -* `../setup.sh`'s `aws_on` alias needs the `SECRET_KEY` environment variable to be set in order to decrypt sensitive information. - -## Usage - -* Create the machine: `terraform apply` -* Show the machine's status: `terraform show` -* Stop and destroy the machine: `terraform destroy` -* SSH into the newly-created machine: - -``` -$ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no `terraform output username`@`terraform output public_ips` -# N.B.: the default username will differ depending on the AMI/OS you installed, e.g. ubuntu for Ubuntu, ec2-user for Red Hat, etc. -``` - -or - -``` -source ../setup.sh -tf_ssh 1 # Or the nth machine, if multiple VMs are provisioned. 
-``` - -## Resources - -* [https://www.terraform.io/docs/providers/aws/](https://www.terraform.io/docs/providers/aws/) -* [https://www.terraform.io/docs/providers/aws/r/instance.html](https://www.terraform.io/docs/providers/aws/r/instance.html) -* [Terraform variables](https://www.terraform.io/intro/getting-started/variables.html) diff --git a/tools/provisioning/aws/main.tf b/tools/provisioning/aws/main.tf deleted file mode 100755 index f4be8c34..00000000 --- a/tools/provisioning/aws/main.tf +++ /dev/null @@ -1,137 +0,0 @@ -# Specify the provider and access details -provider "aws" { - # Access key, secret key and region are sourced from environment variables or input arguments -- see README.md - region = "${var.aws_dc}" -} - -resource "aws_security_group" "allow_ssh" { - name = "${var.name}_allow_ssh" - description = "AWS security group to allow SSH-ing onto AWS EC2 instances (created using Terraform)." - - # Open TCP port for SSH: - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = ["${var.client_ip}/32"] - } - - tags { - Name = "${var.name}_allow_ssh" - App = "${var.app}" - CreatedBy = "terraform" - } -} - -resource "aws_security_group" "allow_docker" { - name = "${var.name}_allow_docker" - description = "AWS security group to allow communication with Docker on AWS EC2 instances (created using Terraform)." - - # Open TCP port for Docker: - ingress { - from_port = 2375 - to_port = 2375 - protocol = "tcp" - cidr_blocks = ["${var.client_ip}/32"] - } - - tags { - Name = "${var.name}_allow_docker" - App = "${var.app}" - CreatedBy = "terraform" - } -} - -resource "aws_security_group" "allow_weave" { - name = "${var.name}_allow_weave" - description = "AWS security group to allow communication with Weave on AWS EC2 instances (created using Terraform)." - - # Open TCP port for Weave: - ingress { - from_port = 12375 - to_port = 12375 - protocol = "tcp" - cidr_blocks = ["${var.client_ip}/32"] - } - - tags { - Name = "${var.name}_allow_weave" - App = "${var.app}" - CreatedBy = "terraform" - } -} - -resource "aws_security_group" "allow_private_ingress" { - name = "${var.name}_allow_private_ingress" - description = "AWS security group to allow all private ingress traffic on AWS EC2 instances (created using Terraform)." - - # Full inbound local network access on both TCP and UDP - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["${var.aws_vpc_cidr_block}"] - } - - tags { - Name = "${var.name}_allow_private_ingress" - App = "${var.app}" - CreatedBy = "terraform" - } -} - -resource "aws_security_group" "allow_all_egress" { - name = "${var.name}_allow_all_egress" - description = "AWS security group to allow all egress traffic on AWS EC2 instances (created using Terraform)." 
- - # Full outbound internet access on both TCP and UDP - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - Name = "${var.name}_allow_all_egress" - App = "${var.app}" - CreatedBy = "terraform" - } -} - -resource "aws_instance" "tf_test_vm" { - instance_type = "${var.aws_size}" - count = "${var.num_hosts}" - - # Lookup the correct AMI based on the region we specified - ami = "${lookup(var.aws_amis, var.aws_dc)}" - - key_name = "${var.aws_public_key_name}" - - security_groups = [ - "${aws_security_group.allow_ssh.name}", - "${aws_security_group.allow_docker.name}", - "${aws_security_group.allow_weave.name}", - "${aws_security_group.allow_private_ingress.name}", - "${aws_security_group.allow_all_egress.name}", - ] - - # Wait for machine to be SSH-able: - provisioner "remote-exec" { - inline = ["exit"] - - connection { - type = "ssh" - - # Lookup the correct username based on the AMI we specified - user = "${lookup(var.aws_usernames, "${lookup(var.aws_amis, var.aws_dc)}")}" - private_key = "${file("${var.aws_private_key_path}")}" - } - } - - tags { - Name = "${var.name}-${count.index}" - App = "${var.app}" - CreatedBy = "terraform" - } -} diff --git a/tools/provisioning/aws/outputs.tf b/tools/provisioning/aws/outputs.tf deleted file mode 100755 index 587986b8..00000000 --- a/tools/provisioning/aws/outputs.tf +++ /dev/null @@ -1,54 +0,0 @@ -output "username" { - value = "${lookup(var.aws_usernames, "${lookup(var.aws_amis, var.aws_dc)}")}" -} - -output "public_ips" { - value = ["${aws_instance.tf_test_vm.*.public_ip}"] -} - -output "hostnames" { - value = "${join("\n", - "${formatlist("%v.%v.%v", - aws_instance.tf_test_vm.*.tags.Name, - aws_instance.tf_test_vm.*.availability_zone, - var.app - )}" - )}" -} - -# /etc/hosts file for the Droplets: -output "private_etc_hosts" { - value = "${join("\n", - "${formatlist("%v %v.%v.%v", - aws_instance.tf_test_vm.*.private_ip, - aws_instance.tf_test_vm.*.tags.Name, - aws_instance.tf_test_vm.*.availability_zone, - var.app - )}" - )}" -} - -# /etc/hosts file for the client: -output "public_etc_hosts" { - value = "${join("\n", - "${formatlist("%v %v.%v.%v", - aws_instance.tf_test_vm.*.public_ip, - aws_instance.tf_test_vm.*.tags.Name, - aws_instance.tf_test_vm.*.availability_zone, - var.app - )}" - )}" -} - -output "ansible_inventory" { - value = "${format("[all]\n%s", join("\n", - "${formatlist("%v private_ip=%v", - aws_instance.tf_test_vm.*.public_ip, - aws_instance.tf_test_vm.*.private_ip, - )}" - ))}" -} - -output "private_key_path" { - value = "${var.aws_private_key_path}" -} diff --git a/tools/provisioning/aws/variables.tf b/tools/provisioning/aws/variables.tf deleted file mode 100755 index 5f4b4628..00000000 --- a/tools/provisioning/aws/variables.tf +++ /dev/null @@ -1,68 +0,0 @@ -variable "client_ip" { - description = "IP address of the client machine" -} - -variable "app" { - description = "Name of the application using the created EC2 instance(s)." - default = "default" -} - -variable "name" { - description = "Name of the EC2 instance(s)." - default = "test" -} - -variable "num_hosts" { - description = "Number of EC2 instance(s)." - default = 1 -} - -variable "aws_vpc_cidr_block" { - description = "AWS VPC CIDR block to use to attribute private IP addresses." - default = "172.31.0.0/16" -} - -variable "aws_public_key_name" { - description = "Name of the SSH keypair to use in AWS." 
-} - -variable "aws_private_key_path" { - description = "Path to file containing private key" - default = "~/.ssh/id_rsa" -} - -variable "aws_dc" { - description = "The AWS region to create things in." - default = "us-east-1" -} - -variable "aws_amis" { - default = { - # Ubuntu Server 16.04 LTS (HVM), SSD Volume Type: - "us-east-1" = "ami-40d28157" - "eu-west-2" = "ami-23d0da47" - - # Red Hat Enterprise Linux 7.3 (HVM), SSD Volume Type: - - #"us-east-1" = "ami-b63769a1" - - # CentOS 7 (x86_64) - with Updates HVM - - #"us-east-1" = "ami-6d1c2007" - } -} - -variable "aws_usernames" { - description = "User to SSH as into the AWS instance." - - default = { - "ami-40d28157" = "ubuntu" # Ubuntu Server 16.04 LTS (HVM) - "ami-b63769a1" = "ec2-user" # Red Hat Enterprise Linux 7.3 (HVM) - "ami-6d1c2007" = "centos" # CentOS 7 (x86_64) - with Updates HVM - } -} - -variable "aws_size" { - description = "AWS' selected machine size" - default = "t2.medium" # Instance with 2 cores & 4 GB memory -} diff --git a/tools/provisioning/do/README.md b/tools/provisioning/do/README.md deleted file mode 100755 index d958f18d..00000000 --- a/tools/provisioning/do/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# Digital Ocean - -## Introduction - -This project allows you to get hold of some machine on Digital Ocean. -You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc. - -## Setup - -* Log in [cloud.digitalocean.com](https://cloud.digitalocean.com) with your account. - -* Go to `Settings` > `Security` > `SSH keys` > `Add SSH Key`. - Enter your SSH public key and the name for it, and click `Add SSH Key`. - Set the path to your private key as an environment variable: - -``` -export DIGITALOCEAN_SSH_KEY_NAME= -export TF_VAR_do_private_key_path="$HOME/.ssh/id_rsa" -``` - -* Go to `API` > `Tokens` > `Personal access tokens` > `Generate New Token` - Enter your token name and click `Generate Token` to get your 64-characters-long API token. - Set these as environment variables: - -``` -export DIGITALOCEAN_TOKEN_NAME="" -export DIGITALOCEAN_TOKEN= -``` - -* Run the following command to get the Digital Ocean ID for your SSH public key (e.g. `1234567`) and set it as an environment variable: - -``` -$ export TF_VAR_do_public_key_id=$(curl -s -X GET -H "Content-Type: application/json" \ --H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/account/keys" \ -| jq -c --arg key_name "$DIGITALOCEAN_SSH_KEY_NAME" '.ssh_keys | .[] | select(.name==$key_name) | .id') -``` - - or pass it as a Terraform variable: - -``` -$ terraform \ --var 'do_private_key_path=' \ --var 'do_public_key_id=' -``` - -### Bash aliases - -You can set the above variables temporarily in your current shell, permanently in your `~/.bashrc` file, or define aliases to activate/deactivate them at will with one single command by adding the below to your `~/.bashrc` file: - -``` -function _do_on() { - export DIGITALOCEAN_TOKEN_NAME="" # Replace with appropriate value. - export DIGITALOCEAN_TOKEN= # Replace with appropriate value. - export DIGITALOCEAN_SSH_KEY_NAME="" # Replace with appropriate value. - export TF_VAR_do_private_key_path="$HOME/.ssh/id_rsa" # Replace with appropriate value. - export TF_VAR_do_public_key_path="$HOME/.ssh/id_rsa.pub" # Replace with appropriate value. - export TF_VAR_do_public_key_id= # Replace with appropriate value. 
-} -alias _do_on='_do_on' -function _do_off() { - unset DIGITALOCEAN_TOKEN_NAME - unset DIGITALOCEAN_TOKEN - unset DIGITALOCEAN_SSH_KEY_NAME - unset TF_VAR_do_private_key_path - unset TF_VAR_do_public_key_path - unset TF_VAR_do_public_key_id -} -alias _do_off='_do_off' -``` - -N.B.: - -* sourcing `../setup.sh` defines aliases called `do_on` and `do_off`, similarly to the above (however, notice no `_` in front of the name, as opposed to the ones above); -* `../setup.sh`'s `do_on` alias needs the `SECRET_KEY` environment variable to be set in order to decrypt sensitive information. - -## Usage - -* Create the machine: `terraform apply` -* Show the machine's status: `terraform show` -* Stop and destroy the machine: `terraform destroy` -* SSH into the newly-created machine: - -``` -$ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no `terraform output username`@`terraform output public_ips` -``` - -or - -``` -source ../setup.sh -tf_ssh 1 # Or the nth machine, if multiple VMs are provisioned. -``` - -## Resources - -* [https://www.terraform.io/docs/providers/do/](https://www.terraform.io/docs/providers/do/) -* [https://www.terraform.io/docs/providers/do/r/droplet.html](https://www.terraform.io/docs/providers/do/r/droplet.html) -* [Terraform variables](https://www.terraform.io/intro/getting-started/variables.html) diff --git a/tools/provisioning/do/main.tf b/tools/provisioning/do/main.tf deleted file mode 100755 index 834cdf7d..00000000 --- a/tools/provisioning/do/main.tf +++ /dev/null @@ -1,42 +0,0 @@ -provider "digitalocean" { - # See README.md for setup instructions. -} - -# Tags to label and organize droplets: -resource "digitalocean_tag" "name" { - name = "${var.name}" -} - -resource "digitalocean_tag" "app" { - name = "${var.app}" -} - -resource "digitalocean_tag" "terraform" { - name = "terraform" -} - -resource "digitalocean_droplet" "tf_test_vm" { - ssh_keys = ["${var.do_public_key_id}"] - image = "${var.do_os}" - region = "${var.do_dc}" - size = "${var.do_size}" - name = "${var.name}-${count.index}" - count = "${var.num_hosts}" - - tags = [ - "${var.app}", - "${var.name}", - "terraform", - ] - - # Wait for machine to be SSH-able: - provisioner "remote-exec" { - inline = ["exit"] - - connection { - type = "ssh" - user = "${var.do_username}" - private_key = "${file("${var.do_private_key_path}")}" - } - } -} diff --git a/tools/provisioning/do/outputs.tf b/tools/provisioning/do/outputs.tf deleted file mode 100755 index 5f0ff455..00000000 --- a/tools/provisioning/do/outputs.tf +++ /dev/null @@ -1,57 +0,0 @@ -output "username" { - value = "${var.do_username}" -} - -output "public_ips" { - value = ["${digitalocean_droplet.tf_test_vm.*.ipv4_address}"] -} - -output "hostnames" { - value = "${join("\n", - "${formatlist("%v.%v.%v", - digitalocean_droplet.tf_test_vm.*.name, - digitalocean_droplet.tf_test_vm.*.region, - var.app - )}" - )}" -} - -# /etc/hosts file for the Droplets: -# N.B.: by default Digital Ocean droplets only have public IPs, but in order to -# be consistent with other providers' recipes, we provide an output to generate -# an /etc/hosts file on the Droplets, even though it is using public IPs only. 
-output "private_etc_hosts" { - value = "${join("\n", - "${formatlist("%v %v.%v.%v", - digitalocean_droplet.tf_test_vm.*.ipv4_address, - digitalocean_droplet.tf_test_vm.*.name, - digitalocean_droplet.tf_test_vm.*.region, - var.app - )}" - )}" -} - -# /etc/hosts file for the client: -output "public_etc_hosts" { - value = "${join("\n", - "${formatlist("%v %v.%v.%v", - digitalocean_droplet.tf_test_vm.*.ipv4_address, - digitalocean_droplet.tf_test_vm.*.name, - digitalocean_droplet.tf_test_vm.*.region, - var.app - )}" - )}" -} - -output "ansible_inventory" { - value = "${format("[all]\n%s", join("\n", - "${formatlist("%v private_ip=%v", - digitalocean_droplet.tf_test_vm.*.ipv4_address, - digitalocean_droplet.tf_test_vm.*.ipv4_address - )}" - ))}" -} - -output "private_key_path" { - value = "${var.do_private_key_path}" -} diff --git a/tools/provisioning/do/variables.tf b/tools/provisioning/do/variables.tf deleted file mode 100755 index 6f7f40ed..00000000 --- a/tools/provisioning/do/variables.tf +++ /dev/null @@ -1,185 +0,0 @@ -variable "client_ip" { - description = "IP address of the client machine" -} - -variable "app" { - description = "Name of the application using the created droplet(s)." - default = "default" -} - -variable "name" { - description = "Name of the droplet(s)." - default = "test" -} - -variable "num_hosts" { - description = "Number of droplet(s)." - default = 1 -} - -variable "do_private_key_path" { - description = "Digital Ocean SSH private key path" - default = "~/.ssh/id_rsa" -} - -variable "do_public_key_id" { - description = "Digital Ocean ID for your SSH public key" - - # You can retrieve it and set it as an environment variable this way: - - # $ export TF_VAR_do_public_key_id=$(curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/account/keys" | jq -c --arg key_name "$DIGITALOCEAN_SSH_KEY_NAME" '.ssh_keys | .[] | select(.name==$key_name) | .id') -} - -variable "do_username" { - description = "Digital Ocean SSH username" - default = "root" -} - -variable "do_os" { - description = "Digital Ocean OS" - default = "ubuntu-16-04-x64" -} - -# curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/images?page=1&per_page=999999" | jq ".images | .[] | .slug" | grep -P "ubuntu|coreos|centos" | grep -v alpha | grep -v beta -# "ubuntu-16-04-x32" -# "ubuntu-16-04-x64" -# "ubuntu-16-10-x32" -# "ubuntu-16-10-x64" -# "ubuntu-14-04-x32" -# "ubuntu-14-04-x64" -# "ubuntu-12-04-x64" -# "ubuntu-12-04-x32" -# "coreos-stable" -# "centos-6-5-x32" -# "centos-6-5-x64" -# "centos-7-0-x64" -# "centos-7-x64" -# "centos-6-x64" -# "centos-6-x32" -# "centos-5-x64" -# "centos-5-x32" - -# Digital Ocean datacenters -# See also: -# $ curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/regions" | jq -c ".regions | .[] | .slug" | sort -u - -variable "do_dc_ams2" { - description = "Digital Ocean Amsterdam Datacenter 2" - default = "ams2" -} - -variable "do_dc_ams3" { - description = "Digital Ocean Amsterdam Datacenter 3" - default = "ams3" -} - -variable "do_dc_blr1" { - description = "Digital Ocean Bangalore Datacenter 1" - default = "blr1" -} - -variable "do_dc_fra1" { - description = "Digital Ocean Frankfurt Datacenter 1" - default = "fra1" -} - -variable "do_dc_lon1" { - description = "Digital Ocean London Datacenter 1" - default = "lon1" -} - -variable "do_dc_nyc1" { - 
description = "Digital Ocean New York Datacenter 1" - default = "nyc1" -} - -variable "do_dc_nyc2" { - description = "Digital Ocean New York Datacenter 2" - default = "nyc2" -} - -variable "do_dc_nyc3" { - description = "Digital Ocean New York Datacenter 3" - default = "nyc3" -} - -variable "do_dc_sfo1" { - description = "Digital Ocean San Francisco Datacenter 1" - default = "sfo1" -} - -variable "do_dc_sfo2" { - description = "Digital Ocean San Francisco Datacenter 2" - default = "sfo2" -} - -variable "do_dc_sgp1" { - description = "Digital Ocean Singapore Datacenter 1" - default = "sgp1" -} - -variable "do_dc_tor1" { - description = "Digital Ocean Toronto Datacenter 1" - default = "tor1" -} - -variable "do_dc" { - description = "Digital Ocean's selected datacenter" - default = "lon1" -} - -variable "do_size" { - description = "Digital Ocean's selected machine size" - default = "4gb" -} - -# Digital Ocean sizes - - -# See also: - - -# $ curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/sizes" | jq -c ".sizes | .[] | .slug" - - -# "512mb" - - -# "1gb" - - -# "2gb" - - -# "4gb" - - -# "8gb" - - -# "16gb" - - -# "m-16gb" - - -# "32gb" - - -# "m-32gb" - - -# "48gb" - - -# "m-64gb" - - -# "64gb" - - -# "m-128gb" - - -# "m-224gb" - diff --git a/tools/provisioning/gcp/README.md b/tools/provisioning/gcp/README.md deleted file mode 100755 index b2d6622c..00000000 --- a/tools/provisioning/gcp/README.md +++ /dev/null @@ -1,126 +0,0 @@ -# Google Cloud Platform - -## Introduction - -This project allows you to get hold of some machine on Google Cloud Platform. -You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc. - -## Setup - -* Log in [console.cloud.google.com](https://console.cloud.google.com) with your Google account. - -* Go to `API Manager` > `Credentials` > `Create credentials` > `Service account key`, - in `Service account`, select `Compute Engine default service account`, - in `Key type`, select `JSON`, and then click `Create`. - -* This will download a JSON file to your machine. Place this file wherever you want and then create the following environment variables: - -``` -$ export GOOGLE_CREDENTIALS_FILE="path/to/your.json" -$ export GOOGLE_CREDENTIALS=$(cat "$GOOGLE_CREDENTIALS_FILE") -``` - -* Go to `Compute Engine` > `Metadata` > `SSH keys` and add your username and SSH public key; - or - set it up using `gcloud compute project-info add-metadata --metadata-from-file sshKeys=~/.ssh/id_rsa.pub`. - If you used your default SSH key (i.e. `~/.ssh/id_rsa.pub`), then you do not have anything to do. - Otherwise, you will have to either define the below environment variable: - -``` -$ export TF_VAR_gcp_public_key_path= -$ export TF_VAR_gcp_private_key_path= -``` - - or to pass these as Terraform variables: - -``` -$ terraform \ --var 'gcp_public_key_path=' \ --var 'gcp_private_key_path=' -``` - -* Set the username in your public key as an environment variable. - This will be used as the username of the Linux account created on the machine, which you will need to SSH into it later on. - - N.B.: - * GCP already has the username set from the SSH public key you uploaded in the previous step. - * If your username is an email address, e.g. `name@domain.com`, then GCP uses `name` as the username. 
- -``` -export TF_VAR_gcp_username= -``` - -* Set your current IP address as an environment variable: - -``` -export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/) -``` - - or pass it as a Terraform variable: - -``` -$ terraform -var 'client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)' -``` - -* Set your project as an environment variable: - -``` -export TF_VAR_gcp_project=weave-net-tests -``` - - or pass it as a Terraform variable: - -``` -$ terraform -var 'gcp_project=weave-net-tests' -``` - -### Bash aliases - -You can set the above variables temporarily in your current shell, permanently in your `~/.bashrc` file, or define aliases to activate/deactivate them at will with one single command by adding the below to your `~/.bashrc` file: - -``` -function _gcp_on() { - export GOOGLE_CREDENTIALS_FILE="&authuser=1 - region = "${var.gcp_region}" - - project = "${var.gcp_project}" -} - -resource "google_compute_instance" "tf_test_vm" { - name = "${var.name}-${count.index}" - machine_type = "${var.gcp_size}" - zone = "${var.gcp_zone}" - count = "${var.num_hosts}" - - disk { - image = "${var.gcp_image}" - } - - tags = [ - "${var.app}", - "${var.name}", - "terraform", - ] - - network_interface { - network = "${var.gcp_network}" - - access_config { - // Ephemeral IP - } - } - - metadata { - ssh-keys = "${var.gcp_username}:${file("${var.gcp_public_key_path}")}" - } - - # Wait for machine to be SSH-able: - provisioner "remote-exec" { - inline = ["exit"] - - connection { - type = "ssh" - user = "${var.gcp_username}" - private_key = "${file("${var.gcp_private_key_path}")}" - } - } -} - -resource "google_compute_firewall" "fw-allow-docker-and-weave" { - name = "${var.name}-allow-docker-and-weave" - network = "${var.gcp_network}" - target_tags = ["${var.name}"] - - allow { - protocol = "tcp" - ports = ["2375", "12375"] - } - - source_ranges = ["${var.client_ip}"] -} - -# Required for FastDP crypto in Weave Net: -resource "google_compute_firewall" "fw-allow-esp" { - name = "${var.name}-allow-esp" - network = "${var.gcp_network}" - target_tags = ["${var.name}"] - - allow { - protocol = "esp" - } - - source_ranges = ["${var.gcp_network_global_cidr}"] -} diff --git a/tools/provisioning/gcp/outputs.tf b/tools/provisioning/gcp/outputs.tf deleted file mode 100755 index 9aa1e33e..00000000 --- a/tools/provisioning/gcp/outputs.tf +++ /dev/null @@ -1,66 +0,0 @@ -output "username" { - value = "${var.gcp_username}" -} - -output "public_ips" { - value = ["${google_compute_instance.tf_test_vm.*.network_interface.0.access_config.0.assigned_nat_ip}"] -} - -output "hostnames" { - value = "${join("\n", - "${formatlist("%v.%v.%v", - google_compute_instance.tf_test_vm.*.name, - google_compute_instance.tf_test_vm.*.zone, - var.app - )}" - )}" -} - -# /etc/hosts file for the Compute Engine instances: -output "private_etc_hosts" { - value = "${join("\n", - "${formatlist("%v %v.%v.%v", - google_compute_instance.tf_test_vm.*.network_interface.0.address, - google_compute_instance.tf_test_vm.*.name, - google_compute_instance.tf_test_vm.*.zone, - var.app - )}" - )}" -} - -# /etc/hosts file for the client: -output "public_etc_hosts" { - value = "${join("\n", - "${formatlist("%v %v.%v.%v", - google_compute_instance.tf_test_vm.*.network_interface.0.access_config.0.assigned_nat_ip, - google_compute_instance.tf_test_vm.*.name, - google_compute_instance.tf_test_vm.*.zone, - var.app - )}" - )}" -} - -output "ansible_inventory" { - value = "${format("[all]\n%s", join("\n", - "${formatlist("%v private_ip=%v", - 
google_compute_instance.tf_test_vm.*.network_interface.0.access_config.0.assigned_nat_ip, - google_compute_instance.tf_test_vm.*.network_interface.0.address - )}" - ))}" -} - -output "private_key_path" { - value = "${var.gcp_private_key_path}" -} - -output "instances_names" { - value = ["${google_compute_instance.tf_test_vm.*.name}"] -} - -output "image" { - value = "${var.gcp_image}" -} - -output "zone" { - value = "${var.gcp_zone}" -} diff --git a/tools/provisioning/gcp/variables.tf b/tools/provisioning/gcp/variables.tf deleted file mode 100755 index 6b2027b2..00000000 --- a/tools/provisioning/gcp/variables.tf +++ /dev/null @@ -1,77 +0,0 @@ -variable "gcp_username" { - description = "Google Cloud Platform SSH username" -} - -variable "app" { - description = "Name of the application using the created Compute Engine instance(s)." - default = "default" -} - -variable "name" { - description = "Name of the Compute Engine instance(s)." - default = "test" -} - -variable "num_hosts" { - description = "Number of Compute Engine instance(s)." - default = 1 -} - -variable "client_ip" { - description = "IP address of the client machine" -} - -variable "gcp_public_key_path" { - description = "Path to file containing public key" - default = "~/.ssh/id_rsa.pub" -} - -variable "gcp_private_key_path" { - description = "Path to file containing private key" - default = "~/.ssh/id_rsa" -} - -variable "gcp_project" { - description = "Google Cloud Platform project" - default = "weave-net-tests" -} - -variable "gcp_image" { - # See also: https://cloud.google.com/compute/docs/images - # For example: - # - "ubuntu-os-cloud/ubuntu-1604-lts" - # - "debian-cloud/debian-8" - # - "centos-cloud/centos-7" - # - "rhel-cloud/rhel7" - description = "Google Cloud Platform OS" - - default = "ubuntu-os-cloud/ubuntu-1604-lts" -} - -variable "gcp_size" { - # See also: - # $ gcloud compute machine-types list - description = "Google Cloud Platform's selected machine size" - - default = "n1-standard-1" -} - -variable "gcp_region" { - description = "Google Cloud Platform's selected region" - default = "us-central1" -} - -variable "gcp_zone" { - description = "Google Cloud Platform's selected zone" - default = "us-central1-a" -} - -variable "gcp_network" { - description = "Google Cloud Platform's selected network" - default = "test" -} - -variable "gcp_network_global_cidr" { - description = "CIDR covering all regions for the selected Google Cloud Platform network" - default = "10.128.0.0/9" -} diff --git a/tools/provisioning/setup.sh b/tools/provisioning/setup.sh deleted file mode 100755 index 456878e0..00000000 --- a/tools/provisioning/setup.sh +++ /dev/null @@ -1,361 +0,0 @@ -#!/bin/bash -# -# Description: -# Helper functions to programmatically provision (e.g. for CIT). -# Aliases on these functions are also created so that this script can be -# sourced in your shell, in your ~/.bashrc file, etc. and directly called. -# -# Usage: -# Source this file and call the relevant functions. 
-# - -function ssh_public_key() { - echo -e "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDZBgLQts30PYXEMJnCU21QC+1ZE0Sv/Ry48Au3nYXn1KNoW/7C2qQ3KO2ZnpZRHCstFiU8QIlB9edi0cgcAoDWBkCiFBZEORxMvohWtrRQzf+x59o48lVjA/Fn7G+9hmavhLaDf6Qe7OhH8XUshNtnIQIUvNEWXKE75k32wUbuF8ibhJNpOOYKL4tVXK6IIKg6jR88BwGKPY/NZCl/HbhjnDJY0zCU1pZSprN6o/S953y/XXVozkh1772fCNeu4USfbt0oZOEJ57j6EWwEYIJhoeAEMAoD8ELt/bc/5iex8cuarM4Uib2JHO6WPWbBQ0NlrARIOKLrxkjjfGWarOLWBAgvwQn5zLg1pKb7aI4+jbA+ZSrII5B2HuYE9MDlU8NPL4pHrRfapGLkG/Fe9zNPvScXh+9iSWfD6G5ZoISutjiJO/iVYN0QSuj9QEIj9tl20czFz3Dhnq4sPPl5hoLunyQfajY7C/ipv6ilJyrEc0V6Z9FdPhpEI+HOgJr2vDQTFscQuyfWuzGJDZf6zPdZWo2pBql9E7piARuNAjakylGar/ebkCgfy28XQoDbDT0P0VYp+E8W5EYacx+zc5MuNhRTvbsO12fydT8V61MtA78wM/b0059feph+0zTykEHk670mYVoE3erZX+U1/BVBLSV9QzopO6/Pgx2ryriJfQ== weaveworks-cit" -} - -function decrypt() { - if [ -z "$1" ]; then - echo >&2 "Failed to decode and decrypt $2: no secret key was provided." - return 1 - fi - echo "$3" | openssl base64 -d | openssl enc -d -aes256 -pass "pass:$1" -} - -function ssh_private_key() { - # The private key has been AES256-encrypted and then Base64-encoded using the following command: - # $ openssl enc -in /tmp/weaveworks_cit_id_rsa -e -aes256 -pass stdin | openssl base64 > /tmp/weaveworks_cit_id_rsa.aes.b64 - # The below command does the reverse, i.e. base64-decode and AES-decrypt the file, and prints it to stdout. - # N.B.: Ask the password to Marc, or otherwise re-generate the SSH key using: - # $ ssh-keygen -t rsa -b 4096 -C "weaveworks-cit" - decrypt "$1" "SSH private key" "$( - cat <&2 "Failed to decode and decrypt SSH private key: no secret key was provided." - return 1 - fi - local ssh_private_key_path="$HOME/.ssh/weaveworks_cit_id_rsa" - [ -e "$ssh_private_key_path" ] && rm -f "$ssh_private_key_path" - ssh_private_key "$1" >"$ssh_private_key_path" - chmod 400 "$ssh_private_key_path" - echo "$ssh_private_key_path" -} - -function gcp_credentials() { - # The below GCP service account JSON credentials have been AES256-encrypted and then Base64-encoded using the following command: - # $ openssl enc -in ~/.ssh/weaveworks-cit.json -e -aes256 -pass stdin | openssl base64 > /tmp/weaveworks-cit.json.aes.b64 - # The below command does the reverse, i.e. base64-decode and AES-decrypt the file, and prints it to stdout. - # N.B.: Ask the password to Marc, or otherwise re-generate the credentials for GCP, as per ../tools/provisioning/gcp/README.md. - decrypt "$1" "JSON credentials" "$( - cat <&2 "Failed to configure for Digital Ocean: no value for the SECRET_KEY environment variable." - return 1 - fi - - # SSH public key: - export TF_VAR_do_public_key_path="$HOME/.ssh/weaveworks_cit_id_rsa.pub" - ssh_public_key >"$TF_VAR_do_public_key_path" - export DIGITALOCEAN_SSH_KEY_NAME="weaveworks-cit" - export TF_VAR_do_public_key_id=5228799 - - # SSH private key: - export TF_VAR_do_private_key_path=$(set_up_ssh_private_key "$SECRET_KEY") - - # API token: - # The below Digital Ocean token has been AES256-encrypted and then Base64-encoded using the following command: - # $ openssl enc -in /tmp/digital_ocean_token.txt -e -aes256 -pass stdin | openssl base64 > /tmp/digital_ocean_token.txt.aes.b64 - # The below command does the reverse, i.e. base64-decode and AES-decrypt the file, and prints it to stdout. - # N.B.: Ask the password to Marc, or otherwise re-generate the token for Digital Ocean, as per ../tools/provisioning/do/README.md. 
- export DIGITALOCEAN_TOKEN=$(decrypt "$SECRET_KEY" "Digital Ocean token" "U2FsdGVkX1/Gq5Rj9dDDraME8xK30JOyJ9dhfQzPBaaePJHqDPIG6of71DdJW0UyFUyRtbRflCPaZ8Um1pDJpU5LoNWQk4uCApC8+xciltT73uQtttLBG8FqgFBvYIHS") - export DIGITALOCEAN_TOKEN_NAME="weaveworks-cit" - export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/) -} -alias do_on='do_on' - -function do_off() { - unset TF_VAR_do_public_key_path - unset DIGITALOCEAN_SSH_KEY_NAME - unset TF_VAR_do_public_key_id - unset TF_VAR_do_private_key_path - unset DIGITALOCEAN_TOKEN - unset DIGITALOCEAN_TOKEN_NAME - unset TF_VAR_client_ip -} -alias do_off='do_off' - -# shellcheck disable=2155 -function gcp_on() { - # Set up everything required to run tests on GCP. - # Steps from ../tools/provisioning/gcp/README.md have been followed. - # All sensitive files have been encrypted, see respective functions. - if [ -z "$SECRET_KEY" ]; then - echo >&2 "Failed to configure for Google Cloud Platform: no value for the SECRET_KEY environment variable." - return 1 - fi - - # SSH public key and SSH username: - export TF_VAR_gcp_public_key_path="$HOME/.ssh/weaveworks_cit_id_rsa.pub" - ssh_public_key >"$TF_VAR_gcp_public_key_path" - export TF_VAR_gcp_username=$(cut -d' ' -f3 "$TF_VAR_gcp_public_key_path" | cut -d'@' -f1) - - # SSH private key: - export TF_VAR_gcp_private_key_path=$(set_up_ssh_private_key "$SECRET_KEY") - - # JSON credentials: - export GOOGLE_CREDENTIALS_FILE="$HOME/.ssh/weaveworks-cit.json" - [ -e "$GOOGLE_CREDENTIALS_FILE" ] && rm -f "$GOOGLE_CREDENTIALS_FILE" - gcp_credentials "$SECRET_KEY" >"$GOOGLE_CREDENTIALS_FILE" - chmod 400 "$GOOGLE_CREDENTIALS_FILE" - export GOOGLE_CREDENTIALS=$(cat "$GOOGLE_CREDENTIALS_FILE") - - export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/) - export TF_VAR_gcp_project="${PROJECT:-"weave-net-tests"}" - # shellcheck disable=2015 - [ -z "$PROJECT" ] && echo >&2 "WARNING: no value provided for PROJECT environment variable: defaulted it to $TF_VAR_gcp_project." || true -} -alias gcp_on='gcp_on' - -function gcp_off() { - unset TF_VAR_gcp_public_key_path - unset TF_VAR_gcp_username - unset TF_VAR_gcp_private_key_path - unset GOOGLE_CREDENTIALS_FILE - unset GOOGLE_CREDENTIALS - unset TF_VAR_client_ip - unset TF_VAR_gcp_project -} -alias gcp_off='gcp_off' - -# shellcheck disable=2155 -function aws_on() { - # Set up everything required to run tests on Amazon Web Services. - # Steps from ../tools/provisioning/aws/README.md have been followed. - # All sensitive files have been encrypted, see respective functions. - if [ -z "$SECRET_KEY" ]; then - echo >&2 "Failed to configure for Amazon Web Services: no value for the SECRET_KEY environment variable." - return 1 - fi - - # SSH public key: - export TF_VAR_aws_public_key_name="weaveworks_cit_id_rsa" - - # SSH private key: - export TF_VAR_aws_private_key_path=$(set_up_ssh_private_key "$SECRET_KEY") - - # The below AWS access key ID and secret access key have been AES256-encrypted and then Base64-encoded using the following commands: - # $ openssl enc -in /tmp/aws_access_key_id.txt -e -aes256 -pass stdin | openssl base64 > /tmp/aws_access_key_id.txt.aes.b64 - # $ openssl enc -in /tmp/aws_secret_access_key.txt -e -aes256 -pass stdin | openssl base64 > /tmp/aws_secret_access_key.txt.aes.b64 - # The below commands do the reverse, i.e. base64-decode and AES-decrypt the encrypted and encoded strings, and print it to stdout. 
- # N.B.: Ask the password to Marc, or otherwise re-generate the AWS access key ID and secret access key, as per ../tools/provisioning/aws/README.md. - export AWS_ACCESS_KEY_ID="$(decrypt "$SECRET_KEY" "AWS access key ID" "U2FsdGVkX18Txjm2PWSlJsToYm1vv4dMTtVLkRNiQbrC6Y6GuIHb1ao5MmGPJ1wf")" - export AWS_SECRET_ACCESS_KEY="$(decrypt "$SECRET_KEY" "AWS secret access key" "$( - cat <&2 <<-EOF -ERROR: $1 - -Usage: - \$ tf_ssh [OPTION]... -Examples: - \$ tf_ssh 1 - \$ tf_ssh 1 -o LogLevel VERBOSE - \$ tf_ssh 1 -i ~/.ssh/custom_private_key_id_rsa -Available machines: -EOF - cat -n >&2 <<<"$(terraform output public_etc_hosts)" -} - -# shellcheck disable=SC2155 -function tf_ssh() { - [ -z "$1" ] && tf_ssh_usage "No host ID provided." && return 1 - local ip="$(sed "$1q;d" <<<"$(terraform output public_etc_hosts)" | cut -d ' ' -f 1)" - shift # Drop the first argument, corresponding to the machine ID, to allow passing other arguments to SSH using "$@" -- see below. - [ -z "$ip" ] && tf_ssh_usage "Invalid host ID provided." && return 1 - # shellcheck disable=SC2029 - ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@" "$(terraform output username)@$ip" -} -alias tf_ssh='tf_ssh' - -function tf_ansi_usage() { - cat >&2 <<-EOF -ERROR: $1 - -Usage: - \$ tf_ansi [OPTION]... -Examples: - \$ tf_ansi setup_weave-net_dev - \$ tf_ansi 1 - \$ tf_ansi 1 -vvv --private-key=~/.ssh/custom_private_key_id_rsa - \$ tf_ansi setup_weave-kube --extra-vars "docker_version=1.12.6 kubernetes_version=1.5.6" -Available playbooks: -EOF - cat -n >&2 <<<"$(for file in "$(dirname "${BASH_SOURCE[0]}")"/../../config_management/*.yml; do basename "$file" | sed 's/.yml//'; done)" -} - -# shellcheck disable=SC2155,SC2064 -function tf_ansi() { - [ -z "$1" ] && tf_ansi_usage "No Ansible playbook provided." && return 1 - local id="$1" - shift # Drop the first argument to allow passing other arguments to Ansible using "$@" -- see below. - if [[ "$id" =~ ^[0-9]+$ ]]; then - local playbooks=(../../config_management/*.yml) - local path="${playbooks[(($id - 1))]}" # Select the ith entry in the list of playbooks (0-based). - else - local path="$(dirname "${BASH_SOURCE[0]}")/../../config_management/$id.yml" - fi - local inventory="$(mktemp /tmp/ansible_inventory_XXX)" - trap 'rm -f $inventory' SIGINT SIGTERM RETURN - echo -e "$(terraform output ansible_inventory)" >"$inventory" - [ ! -r "$path" ] && tf_ansi_usage "Ansible playbook not found: $path" && return 1 - ansible-playbook "$@" -u "$(terraform output username)" -i "$inventory" --ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" "$path" -} -alias tf_ansi='tf_ansi' diff --git a/tools/publish-site b/tools/publish-site deleted file mode 100755 index 10760404..00000000 --- a/tools/publish-site +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -set -e -set -o pipefail - -: "${PRODUCT:=}" - -fatal() { - echo "$@" >&2 - exit 1 -} - -if [ ! -d .git ]; then - fatal "Current directory is not a git clone" -fi - -if [ -z "${PRODUCT}" ]; then - fatal "Must specify PRODUCT" -fi - -if ! BRANCH=$(git symbolic-ref --short HEAD) || [ -z "$BRANCH" ]; then - fatal "Could not determine branch" -fi - -case "$BRANCH" in - issues/*) - VERSION="${BRANCH#issues/}" - TAGS="$VERSION" - ;; - *) - if echo "$BRANCH" | grep -qE '^[0-9]+\.[0-9]+'; then - DESCRIBE=$(git describe --match 'v*') - if ! 
VERSION=$(echo "$DESCRIBE" | grep -oP '(?<=^v)[0-9]+\.[0-9]+\.[0-9]+'); then - fatal "Could not infer latest $BRANCH version from $DESCRIBE" - fi - TAGS="$VERSION latest" - else - VERSION="$BRANCH" - TAGS="$VERSION" - fi - ;; -esac - -for TAG in $TAGS; do - echo ">>> Publishing $PRODUCT $VERSION to $1/docs/$PRODUCT/$TAG" - wordepress \ - --url "$1" --user "$2" --password "$3" \ - --product "$PRODUCT" --version "$VERSION" --tag "$TAG" \ - publish site -done diff --git a/tools/push-images b/tools/push-images deleted file mode 100755 index 60edac7c..00000000 --- a/tools/push-images +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail - -QUAY_PREFIX=quay.io/ -IMAGES=$(make images) -IMAGE_TAG=$(./tools/image-tag) - -usage() { - echo "$0 [-no-docker-hub]" -} - -NO_DOCKER_HUB= -while [ $# -gt 0 ]; do - case "$1" in - -no-docker-hub) - NO_DOCKER_HUB=1 - shift 1 - ;; - *) - usage - exit 2 - ;; - esac -done - -pids="" -for image in ${IMAGES}; do - if [[ "$image" == *"build"* ]]; then - continue - fi - echo "Will push ${image}:${IMAGE_TAG}" - docker push "${image}:${IMAGE_TAG}" & - pids="$pids $!" - - if [ -z "$NO_DOCKER_HUB" ]; then - # remove the quey prefix and push to docker hub - docker_hub_image=${image#$QUAY_PREFIX} - docker tag "${image}:${IMAGE_TAG}" "${docker_hub_image}:${IMAGE_TAG}" - echo "Will push ${docker_hub_image}:${IMAGE_TAG}" - docker push "${docker_hub_image}:${IMAGE_TAG}" & - pids="$pids $!" - fi -done - -# Wait individually for tasks so we fail-exit on any non-zero return code -for p in $pids; do - wait $p -done - -wait diff --git a/tools/rebuild-image b/tools/rebuild-image deleted file mode 100755 index 1f0bb109..00000000 --- a/tools/rebuild-image +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash -# Rebuild a cached docker image if the input files have changed. -# Usage: ./rebuild-image - -set -eux - -IMAGENAME=$1 -# shellcheck disable=SC2001 -SAVEDNAME=$(echo "$IMAGENAME" | sed "s/[\/\-]/\./g") -IMAGEDIR=$2 -shift 2 - -INPUTFILES=("$@") -CACHEDIR=$HOME/docker/ - -# Rebuild the image -rebuild() { - mkdir -p "$CACHEDIR" - rm "$CACHEDIR/$SAVEDNAME"* || true - docker build -t "$IMAGENAME" "$IMAGEDIR" - docker save "$IMAGENAME:latest" | gzip - >"$CACHEDIR/$SAVEDNAME-$CIRCLE_SHA1.gz" -} - -# Get the revision the cached image was build at -cached_image_rev() { - find "$CACHEDIR" -name "$SAVEDNAME-*" -type f | sed -n 's/^[^\-]*\-\([a-z0-9]*\).gz$/\1/p' -} - -# Have there been any revision between $1 and $2 -has_changes() { - local rev1=$1 - local rev2=$2 - local changes - changes=$(git diff --oneline "$rev1..$rev2" -- "${INPUTFILES[@]}" | wc -l) - [ "$changes" -gt 0 ] -} - -commit_timestamp() { - local rev=$1 - git show -s --format=%ct "$rev" -} - -# Is the SHA1 actually present in the repo? -# It could be it isn't, e.g. after a force push -is_valid_commit() { - local rev=$1 - git rev-parse --quiet --verify "$rev^{commit}" >/dev/null -} - -cached_revision=$(cached_image_rev) -if [ -z "$cached_revision" ]; then - echo ">>> No cached image found; rebuilding" - rebuild - exit 0 -fi - -if ! 
is_valid_commit "$cached_revision"; then - echo ">>> Git commit of cached image not found in repo; rebuilding" - rebuild - exit 0 -fi - -echo ">>> Found cached image rev $cached_revision" -if has_changes "$cached_revision" "$CIRCLE_SHA1"; then - echo ">>> Found changes, rebuilding" - rebuild - exit 0 -fi - -IMAGE_TIMEOUT="$((3 * 24 * 60 * 60))" -if [ "$(commit_timestamp "$cached_revision")" -lt "${IMAGE_TIMEOUT}" ]; then - echo ">>> Image is more the 24hrs old; rebuilding" - rebuild - exit 0 -fi - -# we didn't rebuild; import cached version -echo ">>> No changes found, importing cached image" -zcat "$CACHEDIR/$SAVEDNAME-$cached_revision.gz" | docker load diff --git a/tools/runner/Makefile b/tools/runner/Makefile deleted file mode 100644 index f19bcc7d..00000000 --- a/tools/runner/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -.PHONY: all clean - -all: runner - -runner: *.go - go get -tags netgo ./$(@D) - go build -ldflags "-extldflags \"-static\" -linkmode=external" -tags netgo -o $@ ./$(@D) - -clean: - rm -rf runner - go clean ./... diff --git a/tools/runner/runner.go b/tools/runner/runner.go deleted file mode 100644 index 38e5a62c..00000000 --- a/tools/runner/runner.go +++ /dev/null @@ -1,290 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/url" - "os" - "os/exec" - "sort" - "strconv" - "strings" - "sync" - "time" - - "github.com/mgutz/ansi" - "github.com/weaveworks/common/mflag" -) - -const ( - defaultSchedulerHost = "positive-cocoa-90213.appspot.com" - jsonContentType = "application/json" -) - -var ( - start = ansi.ColorCode("black+ub") - fail = ansi.ColorCode("red+b") - succ = ansi.ColorCode("green+b") - reset = ansi.ColorCode("reset") - - schedulerHost = defaultSchedulerHost - useScheduler = false - runParallel = false - verbose = false - timeout = 180 // In seconds. Three minutes ought to be enough for any test - - consoleLock = sync.Mutex{} -) - -type test struct { - name string - hosts int -} - -type schedule struct { - Tests []string `json:"tests"` -} - -type result struct { - test - errored bool - hosts []string -} - -type tests []test - -func (ts tests) Len() int { return len(ts) } -func (ts tests) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } -func (ts tests) Less(i, j int) bool { - if ts[i].hosts != ts[j].hosts { - return ts[i].hosts < ts[j].hosts - } - return ts[i].name < ts[j].name -} - -func (ts *tests) pick(available int) (test, bool) { - // pick the first test that fits in the available hosts - for i, test := range *ts { - if test.hosts <= available { - *ts = append((*ts)[:i], (*ts)[i+1:]...) 
- return test, true - } - } - - return test{}, false -} - -func (t test) run(hosts []string) bool { - consoleLock.Lock() - fmt.Printf("%s>>> Running %s on %s%s\n", start, t.name, hosts, reset) - consoleLock.Unlock() - - var out bytes.Buffer - - cmd := exec.Command(t.name) - cmd.Env = os.Environ() - cmd.Stdout = &out - cmd.Stderr = &out - - // replace HOSTS in env - for i, env := range cmd.Env { - if strings.HasPrefix(env, "HOSTS") { - cmd.Env[i] = fmt.Sprintf("HOSTS=%s", strings.Join(hosts, " ")) - break - } - } - - start := time.Now() - var err error - - c := make(chan error, 1) - go func() { c <- cmd.Run() }() - select { - case err = <-c: - case <-time.After(time.Duration(timeout) * time.Second): - err = fmt.Errorf("timed out") - } - - duration := float64(time.Now().Sub(start)) / float64(time.Second) - - consoleLock.Lock() - if err != nil { - fmt.Printf("%s>>> Test %s finished after %0.1f secs with error: %v%s\n", fail, t.name, duration, err, reset) - } else { - fmt.Printf("%s>>> Test %s finished with success after %0.1f secs%s\n", succ, t.name, duration, reset) - } - if err != nil || verbose { - fmt.Print(out.String()) - fmt.Println() - } - consoleLock.Unlock() - - if err != nil && useScheduler { - updateScheduler(t.name, duration) - } - - return err != nil -} - -func updateScheduler(test string, duration float64) { - req := &http.Request{ - Method: "POST", - Host: schedulerHost, - URL: &url.URL{ - Opaque: fmt.Sprintf("/record/%s/%0.2f", url.QueryEscape(test), duration), - Scheme: "http", - Host: schedulerHost, - }, - Close: true, - } - if resp, err := http.DefaultClient.Do(req); err != nil { - fmt.Printf("Error updating scheduler: %v\n", err) - } else { - resp.Body.Close() - } -} - -func getSchedule(tests []string) ([]string, error) { - var ( - userName = os.Getenv("CIRCLE_PROJECT_USERNAME") - project = os.Getenv("CIRCLE_PROJECT_REPONAME") - buildNum = os.Getenv("CIRCLE_BUILD_NUM") - testRun = userName + "-" + project + "-integration-" + buildNum - shardCount = os.Getenv("CIRCLE_NODE_TOTAL") - shardID = os.Getenv("CIRCLE_NODE_INDEX") - requestBody = &bytes.Buffer{} - ) - if err := json.NewEncoder(requestBody).Encode(schedule{tests}); err != nil { - return []string{}, err - } - url := fmt.Sprintf("http://%s/schedule/%s/%s/%s", schedulerHost, testRun, shardCount, shardID) - resp, err := http.Post(url, jsonContentType, requestBody) - if err != nil { - return []string{}, err - } - var sched schedule - if err := json.NewDecoder(resp.Body).Decode(&sched); err != nil { - return []string{}, err - } - return sched.Tests, nil -} - -func getTests(testNames []string) (tests, error) { - var err error - if useScheduler { - testNames, err = getSchedule(testNames) - if err != nil { - return tests{}, err - } - } - tests := tests{} - for _, name := range testNames { - parts := strings.Split(strings.TrimSuffix(name, "_test.sh"), "_") - numHosts, err := strconv.Atoi(parts[len(parts)-1]) - if err != nil { - numHosts = 1 - } - tests = append(tests, test{name, numHosts}) - fmt.Printf("Test %s needs %d hosts\n", name, numHosts) - } - return tests, nil -} - -func summary(tests, failed tests) { - if len(failed) > 0 { - fmt.Printf("%s>>> Ran %d tests, %d failed%s\n", fail, len(tests), len(failed), reset) - for _, test := range failed { - fmt.Printf("%s>>> Fail %s%s\n", fail, test.name, reset) - } - } else { - fmt.Printf("%s>>> Ran %d tests, all succeeded%s\n", succ, len(tests), reset) - } -} - -func parallel(ts tests, hosts []string) bool { - testsCopy := ts - sort.Sort(sort.Reverse(ts)) - resultsChan := 
make(chan result) - outstanding := 0 - failed := tests{} - for len(ts) > 0 || outstanding > 0 { - // While we have some free hosts, try and schedule - // a test on them - for len(hosts) > 0 { - test, ok := ts.pick(len(hosts)) - if !ok { - break - } - testHosts := hosts[:test.hosts] - hosts = hosts[test.hosts:] - - go func() { - errored := test.run(testHosts) - resultsChan <- result{test, errored, testHosts} - }() - outstanding++ - } - - // Otherwise, wait for the test to finish and return - // the hosts to the pool - result := <-resultsChan - hosts = append(hosts, result.hosts...) - outstanding-- - if result.errored { - failed = append(failed, result.test) - } - } - summary(testsCopy, failed) - return len(failed) > 0 -} - -func sequential(ts tests, hosts []string) bool { - failed := tests{} - for _, test := range ts { - if test.run(hosts) { - failed = append(failed, test) - } - } - summary(ts, failed) - return len(failed) > 0 -} - -func main() { - mflag.BoolVar(&useScheduler, []string{"scheduler"}, false, "Use scheduler to distribute tests across shards") - mflag.BoolVar(&runParallel, []string{"parallel"}, false, "Run tests in parallel on hosts where possible") - mflag.BoolVar(&verbose, []string{"v"}, false, "Print output from all tests (Also enabled via DEBUG=1)") - mflag.StringVar(&schedulerHost, []string{"scheduler-host"}, defaultSchedulerHost, "Hostname of scheduler.") - mflag.IntVar(&timeout, []string{"timeout"}, 180, "Max time to run one test for, in seconds") - mflag.Parse() - - if len(os.Getenv("DEBUG")) > 0 { - verbose = true - } - - testArgs := mflag.Args() - tests, err := getTests(testArgs) - if err != nil { - fmt.Printf("Error parsing tests: %v (%v)\n", err, testArgs) - os.Exit(1) - } - - hosts := strings.Fields(os.Getenv("HOSTS")) - maxHosts := len(hosts) - if maxHosts == 0 { - fmt.Print("No HOSTS specified.\n") - os.Exit(1) - } - - var errored bool - if runParallel { - errored = parallel(tests, hosts) - } else { - errored = sequential(tests, hosts) - } - - if errored { - os.Exit(1) - } -} diff --git a/tools/sched b/tools/sched deleted file mode 100755 index a282558f..00000000 --- a/tools/sched +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python -import sys, string, urllib -import requests -import optparse - -def test_time(target, test_name, runtime): - r = requests.post(target + "/record/%s/%f" % (urllib.quote(test_name, safe=""), runtime)) - print r.text.encode('utf-8') - assert r.status_code == 204 - -def test_sched(target, test_run, shard_count, shard_id): - tests = {'tests': string.split(sys.stdin.read())} - r = requests.post(target + "/schedule/%s/%d/%d" % (test_run, shard_count, shard_id), json=tests) - assert r.status_code == 200 - result = r.json() - for test in sorted(result['tests']): - print test.encode('utf-8') - -def usage(): - print "%s (--target=...) 
" % sys.argv[0] - print " time " - print " sched " - -def main(): - parser = optparse.OptionParser() - parser.add_option('--target', default="http://positive-cocoa-90213.appspot.com") - options, args = parser.parse_args() - if len(args) < 3: - usage() - sys.exit(1) - - if args[0] == "time": - test_time(options.target, args[1], float(args[2])) - elif args[0] == "sched": - test_sched(options.target, args[1], int(args[2]), int(args[3])) - else: - usage() - -if __name__ == '__main__': - main() diff --git a/tools/scheduler/.gitignore b/tools/scheduler/.gitignore deleted file mode 100644 index a65b4177..00000000 --- a/tools/scheduler/.gitignore +++ /dev/null @@ -1 +0,0 @@ -lib diff --git a/tools/scheduler/README.md b/tools/scheduler/README.md deleted file mode 100644 index 8489d787..00000000 --- a/tools/scheduler/README.md +++ /dev/null @@ -1,6 +0,0 @@ -To upload newer version: - -``` -pip install -r requirements.txt -t lib -appcfg.py update . -``` diff --git a/tools/scheduler/app.yaml b/tools/scheduler/app.yaml deleted file mode 100644 index 21f5f052..00000000 --- a/tools/scheduler/app.yaml +++ /dev/null @@ -1,15 +0,0 @@ -application: positive-cocoa-90213 -version: 1 -runtime: python27 -api_version: 1 -threadsafe: true - -handlers: -- url: .* - script: main.app - -libraries: -- name: webapp2 - version: latest -- name: ssl - version: latest diff --git a/tools/scheduler/appengine_config.py b/tools/scheduler/appengine_config.py deleted file mode 100644 index f4489ff9..00000000 --- a/tools/scheduler/appengine_config.py +++ /dev/null @@ -1,3 +0,0 @@ -from google.appengine.ext import vendor - -vendor.add('lib') diff --git a/tools/scheduler/cron.yaml b/tools/scheduler/cron.yaml deleted file mode 100644 index 652aed80..00000000 --- a/tools/scheduler/cron.yaml +++ /dev/null @@ -1,4 +0,0 @@ -cron: -- description: periodic gc - url: /tasks/gc - schedule: every 5 minutes diff --git a/tools/scheduler/main.py b/tools/scheduler/main.py deleted file mode 100644 index 3b540b54..00000000 --- a/tools/scheduler/main.py +++ /dev/null @@ -1,206 +0,0 @@ -import collections -import json -import logging -import operator -import re - -import flask -from oauth2client.client import GoogleCredentials -from googleapiclient import discovery - -from google.appengine.api import urlfetch -from google.appengine.ext import ndb - -app = flask.Flask('scheduler') -app.debug = True - -# We use exponential moving average to record -# test run times. Higher alpha discounts historic -# observations faster. -alpha = 0.3 - - -class Test(ndb.Model): - total_run_time = ndb.FloatProperty(default=0.) 
-    total_runs = ndb.IntegerProperty(default=0)
-
-    def parallelism(self):
-        name = self.key.string_id()
-        m = re.search('(\d+)_test.sh$', name)
-        if m is None:
-            return 1
-        else:
-            return int(m.group(1))
-
-    def cost(self):
-        p = self.parallelism()
-        logging.info("Test %s has parallelism %d and avg run time %s",
-                     self.key.string_id(), p, self.total_run_time)
-        return self.parallelism() * self.total_run_time
-
-
-class Schedule(ndb.Model):
-    shards = ndb.JsonProperty()
-
-
-@app.route('/record/<test_name>/<runtime>', methods=['POST'])
-@ndb.transactional
-def record(test_name, runtime):
-    test = Test.get_by_id(test_name)
-    if test is None:
-        test = Test(id=test_name)
-    test.total_run_time = (test.total_run_time *
-                           (1 - alpha)) + (float(runtime) * alpha)
-    test.total_runs += 1
-    test.put()
-    return ('', 204)
-
-
-@app.route(
-    '/schedule/<test_run>/<int:shard_count>/<int:shard>', methods=['POST'])
-def schedule(test_run, shard_count, shard):
-    # read tests from body
-    test_names = flask.request.get_json(force=True)['tests']
-
-    # first see if we have a scedule already
-    schedule_id = "%s-%d" % (test_run, shard_count)
-    schedule = Schedule.get_by_id(schedule_id)
-    if schedule is not None:
-        return flask.json.jsonify(tests=schedule.shards[str(shard)])
-
-    # if not, do simple greedy algorithm
-    test_times = ndb.get_multi(
-        ndb.Key(Test, test_name) for test_name in test_names)
-
-    def avg(test):
-        if test is not None:
-            return test.cost()
-        return 1
-
-    test_times = [(test_name, avg(test))
-                  for test_name, test in zip(test_names, test_times)]
-    test_times_dict = dict(test_times)
-    test_times.sort(key=operator.itemgetter(1))
-
-    shards = {i: [] for i in xrange(shard_count)}
-    while test_times:
-        test_name, time = test_times.pop()
-
-        # find shortest shard and put it in that
-        s, _ = min(
-            ((i, sum(test_times_dict[t] for t in shards[i]))
-             for i in xrange(shard_count)),
-            key=operator.itemgetter(1))
-
-        shards[s].append(test_name)
-
-    # atomically insert or retrieve existing schedule
-    schedule = Schedule.get_or_insert(schedule_id, shards=shards)
-    return flask.json.jsonify(tests=schedule.shards[str(shard)])
-
-
-FIREWALL_REGEXES = [
-    re.compile(
-        r'^(?P<network>\w+)-allow-(?P<type>\w+)-(?P<build>\d+)-(?P<index>\d+)$'
-    ),
-    re.compile(r'^(?P<network>\w+)-(?P<build>\d+)-(?P<index>\d+)-allow-'
-               r'(?P<type>[\w\-]+)$'),
-]
-NAME_REGEXES = [
-    re.compile(r'^host(?P<index>\d+)-(?P<build>\d+)-(?P<shard>\d+)$'),
-    re.compile(r'^test-(?P<build>\d+)-(?P<shard>\d+)-(?P<host>\d+)$'),
-]
-
-
-def _matches_any_regex(name, regexes):
-    for regex in regexes:
-        matches = regex.match(name)
-        if matches:
-            return matches
-
-
-PROJECTS = [
-    ('weaveworks/weave', 'weave-net-tests', 'us-central1-a', True),
-    ('weaveworks/weave', 'positive-cocoa-90213', 'us-central1-a', True),
-    ('weaveworks/scope', 'scope-integration-tests', 'us-central1-a', False),
-]
-
-
-@app.route('/tasks/gc')
-def gc():
-    # Get list of running VMs, pick build id out of VM name
-    credentials = GoogleCredentials.get_application_default()
-    compute = discovery.build('compute', 'v1', credentials=credentials)
-
-    for repo, project, zone, gc_fw in PROJECTS:
-        gc_project(compute, repo, project, zone, gc_fw)
-
-    return "Done"
-
-
-def gc_project(compute, repo, project, zone, gc_fw):
-    logging.info("GCing %s, %s, %s", repo, project, zone)
-    # Get list of builds, filter down to running builds:
-    running = _get_running_builds(repo)
-    # Stop VMs for builds that aren't running:
-    _gc_compute_engine_instances(compute, project, zone, running)
-    # Remove firewall rules for builds that aren't running:
-    if gc_fw:
-        _gc_firewall_rules(compute, project, running)
-
-
-def _get_running_builds(repo):
-    result = urlfetch.fetch(
-        'https://circleci.com/api/v1/project/%s' % repo,
-        headers={'Accept': 'application/json'})
-    assert result.status_code == 200
-    builds = json.loads(result.content)
-    running = {
-        build['build_num']
-        for build in builds if not build.get('stop_time')
-    }
-    logging.info("Runnings builds: %r", running)
-    return running
-
-
-def _get_hosts_by_build(instances):
-    host_by_build = collections.defaultdict(list)
-    for instance in instances['items']:
-        matches = _matches_any_regex(instance['name'], NAME_REGEXES)
-        if not matches:
-            continue
-        host_by_build[int(matches.group('build'))].append(instance['name'])
-    logging.info("Running VMs by build: %r", host_by_build)
-    return host_by_build
-
-
-def _gc_compute_engine_instances(compute, project, zone, running):
-    instances = compute.instances().list(project=project, zone=zone).execute()
-    if 'items' not in instances:
-        return
-    host_by_build = _get_hosts_by_build(instances)
-    stopped = []
-    for build, names in host_by_build.iteritems():
-        if build in running:
-            continue
-        for name in names:
-            stopped.append(name)
-            logging.info("Stopping VM %s", name)
-            compute.instances().delete(
-                project=project, zone=zone, instance=name).execute()
-    return stopped
-
-
-def _gc_firewall_rules(compute, project, running):
-    firewalls = compute.firewalls().list(project=project).execute()
-    if 'items' not in firewalls:
-        return
-    for firewall in firewalls['items']:
-        matches = _matches_any_regex(firewall['name'], FIREWALL_REGEXES)
-        if not matches:
-            continue
-        if int(matches.group('build')) in running:
-            continue
-        logging.info("Deleting firewall rule %s", firewall['name'])
-        compute.firewalls().delete(
-            project=project, firewall=firewall['name']).execute()
diff --git a/tools/scheduler/requirements.txt b/tools/scheduler/requirements.txt
deleted file mode 100644
index d4d47e6e..00000000
--- a/tools/scheduler/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-flask
-google-api-python-client
diff --git a/tools/shell-lint b/tools/shell-lint
deleted file mode 100755
index fffbb8b1..00000000
--- a/tools/shell-lint
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-#
-# Lint all shell files in given directories with `shellcheck`.
-#
-# e.g.
-# $ shell-lint infra k8s
-#
-# Depends on:
-# - shellcheck
-# - files-with-type
-# - file >= 5.22
-
-"$(dirname "${BASH_SOURCE[0]}")/files-with-type" text/x-shellscript "$@" | xargs --no-run-if-empty shellcheck
diff --git a/tools/test b/tools/test
deleted file mode 100755
index c87bdd07..00000000
--- a/tools/test
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/bin/bash
-
-set -e
-
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-SLOW=
-NO_GO_GET=true
-TAGS=
-PARALLEL=
-RACE="-race -covermode=atomic"
-TIMEOUT=1m
-
-usage() {
-    echo "$0 [-slow] [-in-container foo] [-netgo] [-(no-)go-get] [-timeout 1m]"
-}
-
-while [ $# -gt 0 ]; do
-    case "$1" in
-        "-slow")
-            SLOW=true
-            shift 1
-            ;;
-        "-no-race")
-            RACE=
-            shift 1
-            ;;
-        "-no-go-get")
-            NO_GO_GET=true
-            shift 1
-            ;;
-        "-go-get")
-            NO_GO_GET=
-            shift 1
-            ;;
-        "-netgo")
-            TAGS="netgo"
-            shift 1
-            ;;
-        "-tags")
-            TAGS="$2"
-            shift 2
-            ;;
-        "-p")
-            PARALLEL=true
-            shift 1
-            ;;
-        "-timeout")
-            TIMEOUT=$2
-            shift 2
-            ;;
-        *)
-            usage
-            exit 2
-            ;;
-    esac
-done
-
-GO_TEST_ARGS=(-tags "${TAGS[@]}" -cpu 4 -timeout $TIMEOUT)
-
-if [ -n "$SLOW" ] || [ -n "$CIRCLECI" ]; then
-    SLOW=true
-fi
-
-if [ -n "$SLOW" ]; then
-    GO_TEST_ARGS=("${GO_TEST_ARGS[@]}" ${RACE})
-
-    # shellcheck disable=SC2153
-    if [ -n "$COVERDIR" ]; then
-        coverdir="$COVERDIR"
-    else
-        coverdir=$(mktemp -d coverage.XXXXXXXXXX)
-    fi
-
-    mkdir -p "$coverdir"
-fi
-
-fail=0
-
-if [ -z "$TESTDIRS" ]; then
-    # NB: Relies on paths being prefixed with './'.
-    TESTDIRS=($(git ls-files -- '*_test.go' | grep -vE '^(vendor|experimental)/' | xargs -n1 dirname | sort -u | sed -e 's|^|./|'))
-else
-    # TESTDIRS on the right side is not really an array variable, it
-    # is just a string with spaces, but it is written like that to
-    # shut up the shellcheck tool.
-    TESTDIRS=($(for d in ${TESTDIRS[*]}; do echo "$d"; done))
-fi
-
-# If running on circle, use the scheduler to work out what tests to run on what shard
-if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ] && [ -x "$DIR/sched" ]; then
-    PREFIX=$(go list -e ./ | sed -e 's/\//-/g')
-    TESTDIRS=($(echo "${TESTDIRS[@]}" | "$DIR/sched" sched "$PREFIX-$CIRCLE_PROJECT_USERNAME-$CIRCLE_PROJECT_REPONAME-$CIRCLE_BUILD_NUM" "$CIRCLE_NODE_TOTAL" "$CIRCLE_NODE_INDEX"))
-    echo "${TESTDIRS[@]}"
-fi
-
-PACKAGE_BASE=$(go list -e ./)
-
-# Speed up the tests by compiling and installing their dependencies first.
-go test -i "${GO_TEST_ARGS[@]}" "${TESTDIRS[@]}"
-
-run_test() {
-    local dir=$1
-    if [ -z "$NO_GO_GET" ]; then
-        go get -t -tags "${TAGS[@]}" "$dir"
-    fi
-
-    local GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}")
-    if [ -n "$SLOW" ]; then
-        local COVERPKGS
-        COVERPKGS=$( (
-            go list "$dir"
-            go list -f '{{join .Deps "\n"}}' "$dir" | grep -v "vendor" | grep "^$PACKAGE_BASE/"
-        ) | paste -s -d, -)
-        local output
-        output=$(mktemp "$coverdir/unit.XXXXXXXXXX")
-        local GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}" -coverprofile=$output -coverpkg=$COVERPKGS)
-    fi
-
-    local START
-    START=$(date +%s)
-    if ! go test "${GO_TEST_ARGS_RUN[@]}" "$dir"; then
-        fail=1
-    fi
-    local END
-    END=$(date +%s)
-    local RUNTIME=$((END - START))
-
-    # Report test runtime when running on circle, to help scheduler
-    if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ] && [ -x "$DIR/sched" ]; then
-        "$DIR/sched" time "$dir" "$RUNTIME"
-    fi
-}
-
-for dir in "${TESTDIRS[@]}"; do
-    if [ -n "$PARALLEL" ]; then
-        run_test "$dir" &
-    else
-        run_test "$dir"
-    fi
-done
-
-if [ -n "$PARALLEL" ]; then
-    wait
-fi
-
-if [ -n "$SLOW" ] && [ -z "$COVERDIR" ]; then
-    go get github.com/weaveworks/tools/cover
-    cover "$coverdir"/* >profile.cov
-    rm -rf "$coverdir"
-    go tool cover -html=profile.cov -o=coverage.html
-    go tool cover -func=profile.cov | tail -n1
-fi
-
-exit $fail
diff --git a/tox.ini b/tox.ini
index c1598101..d5163f50 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,10 +4,10 @@
 # and then run "tox" from this directory.
 
 [tox]
-envlist = py27, py34, py35, py36, py37
+envlist = py37, py38, py39, py310, py311
 
 [testenv]
-commands = pytest --junitxml=junit-{envname}.xml
+commands = pytest -o junit_family=xunit2 --junitxml=test-results/junit-{envname}.xml
 deps =
     pytest
 
@@ -16,7 +16,7 @@ deps =
     coverage
     pytest
 commands =
-    python -m coverage run --rcfile=.coveragerc -m pytest --strict --maxfail=1 --ff {posargs}
+    python -m coverage run --rcfile=.coveragerc -m pytest --strict-markers --maxfail=1 --ff {posargs}
     # Had 88% test coverage at time of introducing coverage ratchet.
     # This number must only go up.
    python -m coverage report --rcfile=.coveragerc --show-missing --fail-under=88