From 5aa1f995f8cecb732e8ef83f605e67c8bdd52303 Mon Sep 17 00:00:00 2001 From: Andreas Motl Date: Sat, 11 Dec 2021 18:52:08 +0100 Subject: [PATCH] Add subcommand `info` This will display the Grafana version and statistics about all entities. --- CHANGES.rst | 1 + README.rst | 8 ++ doc/backlog.rst | 10 +- grafana_wtf/commands.py | 15 ++- grafana_wtf/core.py | 224 ++++++++++++++++++++++++++-------------- grafana_wtf/model.py | 17 ++- tests/conftest.py | 4 +- tests/test_commands.py | 15 +++ 8 files changed, 207 insertions(+), 87 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index fec9c4c..a619355 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -9,6 +9,7 @@ in progress - Add subcommand ``explore dashboards``, e.g. for discovering dashboards using missing data sources. - CI/GHA test matrix: Use Grafana 7.5.12 and 8.3.2 +- Add subcommand ``info``, to display Grafana version and statistics about all entities 2021-12-10 0.11.1 ================= diff --git a/README.rst b/README.rst index f6274c5..748ea04 100644 --- a/README.rst +++ b/README.rst @@ -161,6 +161,14 @@ How to find dashboards which use non-existing data sources? grafana-wtf explore dashboards --format=json | jq '.[] | select( .datasources_missing ) | .dashboard + {ds_missing: .datasources_missing[] | [.name]}' +Display common information and statistics +========================================= +:: + + grafana-wtf info + + + ******** Examples ******** diff --git a/doc/backlog.rst b/doc/backlog.rst index 6d02691..12970e6 100644 --- a/doc/backlog.rst +++ b/doc/backlog.rst @@ -6,9 +6,6 @@ grafana-wtf backlog ****** Prio 1 ****** -- [o] Add subcommand ``explore dashboards`` -- [o] Number of dashboards, users, and playlists - -- via: https://grafana.com/docs/grafana/latest/administration/view-server/internal-metrics/ - [o] Dockerize - [o] Statistics reports for data sources and panels: https://github.com/panodata/grafana-wtf/issues/18 - [o] Finding invalid data sources: https://github.com/panodata/grafana-wtf/issues/19 @@ -17,6 +14,9 @@ Prio 1 ******** Prio 1.5 ******** +- [o] Check if we can collect metrics from Grafana + - https://grafana.com/docs/grafana/latest/administration/view-server/internal-metrics/ + - https://grafana.com/docs/grafana/latest/developers/plugins/backend/#collect-metrics - [o] Add test fixture which completely resets everything in Grafana before running the test harness. Move to a different port than 3000 then! 
 - [o] Improve output format handling and error cases
@@ -50,3 +50,7 @@ Done
 - [x] Document "replace" feature in README
 - [x] AttributeError: https://github.com/panodata/grafana-wtf/issues/17
 - [/] Repair ``log`` subcommand
+- [x] Add subcommand ``explore dashboards``
+- [x] Add subcommand ``info``
+  - Display Grafana version: https://grafana.com/docs/grafana/latest/http_api/other/#health-api
+  - Display number of dashboards, folders, users, and playlists
diff --git a/grafana_wtf/commands.py b/grafana_wtf/commands.py
index 73812df..c88cec4 100644
--- a/grafana_wtf/commands.py
+++ b/grafana_wtf/commands.py
@@ -13,7 +13,7 @@
 from docopt import docopt, DocoptExit
 
 from grafana_wtf import __appname__, __version__
-from grafana_wtf.core import GrafanaSearch
+from grafana_wtf.core import GrafanaWtf
 from grafana_wtf.report import WtfReport
 from grafana_wtf.tabular_report import TabularReport
 from grafana_wtf.util import normalize_options, setup_logging, configure_http_logging, read_list, yaml_dump
@@ -24,11 +24,12 @@ def run():
     """
     Usage:
+      grafana-wtf [options] info
+      grafana-wtf [options] explore datasources
+      grafana-wtf [options] explore dashboards
       grafana-wtf [options] find [<search-expression>]
       grafana-wtf [options] replace <search-expression> <replacement-expression>
       grafana-wtf [options] log [<dashboard_uid>] [--number=<count>]
-      grafana-wtf [options] explore datasources
-      grafana-wtf [options] explore dashboards
       grafana-wtf --version
       grafana-wtf (-h | --help)
@@ -145,7 +146,7 @@ def run():
     if grafana_url is None:
         raise DocoptExit('No Grafana URL given. Please use "--grafana-url" option or environment variable "GRAFANA_URL".')
 
-    engine = GrafanaSearch(grafana_url, grafana_token)
+    engine = GrafanaWtf(grafana_url, grafana_token)
     engine.enable_cache(expire_after=cache_ttl, drop_cache=options['drop-cache'])
     engine.enable_concurrency(int(options['concurrency']))
     engine.setup()
@@ -164,7 +165,7 @@ def run():
 
         else:
             # Scan everything.
- engine.scan() + engine.scan_common() result = engine.search(options.search_expression or None) @@ -218,6 +219,10 @@ def run(): results = engine.explore_dashboards() output_results(output_format, results) + if options.info: + response = engine.info() + output_results(output_format, response) + def output_results(output_format: str, results: List): if output_format == "json": diff --git a/grafana_wtf/core.py b/grafana_wtf/core.py index 8d9bdab..4c721c0 100644 --- a/grafana_wtf/core.py +++ b/grafana_wtf/core.py @@ -13,7 +13,7 @@ from urllib.parse import urlparse, urljoin from concurrent.futures.thread import ThreadPoolExecutor -from grafana_wtf.model import DatasourceExplorationItem, DashboardExplorationItem +from grafana_wtf.model import DatasourceExplorationItem, DashboardExplorationItem, GrafanaDataModel from grafana_wtf.monkey import monkeypatch_grafana_api # Apply monkeypatch to grafana-api # https://github.com/m0nhawk/grafana_api/pull/85/files @@ -27,14 +27,14 @@ log = logging.getLogger(__name__) -class GrafanaSearch: +class GrafanaEngine: def __init__(self, grafana_url, grafana_token): self.grafana_url = grafana_url self.grafana_token = grafana_token self.grafana = None - self.data = Munch(datasources=[], dashboard_list=[], dashboards=[]) + self.data = GrafanaDataModel() self.finder = JsonPathFinder() @@ -100,83 +100,44 @@ def start_progressbar(self, total): if self.progressbar: self.taqadum = tqdm(total=total) - def search(self, expression): - log.info('Searching Grafana at "{}" for expression "{}"'.format(self.grafana_url, expression)) - - results = Munch(datasources=[], dashboard_list=[], dashboards=[]) - - # Check datasources - log.info('Searching data sources') - self.search_items(expression, self.data.datasources, results.datasources) - - # Check dashboards - log.info('Searching dashboards') - self.search_items(expression, self.data.dashboards, results.dashboards) - - return results - - def replace(self, expression, replacement): - log.info(f'Replacing "{expression}" by "{replacement}" within Grafana at "{self.grafana_url}"') - for dashboard in self.data.dashboards: - payload_before = json.dumps(dashboard) - payload_after = payload_before.replace(expression, replacement) - if payload_before == payload_after: - log.info(f'No replacements for dashboard with uid "{dashboard.dashboard.uid}"') - continue - dashboard_new = json.loads(payload_after) - dashboard_new['message'] = f'grafana-wtf: Replaced "{expression}" by "{replacement}"' - self.grafana.dashboard.update_dashboard(dashboard=dashboard_new) - - def log(self, dashboard_uid=None): - if dashboard_uid: - what = 'Grafana dashboard "{}"'.format(dashboard_uid) - else: - what = 'multiple Grafana dashboards' - log.info('Aggregating edit history for {what} at {url}'.format(what=what, url=self.grafana_url)) + def scan_common(self): + self.scan_dashboards() + self.scan_datasources() - entries = [] - for dashboard_meta in self.data.dashboard_list: - if dashboard_uid is not None and dashboard_meta['uid'] != dashboard_uid: - continue + def scan_all(self): + self.scan_common() + self.scan_admin_stats() + self.scan_folders() + self.scan_organizations() + self.scan_users() + self.scan_teams() + self.scan_annotations() + self.scan_snapshots() + self.scan_notifications() - #print(dashboard_meta) + def scan_admin_stats(self): + self.data.admin_stats = self.grafana.admin.stats() - dashboard_versions = self.get_dashboard_versions(dashboard_meta['id']) - for dashboard_revision in dashboard_versions: - entry = OrderedDict( - 
datetime=dashboard_revision['created'], - user=dashboard_revision['createdBy'], - message=dashboard_revision['message'], - folder=dashboard_meta.get('folderTitle'), - title=dashboard_meta['title'], - version=dashboard_revision['version'], - url=urljoin(self.grafana_url, dashboard_meta['url']) - ) - entries.append(entry) + def scan_folders(self): + self.data.folders = self.grafana.folder.get_all_folders() - return entries + def scan_organizations(self): + self.data.organizations = self.grafana.organizations.list_organization() - def search_items(self, expression, items, results): - for item in items: - effective_item = None - if expression is None: - effective_item = munchify({'meta': {}, 'data': item}) - else: - matches = self.finder.find(expression, item) - if matches: - effective_item = munchify({'meta': {'matches': matches}, 'data': item}) + def scan_users(self): + self.data.users = self.grafana.users.search_users() - if effective_item: - results.append(effective_item) + def scan_teams(self): + self.data.teams = self.grafana.teams.search_teams() - def scan(self): + def scan_annotations(self): + self.data.annotations = self.grafana.annotations.get_annotation() - # TODO: Folders? - # folders = self.grafana.folder.get_all_folders() - # print(folders) + def scan_snapshots(self): + self.data.snapshots = self.grafana.snapshots.get_dashboard_snapshots() - self.scan_datasources() - self.scan_dashboards() + def scan_notifications(self): + self.data.notifications = self.grafana.notifications.lookup_channels() def scan_datasources(self): log.info('Scanning datasources') @@ -191,10 +152,6 @@ def scan_datasources(self): log.error(self.get_red_message('Please use --grafana-token or GRAFANA_TOKEN ' 'for authenticating with Grafana')) - @staticmethod - def get_red_message(message): - return colored.stylize(message, colored.fg("red") + colored.attr("bold")) - def scan_dashboards(self, dashboard_uids=None): log.info('Scanning dashboards') @@ -278,6 +235,121 @@ async def execute_parallel(self): #for response in await asyncio.gather(*tasks): # pass + +class GrafanaWtf(GrafanaEngine): + + def info(self): + + try: + health = self.grafana.api.GET("/health") + except Exception as ex: + log.error(f"Request to /health endpoint failed: {ex}") + health = {} + + response = OrderedDict( + grafana=OrderedDict( + version=health.get("version"), + ), + statistics=OrderedDict(), + summary=OrderedDict(), + ) + + try: + self.scan_all() + + response["statistics"] = self.data.admin_stats + + # Compute dashboards without folders. + dashboards_wo_folders = [db for db in self.data.dashboards if not db.meta.isFolder] + + # Add summary information. 
+ response["summary"]["annotations"] = len(self.data.annotations) + response["summary"]["dashboards"] = len(dashboards_wo_folders) + response["summary"]["datasources"] = len(self.data.datasources) + response["summary"]["folders"] = len(self.data.folders) + response["summary"]["notifications"] = len(self.data.notifications) + response["summary"]["organizations"] = len(self.data.organizations) + response["summary"]["snapshots"] = len(self.data.snapshots) + response["summary"]["teams"] = len(self.data.teams) + response["summary"]["users"] = len(self.data.users) + except Exception as ex: + log.error(f"Scanning resources failed: {ex}") + + return response + + def search(self, expression): + log.info('Searching Grafana at "{}" for expression "{}"'.format(self.grafana_url, expression)) + + results = Munch(datasources=[], dashboard_list=[], dashboards=[]) + + # Check datasources + log.info('Searching data sources') + self.search_items(expression, self.data.datasources, results.datasources) + + # Check dashboards + log.info('Searching dashboards') + self.search_items(expression, self.data.dashboards, results.dashboards) + + return results + + def replace(self, expression, replacement): + log.info(f'Replacing "{expression}" by "{replacement}" within Grafana at "{self.grafana_url}"') + for dashboard in self.data.dashboards: + payload_before = json.dumps(dashboard) + payload_after = payload_before.replace(expression, replacement) + if payload_before == payload_after: + log.info(f'No replacements for dashboard with uid "{dashboard.dashboard.uid}"') + continue + dashboard_new = json.loads(payload_after) + dashboard_new['message'] = f'grafana-wtf: Replaced "{expression}" by "{replacement}"' + self.grafana.dashboard.update_dashboard(dashboard=dashboard_new) + + def log(self, dashboard_uid=None): + if dashboard_uid: + what = 'Grafana dashboard "{}"'.format(dashboard_uid) + else: + what = 'multiple Grafana dashboards' + log.info('Aggregating edit history for {what} at {url}'.format(what=what, url=self.grafana_url)) + + entries = [] + for dashboard_meta in self.data.dashboard_list: + if dashboard_uid is not None and dashboard_meta['uid'] != dashboard_uid: + continue + + #print(dashboard_meta) + + dashboard_versions = self.get_dashboard_versions(dashboard_meta['id']) + for dashboard_revision in dashboard_versions: + entry = OrderedDict( + datetime=dashboard_revision['created'], + user=dashboard_revision['createdBy'], + message=dashboard_revision['message'], + folder=dashboard_meta.get('folderTitle'), + title=dashboard_meta['title'], + version=dashboard_revision['version'], + url=urljoin(self.grafana_url, dashboard_meta['url']) + ) + entries.append(entry) + + return entries + + def search_items(self, expression, items, results): + for item in items: + effective_item = None + if expression is None: + effective_item = munchify({'meta': {}, 'data': item}) + else: + matches = self.finder.find(expression, item) + if matches: + effective_item = munchify({'meta': {'matches': matches}, 'data': item}) + + if effective_item: + results.append(effective_item) + + @staticmethod + def get_red_message(message): + return colored.stylize(message, colored.fg("red") + colored.attr("bold")) + def get_dashboard_versions(self, dashboard_id): # https://grafana.com/docs/http_api/dashboard_versions/ get_dashboard_versions_path = '/dashboards/id/%s/versions' % dashboard_id @@ -353,7 +425,7 @@ def explore_dashboards(self): class Indexer: - def __init__(self, engine: GrafanaSearch): + def __init__(self, engine: GrafanaWtf): self.engine = 
engine

         # Prepare index data structures.
diff --git a/grafana_wtf/model.py b/grafana_wtf/model.py
index 5bb32eb..fd273da 100644
--- a/grafana_wtf/model.py
+++ b/grafana_wtf/model.py
@@ -1,11 +1,26 @@
 import dataclasses
-from typing import List
+from typing import List, Optional, Dict
 
 from munch import Munch
 from collections import OrderedDict
 from urllib.parse import urljoin
 
 
+@dataclasses.dataclass
+class GrafanaDataModel:
+    admin_stats: Optional[Dict] = dataclasses.field(default_factory=dict)
+    dashboards: Optional[List[Munch]] = dataclasses.field(default_factory=list)
+    dashboard_list: Optional[List[Munch]] = dataclasses.field(default_factory=list)
+    datasources: Optional[List[Munch]] = dataclasses.field(default_factory=list)
+    folders: Optional[List[Munch]] = dataclasses.field(default_factory=list)
+    organizations: Optional[List[Munch]] = dataclasses.field(default_factory=list)
+    users: Optional[List[Munch]] = dataclasses.field(default_factory=list)
+    teams: Optional[List[Munch]] = dataclasses.field(default_factory=list)
+    annotations: Optional[List[Munch]] = dataclasses.field(default_factory=list)
+    snapshots: Optional[List[Munch]] = dataclasses.field(default_factory=list)
+    notifications: Optional[List[Munch]] = dataclasses.field(default_factory=list)
+
+
 @dataclasses.dataclass
 class DatasourceExplorationItem:
     datasource: Munch
diff --git a/tests/conftest.py b/tests/conftest.py
index b6559a2..1702178 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -5,7 +5,7 @@
 import pytest
 from grafana_api.grafana_api import GrafanaClientError
 
-from grafana_wtf.core import GrafanaSearch
+from grafana_wtf.core import GrafanaWtf
 
 
 def clean_environment():
@@ -44,7 +44,7 @@ def docker_grafana(docker_services):
 def create_datasource(docker_grafana):
     # https://docs.pytest.org/en/4.6.x/fixture.html#factories-as-fixtures
     def _create_datasource(name: str, type: str, access: str):
-        grafana = GrafanaSearch.grafana_client_factory(docker_grafana)
+        grafana = GrafanaWtf.grafana_client_factory(docker_grafana)
         # TODO: Add fixture which completely resets everything in Grafana before running the test harness.
         #       Move to a different port than 3000 then!
         try:
diff --git a/tests/test_commands.py b/tests/test_commands.py
index b170021..c5acab9 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -226,3 +226,18 @@ def find_all_missing_datasources(data):
         if "datasources_missing" in item:
             missing_names += map(operator.itemgetter("name"), item["datasources_missing"])
     return missing_names
+
+
+def test_info(docker_grafana, capsys, caplog):
+
+    # Which subcommand to test?
+    set_command("info", "--format=yaml")
+
+    # Run command and capture YAML output.
+    with caplog.at_level(logging.DEBUG):
+        grafana_wtf.commands.run()
+    captured = capsys.readouterr()
+    data = yaml.safe_load(captured.out)
+
+    # Prove the output is correct.
+    assert list(data.keys()) == ["grafana", "statistics", "summary"]
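
A minimal sketch of driving the new ``info`` machinery programmatically, loosely
mirroring what the subcommand handler in ``commands.py`` does. The Grafana URL,
the absent API token, and the concurrency value are assumptions, adjust them to
your environment::

    from grafana_wtf.core import GrafanaWtf

    # Hypothetical local Grafana instance, without authentication.
    engine = GrafanaWtf("http://localhost:3000", None)
    engine.enable_concurrency(5)
    engine.setup()

    # `info()` scans dashboards, datasources, folders, users, teams, and friends,
    # and returns a mapping with the keys "grafana", "statistics", and "summary".
    response = engine.info()
    print(response["grafana"]["version"])
    print(response["summary"]["dashboards"])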