Skip to content

Commit

Permalink
Add subcommand info
Browse files Browse the repository at this point in the history
This will display the Grafana version and statistics about all entities.
  • Loading branch information
amotl committed Dec 11, 2021
1 parent 8a0f12d commit 5aa1f99
Show file tree
Hide file tree
Showing 8 changed files with 207 additions and 87 deletions.
1 change: 1 addition & 0 deletions CHANGES.rst
Expand Up @@ -9,6 +9,7 @@ in progress
- Add subcommand ``explore dashboards``, e.g. for discovering dashboards using
missing data sources.
- CI/GHA test matrix: Use Grafana 7.5.12 and 8.3.2
- Add subcommand ``info`` to display the Grafana version and statistics about all entities

2021-12-10 0.11.1
=================
Expand Down
8 changes: 8 additions & 0 deletions README.rst
Expand Up @@ -161,6 +161,14 @@ How to find dashboards which use non-existing data sources?
grafana-wtf explore dashboards --format=json | jq '.[] | select( .datasources_missing ) | .dashboard + {ds_missing: .datasources_missing[] | [.name]}'


Display common information and statistics
=========================================
::

grafana-wtf info



********
Examples
********
Expand Down
10 changes: 7 additions & 3 deletions doc/backlog.rst
Expand Up @@ -6,9 +6,6 @@ grafana-wtf backlog
******
Prio 1
******
- [o] Add subcommand ``explore dashboards``
- [o] Number of dashboards, users, and playlists
-- via: https://grafana.com/docs/grafana/latest/administration/view-server/internal-metrics/
- [o] Dockerize
- [o] Statistics reports for data sources and panels: https://github.com/panodata/grafana-wtf/issues/18
- [o] Finding invalid data sources: https://github.com/panodata/grafana-wtf/issues/19
Expand All @@ -17,6 +14,9 @@ Prio 1
********
Prio 1.5
********
- [o] Check if we can collect metrics from Grafana
- https://grafana.com/docs/grafana/latest/administration/view-server/internal-metrics/
- https://grafana.com/docs/grafana/latest/developers/plugins/backend/#collect-metrics
- [o] Add test fixture which completely resets everything in Grafana before running the test harness.
Move to a different port than 3000 then!
- [o] Improve output format handling and error cases
Expand Down Expand Up @@ -50,3 +50,7 @@ Done
- [x] Document "replace" feature in README
- [x] AttributeError: https://github.com/panodata/grafana-wtf/issues/17
- [/] Repair ``log`` subcommand
- [x] Add subcommand ``explore dashboards``
- [x] Add subcommand ``info``
- Display Grafana version: https://grafana.com/docs/grafana/latest/http_api/other/#health-api
- Display number of dashboards, folders, users, and playlists
15 changes: 10 additions & 5 deletions grafana_wtf/commands.py
Expand Up @@ -13,7 +13,7 @@
from docopt import docopt, DocoptExit

from grafana_wtf import __appname__, __version__
from grafana_wtf.core import GrafanaSearch
from grafana_wtf.core import GrafanaWtf
from grafana_wtf.report import WtfReport
from grafana_wtf.tabular_report import TabularReport
from grafana_wtf.util import normalize_options, setup_logging, configure_http_logging, read_list, yaml_dump
Expand All @@ -24,11 +24,12 @@
def run():
"""
Usage:
grafana-wtf [options] info
grafana-wtf [options] explore datasources
grafana-wtf [options] explore dashboards
grafana-wtf [options] find [<search-expression>]
grafana-wtf [options] replace <search-expression> <replacement>
grafana-wtf [options] log [<dashboard_uid>] [--number=<count>]
grafana-wtf [options] explore datasources
grafana-wtf [options] explore dashboards
grafana-wtf --version
grafana-wtf (-h | --help)
Expand Down Expand Up @@ -145,7 +146,7 @@ def run():
if grafana_url is None:
raise DocoptExit('No Grafana URL given. Please use "--grafana-url" option or environment variable "GRAFANA_URL".')

engine = GrafanaSearch(grafana_url, grafana_token)
engine = GrafanaWtf(grafana_url, grafana_token)
engine.enable_cache(expire_after=cache_ttl, drop_cache=options['drop-cache'])
engine.enable_concurrency(int(options['concurrency']))
engine.setup()
Expand All @@ -164,7 +165,7 @@ def run():

else:
# Scan everything.
engine.scan()
engine.scan_common()

result = engine.search(options.search_expression or None)

Expand Down Expand Up @@ -218,6 +219,10 @@ def run():
results = engine.explore_dashboards()
output_results(output_format, results)

if options.info:
response = engine.info()
output_results(output_format, response)


def output_results(output_format: str, results: List):
if output_format == "json":
Expand Down
224 changes: 148 additions & 76 deletions grafana_wtf/core.py
Expand Up @@ -13,7 +13,7 @@
from urllib.parse import urlparse, urljoin
from concurrent.futures.thread import ThreadPoolExecutor

from grafana_wtf.model import DatasourceExplorationItem, DashboardExplorationItem
from grafana_wtf.model import DatasourceExplorationItem, DashboardExplorationItem, GrafanaDataModel
from grafana_wtf.monkey import monkeypatch_grafana_api
# Apply monkeypatch to grafana-api
# https://github.com/m0nhawk/grafana_api/pull/85/files
Expand All @@ -27,14 +27,14 @@
log = logging.getLogger(__name__)


class GrafanaSearch:
class GrafanaEngine:

def __init__(self, grafana_url, grafana_token):
self.grafana_url = grafana_url
self.grafana_token = grafana_token

self.grafana = None
self.data = Munch(datasources=[], dashboard_list=[], dashboards=[])
self.data = GrafanaDataModel()

self.finder = JsonPathFinder()

Expand Down Expand Up @@ -100,83 +100,44 @@ def start_progressbar(self, total):
if self.progressbar:
self.taqadum = tqdm(total=total)

def search(self, expression):
    """Search scanned data sources and dashboards for ``expression``.

    Requires a prior scan which populated ``self.data``. Returns a ``Munch``
    with ``datasources`` and ``dashboards`` result lists; the
    ``dashboard_list`` slot is created but never populated here.
    """
    log.info('Searching Grafana at "{}" for expression "{}"'.format(self.grafana_url, expression))

    results = Munch(datasources=[], dashboard_list=[], dashboards=[])

    # Check datasources
    log.info('Searching data sources')
    self.search_items(expression, self.data.datasources, results.datasources)

    # Check dashboards
    log.info('Searching dashboards')
    self.search_items(expression, self.data.dashboards, results.dashboards)

    return results

def replace(self, expression, replacement):
    """Replace ``expression`` by ``replacement`` within all scanned dashboards.

    Each changed dashboard is written back to Grafana with a change message
    documenting the substitution; unchanged dashboards are skipped.
    NOTE(review): the replacement operates on the raw JSON serialization of
    the dashboard, so it can also touch keys, not only values.
    """
    log.info(f'Replacing "{expression}" by "{replacement}" within Grafana at "{self.grafana_url}"')
    for dashboard in self.data.dashboards:
        payload_before = json.dumps(dashboard)
        payload_after = payload_before.replace(expression, replacement)
        # No occurrence of the expression: leave this dashboard untouched.
        if payload_before == payload_after:
            log.info(f'No replacements for dashboard with uid "{dashboard.dashboard.uid}"')
            continue
        dashboard_new = json.loads(payload_after)
        dashboard_new['message'] = f'grafana-wtf: Replaced "{expression}" by "{replacement}"'
        self.grafana.dashboard.update_dashboard(dashboard=dashboard_new)

def log(self, dashboard_uid=None):
if dashboard_uid:
what = 'Grafana dashboard "{}"'.format(dashboard_uid)
else:
what = 'multiple Grafana dashboards'
log.info('Aggregating edit history for {what} at {url}'.format(what=what, url=self.grafana_url))
def scan_common(self):
    """Scan the baseline entities needed by most subcommands: dashboards and data sources."""
    for scan in (self.scan_dashboards, self.scan_datasources):
        scan()

entries = []
for dashboard_meta in self.data.dashboard_list:
if dashboard_uid is not None and dashboard_meta['uid'] != dashboard_uid:
continue
def scan_all(self):
    """Scan every known entity type: the common ones plus all auxiliary entities."""
    self.scan_common()
    # Auxiliary scanners, invoked in a fixed order.
    scanners = (
        self.scan_admin_stats,
        self.scan_folders,
        self.scan_organizations,
        self.scan_users,
        self.scan_teams,
        self.scan_annotations,
        self.scan_snapshots,
        self.scan_notifications,
    )
    for scan in scanners:
        scan()

#print(dashboard_meta)
def scan_admin_stats(self):
    """Fetch administrator statistics from Grafana and store them on ``self.data``."""
    stats = self.grafana.admin.stats()
    self.data.admin_stats = stats

dashboard_versions = self.get_dashboard_versions(dashboard_meta['id'])
for dashboard_revision in dashboard_versions:
entry = OrderedDict(
datetime=dashboard_revision['created'],
user=dashboard_revision['createdBy'],
message=dashboard_revision['message'],
folder=dashboard_meta.get('folderTitle'),
title=dashboard_meta['title'],
version=dashboard_revision['version'],
url=urljoin(self.grafana_url, dashboard_meta['url'])
)
entries.append(entry)
def scan_folders(self):
    """Fetch all folders from Grafana and store them on ``self.data``."""
    folders = self.grafana.folder.get_all_folders()
    self.data.folders = folders

return entries
def scan_organizations(self):
    """Fetch all organizations from Grafana and store them on ``self.data``."""
    organizations = self.grafana.organizations.list_organization()
    self.data.organizations = organizations

def search_items(self, expression, items, results):
for item in items:
effective_item = None
if expression is None:
effective_item = munchify({'meta': {}, 'data': item})
else:
matches = self.finder.find(expression, item)
if matches:
effective_item = munchify({'meta': {'matches': matches}, 'data': item})
def scan_users(self):
    """Fetch all users from Grafana and store them on ``self.data``."""
    users = self.grafana.users.search_users()
    self.data.users = users

if effective_item:
results.append(effective_item)
def scan_teams(self):
    """Fetch all teams from Grafana and store them on ``self.data``."""
    teams = self.grafana.teams.search_teams()
    self.data.teams = teams

def scan(self):
def scan_annotations(self):
    """Fetch all annotations from Grafana and store them on ``self.data``."""
    annotations = self.grafana.annotations.get_annotation()
    self.data.annotations = annotations

# TODO: Folders?
# folders = self.grafana.folder.get_all_folders()
# print(folders)
def scan_snapshots(self):
    """Fetch all dashboard snapshots from Grafana and store them on ``self.data``."""
    snapshots = self.grafana.snapshots.get_dashboard_snapshots()
    self.data.snapshots = snapshots

self.scan_datasources()
self.scan_dashboards()
def scan_notifications(self):
    """Fetch all notification channels from Grafana and store them on ``self.data``."""
    notifications = self.grafana.notifications.lookup_channels()
    self.data.notifications = notifications

def scan_datasources(self):
log.info('Scanning datasources')
Expand All @@ -191,10 +152,6 @@ def scan_datasources(self):
log.error(self.get_red_message('Please use --grafana-token or GRAFANA_TOKEN '
'for authenticating with Grafana'))

@staticmethod
def get_red_message(message):
return colored.stylize(message, colored.fg("red") + colored.attr("bold"))

def scan_dashboards(self, dashboard_uids=None):

log.info('Scanning dashboards')
Expand Down Expand Up @@ -278,6 +235,121 @@ async def execute_parallel(self):
#for response in await asyncio.gather(*tasks):
# pass


class GrafanaWtf(GrafanaEngine):

def info(self):
    """Report the Grafana version plus statistics and entity counts.

    Returns an ``OrderedDict`` with three sections: ``grafana`` (the version
    from the ``/health`` endpoint), ``statistics`` (raw admin stats), and
    ``summary`` (per-entity counts). Failures while talking to Grafana are
    logged and yield a partially filled response instead of raising.
    """
    # Probe the health endpoint; on failure, fall back to an empty payload.
    health = {}
    try:
        health = self.grafana.api.GET("/health")
    except Exception as ex:
        log.error(f"Request to /health endpoint failed: {ex}")

    response = OrderedDict(
        grafana=OrderedDict(
            version=health.get("version"),
        ),
        statistics=OrderedDict(),
        summary=OrderedDict(),
    )

    try:
        self.scan_all()

        response["statistics"] = self.data.admin_stats

        summary = response["summary"]
        summary["annotations"] = len(self.data.annotations)
        # The dashboard scan also delivers folders; count only real dashboards.
        summary["dashboards"] = sum(1 for item in self.data.dashboards if not item.meta.isFolder)
        for entity in ("datasources", "folders", "notifications",
                       "organizations", "snapshots", "teams", "users"):
            summary[entity] = len(getattr(self.data, entity))
    except Exception as ex:
        log.error(f"Scanning resources failed: {ex}")

    return response

def search(self, expression):
    """Search scanned data sources and dashboards for ``expression``.

    Returns a ``Munch`` with ``datasources`` and ``dashboards`` result lists;
    the ``dashboard_list`` slot is created but never populated here.
    """
    log.info('Searching Grafana at "{}" for expression "{}"'.format(self.grafana_url, expression))

    hits = Munch(datasources=[], dashboard_list=[], dashboards=[])

    log.info('Searching data sources')
    self.search_items(expression, self.data.datasources, hits.datasources)

    log.info('Searching dashboards')
    self.search_items(expression, self.data.dashboards, hits.dashboards)

    return hits

def replace(self, expression, replacement):
    """Replace ``expression`` by ``replacement`` within all scanned dashboards.

    Each changed dashboard is written back to Grafana with a change message
    documenting the substitution; unchanged dashboards are skipped.
    """
    log.info(f'Replacing "{expression}" by "{replacement}" within Grafana at "{self.grafana_url}"')
    for dashboard in self.data.dashboards:
        original = json.dumps(dashboard)
        patched = original.replace(expression, replacement)
        if patched == original:
            # Nothing to substitute in this dashboard.
            log.info(f'No replacements for dashboard with uid "{dashboard.dashboard.uid}"')
        else:
            amended = json.loads(patched)
            # Record the substitution in the dashboard's change message.
            amended['message'] = f'grafana-wtf: Replaced "{expression}" by "{replacement}"'
            self.grafana.dashboard.update_dashboard(dashboard=amended)

def log(self, dashboard_uid=None):
    """Aggregate the edit history of dashboards.

    When ``dashboard_uid`` is given, only that dashboard's history is
    collected; otherwise all dashboards from the scanned dashboard list
    are covered. Returns a list of ``OrderedDict`` entries, one per
    dashboard revision.
    """
    if dashboard_uid:
        what = 'Grafana dashboard "{}"'.format(dashboard_uid)
    else:
        what = 'multiple Grafana dashboards'
    log.info('Aggregating edit history for {what} at {url}'.format(what=what, url=self.grafana_url))

    history = []
    for meta in self.data.dashboard_list:
        # A specific uid was requested: skip all other dashboards.
        if dashboard_uid is not None and meta['uid'] != dashboard_uid:
            continue

        for revision in self.get_dashboard_versions(meta['id']):
            history.append(OrderedDict(
                datetime=revision['created'],
                user=revision['createdBy'],
                message=revision['message'],
                folder=meta.get('folderTitle'),
                title=meta['title'],
                version=revision['version'],
                url=urljoin(self.grafana_url, meta['url']),
            ))

    return history

def search_items(self, expression, items, results):
    """Append each matching item from ``items`` to ``results``.

    Matches are wrapped as ``{'meta': ..., 'data': item}``. With
    ``expression=None``, every item is returned with empty metadata;
    otherwise only items with JSONPath matches are included, carrying
    the matches in their metadata.
    """
    for candidate in items:
        if expression is None:
            # No expression given: every item matches, with empty metadata.
            results.append(munchify({'meta': {}, 'data': candidate}))
            continue
        hits = self.finder.find(expression, candidate)
        if hits:
            results.append(munchify({'meta': {'matches': hits}, 'data': candidate}))

@staticmethod
def get_red_message(message):
    """Render ``message`` in bold red for terminal output."""
    style = colored.fg("red") + colored.attr("bold")
    return colored.stylize(message, style)

def get_dashboard_versions(self, dashboard_id):
# https://grafana.com/docs/http_api/dashboard_versions/
get_dashboard_versions_path = '/dashboards/id/%s/versions' % dashboard_id
Expand Down Expand Up @@ -353,7 +425,7 @@ def explore_dashboards(self):

class Indexer:

def __init__(self, engine: GrafanaSearch):
def __init__(self, engine: GrafanaWtf):
self.engine = engine

# Prepare index data structures.
Expand Down

0 comments on commit 5aa1f99

Please sign in to comment.