Skip to content

Commit

Permalink
Improve compatibility with Grafana 8.3 / dashboard schema version 33
Browse files Browse the repository at this point in the history
  • Loading branch information
amotl committed Jan 22, 2022
1 parent f20bc47 commit c33b87c
Show file tree
Hide file tree
Showing 6 changed files with 130 additions and 50 deletions.
1 change: 1 addition & 0 deletions CHANGES.rst
Expand Up @@ -9,6 +9,7 @@ in progress
- Add two more examples about using `explore dashboards` with `jq`
- CI: Prepare test suite for testing two different dashboard schema versions, v27 and v33
- Improve determinism by returning stable sort order of dashboard results
- Improve compatibility with Grafana 8.3 by handling dashboard schema version 33 properly

2021-12-11 0.12.0
=================
Expand Down
6 changes: 6 additions & 0 deletions doc/backlog.rst
Expand Up @@ -6,6 +6,12 @@ grafana-wtf backlog
******
Prio 1
******
- [o] With Grafana >8.3, resolve datasource name and add to ``{'type': 'influxdb', 'uid': 'PDF2762CDFF14A314'}``


*********
Prio 1.25
*********
- [o] Statistics reports for data sources and panels: https://github.com/panodata/grafana-wtf/issues/18
- [o] Finding invalid data sources: https://github.com/panodata/grafana-wtf/issues/19
- [o] Add test fixture for adding dashboards at runtime from branch ``amo/test-dashboard-runtime``
Expand Down
81 changes: 55 additions & 26 deletions grafana_wtf/core.py
Expand Up @@ -2,6 +2,7 @@
# (c) 2019-2021 Andreas Motl <andreas@hiveeyes.org>
# License: GNU Affero General Public License, Version 3
import asyncio
import dataclasses
import json
import logging
from collections import OrderedDict
Expand All @@ -18,6 +19,7 @@
DashboardDetails,
DashboardExplorationItem,
DatasourceExplorationItem,
DatasourceItem,
GrafanaDataModel,
)
from grafana_wtf.monkey import monkeypatch_grafana_api
Expand Down Expand Up @@ -390,9 +392,10 @@ def explore_datasources(self):
# Compute list of exploration items, associating datasources with the dashboards that use them.
results_used = []
results_unused = []
for name in sorted(ix.datasource_by_name):
datasource = ix.datasource_by_name[name]
dashboard_uids = ix.datasource_dashboard_index.get(name, [])
for ds_identifier in sorted(ix.datasource_by_ident):

datasource = ix.datasource_by_ident[ds_identifier]
dashboard_uids = ix.datasource_dashboard_index.get(ds_identifier, [])
dashboards = list(map(ix.dashboard_by_uid.get, dashboard_uids))
item = DatasourceExplorationItem(datasource=datasource, used_in=dashboards, grafana_url=self.grafana_url)

Expand All @@ -404,6 +407,9 @@ def explore_datasources(self):
else:
results_unused.append(result)

results_used = sorted(results_used, key=lambda x: x["datasource"]["uid"] or x["datasource"]["name"])
results_unused = sorted(results_unused, key=lambda x: x["datasource"]["uid"] or x["datasource"]["name"])

response = OrderedDict(
used=results_used,
unused=results_unused,
Expand All @@ -422,18 +428,20 @@ def explore_dashboards(self):
for uid in sorted(ix.dashboard_by_uid):

dashboard = ix.dashboard_by_uid[uid]
datasource_names = ix.dashboard_datasource_index[uid]
datasource_items = ix.dashboard_datasource_index[uid]

datasources_existing = []
datasource_names_missing = []
for datasource_name in datasource_names:
if datasource_name == "-- Grafana --":
datasources_missing = []
for datasource_item in datasource_items:
if datasource_item.name == "-- Grafana --":
continue
datasource = ix.datasource_by_name.get(datasource_name)
datasource_by_uid = ix.datasource_by_uid.get(datasource_item.uid)
datasource_by_name = ix.datasource_by_name.get(datasource_item.name)
datasource = datasource_by_uid or datasource_by_name
if datasource:
datasources_existing.append(datasource)
else:
datasource_names_missing.append({"name": datasource_name})
datasources_missing.append(dataclasses.asdict(datasource_item))
item = DashboardExplorationItem(
dashboard=dashboard, datasources=datasources_existing, grafana_url=self.grafana_url
)
Expand All @@ -442,8 +450,8 @@ def explore_dashboards(self):
result = item.format_compact()

# Add information about missing data sources.
if datasource_names_missing:
result["datasources_missing"] = datasource_names_missing
if datasources_missing:
result["datasources_missing"] = datasources_missing

results.append(result)

Expand All @@ -456,6 +464,8 @@ def __init__(self, engine: GrafanaWtf):

# Prepare index data structures.
self.dashboard_by_uid = {}
self.datasource_by_ident = {}
self.datasource_by_uid = {}
self.datasource_by_name = {}
self.dashboard_datasource_index = {}
self.datasource_dashboard_index = {}
Expand All @@ -472,12 +482,16 @@ def index(self):
self.index_datasources()

@staticmethod
def collect_datasource_names(element):
names = []
def collect_datasource_items(element):
    """Collect the unique datasource references used by *element*.

    Each node may reference its datasource either as a plain name
    (string, dashboard schema <= v27) or as a structured object such as
    ``{'type': 'influxdb', 'uid': 'PDF2762CDFF14A314'}`` (schema v33,
    Grafana 8.3+).

    :param element: Iterable of dashboard nodes (panels, annotations,
                    templating entries).
    :return: Deterministically ordered list of strings and/or dicts.
    """
    items = []
    for node in element:
        if "datasource" in node and node["datasource"]:
            ds = node.datasource
            # Normalize to a plain dict so equality checks and report
            # output are uniform. Munch subclasses `dict`, so this
            # check covers Munch payloads as well.
            if isinstance(ds, dict):
                ds = dict(ds)
            if ds not in items:
                items.append(ds)
    # A bare `sorted(items)` raises TypeError when strings and dicts are
    # mixed (dicts are not orderable); sorting by the string
    # representation keeps the order stable for all payload shapes.
    return sorted(items, key=str)

def index_dashboards(self):

Expand All @@ -496,21 +510,36 @@ def index_dashboards(self):
self.dashboard_by_uid[uid] = dashboard

# Map to data source names.
ds_panels = self.collect_datasource_names(dbdetails.panels)
ds_annotations = self.collect_datasource_names(dbdetails.annotations)
ds_templating = self.collect_datasource_names(dbdetails.templating)
self.dashboard_datasource_index[uid] = list(sorted(set(ds_panels + ds_annotations + ds_templating)))
ds_panels = self.collect_datasource_items(dbdetails.panels)
ds_annotations = self.collect_datasource_items(dbdetails.annotations)
ds_templating = self.collect_datasource_items(dbdetails.templating)

results = []
for bucket in ds_panels, ds_annotations, ds_templating:
for item in bucket:
item = DatasourceItem.from_payload(item)
if item not in results:
results.append(item)
self.dashboard_datasource_index[uid] = results

def index_datasources(self):
    """Build the datasource lookup tables and the datasource->dashboard index.

    Datasources are indexed three ways: by uid, by name, and by a
    combined "ident" (uid preferred, falling back to name, since
    Grafana >= 8.3 addresses datasources by uid while older payloads
    only carry a name). ``datasource_dashboard_index`` maps each
    datasource ident to the list of dashboard uids referencing it.
    """
    self.datasource_by_ident = {}
    self.datasource_by_uid = {}
    self.datasource_by_name = {}
    self.datasource_dashboard_index = {}

    for datasource in self.datasources:
        # Prefer the uid as canonical identifier; fall back to the name
        # for payloads that do not carry one.
        ident = datasource.uid or datasource.name
        self.datasource_by_ident[ident] = datasource
        self.datasource_by_uid[datasource.uid] = datasource
        self.datasource_by_name[datasource.name] = datasource

    for dashboard_uid, datasource_items in self.dashboard_datasource_index.items():
        for datasource_item in datasource_items:
            ident = datasource_item.uid or datasource_item.name
            # Dashboards may still reference a datasource by name;
            # translate that to the canonical uid when the datasource
            # is known to this Grafana instance.
            if ident in self.datasource_by_name:
                ident = self.datasource_by_name[ident].uid
            self.datasource_dashboard_index.setdefault(ident, [])
            self.datasource_dashboard_index[ident].append(dashboard_uid)
20 changes: 20 additions & 0 deletions grafana_wtf/model.py
Expand Up @@ -42,6 +42,24 @@ def templating(self) -> List:
return self.dashboard.dashboard.get("templating", {}).get("list", [])


@dataclasses.dataclass
class DatasourceItem:
    """A datasource reference as found within a dashboard definition.

    Grafana dashboards reference datasources either by bare name
    (schema <= v27) or by a structured object carrying ``uid``/``type``
    (schema v33, Grafana 8.3+); all fields are therefore optional.
    """

    uid: Optional[str] = None
    name: Optional[str] = None
    type: Optional[str] = None
    url: Optional[str] = None

    @classmethod
    def from_payload(cls, payload: object) -> "DatasourceItem":
        """Create an item from a raw dashboard datasource reference.

        Accepts a mapping (``Munch`` is a ``dict`` subclass, so Munch
        payloads are covered as well) or a plain datasource name.

        :raises TypeError: For payload types other than mapping or str.
        """
        if isinstance(payload, dict):
            return cls(**payload)
        if isinstance(payload, str):
            return cls(name=payload)
        raise TypeError(f"Unknown payload type for DatasourceItem: {type(payload)}")


@dataclasses.dataclass
class DatasourceExplorationItem:
datasource: Munch
Expand All @@ -50,6 +68,7 @@ class DatasourceExplorationItem:

def format_compact(self):
dsshort = OrderedDict(
uid=self.datasource.uid,
name=self.datasource.name,
type=self.datasource.type,
url=self.datasource.url,
Expand Down Expand Up @@ -84,6 +103,7 @@ def format_compact(self):
for datasource in self.datasources:
item.setdefault("datasources", [])
dsshort = OrderedDict(
uid=datasource.uid,
name=datasource.name,
type=datasource.type,
url=datasource.url,
Expand Down
43 changes: 32 additions & 11 deletions grafana_wtf/tabular_report.py
Expand Up @@ -2,6 +2,7 @@
from collections import OrderedDict

from jsonpath_rw import parse
from munch import Munch
from tabulate import tabulate

from grafana_wtf.report import WtfReport
Expand All @@ -15,8 +16,8 @@ def __init__(self, grafana_url, tblfmt="psql", verbose=False):
def output_items(self, label, items, url_callback):
items_rows = [
{
"type": label,
"name": self.get_item_name(item),
"Type": label,
"Name": self.get_item_name(item),
**self.get_bibdata_dict(item, URL=url_callback(item)),
}
for item in items
Expand All @@ -32,15 +33,35 @@ def get_bibdata_dict(self, item, **kwargs):
bibdata["Title"] = item.data.dashboard.title
bibdata["Folder"] = item.data.meta.folderTitle
bibdata["UID"] = item.data.dashboard.uid
bibdata["Creation date"] = f"{item.data.meta.created}"
bibdata["created by"] = item.data.meta.createdBy
bibdata["last update date"] = f"{item.data.meta.updated}"
bibdata["Created"] = f"{item.data.meta.created}"
bibdata["Updated"] = f"{item.data.meta.updated}"
bibdata["Created by"] = item.data.meta.createdBy

# FIXME: The test fixtures are currently not deterministic,
# because Grafana is not cleared on each test case.
if "PYTEST_CURRENT_TEST" not in os.environ:
bibdata["updated by"] = item.data.meta.updatedBy
_finder = parse("$..datasource")
_datasources = _finder.find(item)
bibdata["datasources"] = ",".join(
sorted(set([str(_ds.value) for _ds in _datasources if _ds.value])) if _datasources else ""
)
bibdata["Updated by"] = item.data.meta.updatedBy

bibdata["Datasources"] = ",".join(map(str, self.get_datasources(item)))
bibdata.update(kwargs)
return bibdata

def get_datasources(self, item):
    """Return the unique datasource references found anywhere in *item*.

    Uses a JSONPath query to find all ``datasource`` attributes.
    Structured references (Munch) are normalized to plain dicts;
    everything else is rendered as a string. Falsy references are
    dropped and first-appearance order is preserved.
    """
    matches = parse("$..datasource").find(item)

    unique = []
    for match in matches:
        if not match.value:
            continue
        if isinstance(match.value, Munch):
            normalized = dict(match.value)
        else:
            normalized = str(match.value)
        if normalized not in unique:
            unique.append(normalized)

    return unique
29 changes: 16 additions & 13 deletions tests/test_commands.py
Expand Up @@ -67,7 +67,7 @@ def test_find_textual_dashboard_success(docker_grafana, capsys):
assert "dashboard.panels.[7].panels.[0].targets.[0].measurement: ldi_readings" in captured.out


def test_find_textual_datasource_dashboard_success(docker_grafana, capsys):
def test_find_textual_datasource_success(docker_grafana, capsys):
set_command("find ldi_v2")
grafana_wtf.commands.run()
captured = capsys.readouterr()
Expand All @@ -77,7 +77,7 @@ def test_find_textual_datasource_dashboard_success(docker_grafana, capsys):
assert "name: ldi_v2" in captured.out
assert "database: ldi_v2" in captured.out

assert "Dashboards: 2 hits" in captured.out
assert "Dashboards: 1 hits" in captured.out
assert "luftdaten-info-generic-trend" in captured.out
assert "dashboard.panels.[1].datasource: ldi_v2" in captured.out
assert "dashboard.panels.[7].panels.[0].datasource: ldi_v2" in captured.out
Expand All @@ -91,13 +91,13 @@ def test_find_tabular_dashboard_success(docker_grafana, capsys):
assert 'Searching for expression "ldi_readings" at Grafana instance http://localhost:3000' in captured.out

reference_table = """
| type | name | Title | Folder | UID | Creation date | created by | last update date | datasources | URL |
|:-----------|:---------------------------------|:---------------------------------|:----------|:----------|:---------------------|:-------------|:---------------------|:---------------------------------|:-------------------------------------------------------------------|
| Dashboards | luftdaten-info-generic-trend-v27 | luftdaten.info generic trend v27 | Testdrive | ioUrPwQiz | xxxx-xx-xxTxx:xx:xxZ | Anonymous | xxxx-xx-xxTxx:xx:xxZ | -- Grafana --,ldi_v2,weatherbase | http://localhost:3000/d/ioUrPwQiz/luftdaten-info-generic-trend-v27 |
| Dashboards | luftdaten-info-generic-trend-v33 | luftdaten.info generic trend v33 | Testdrive | jpVsQxRja | xxxx-xx-xxTxx:xx:xxZ | Anonymous | xxxx-xx-xxTxx:xx:xxZ | -- Grafana --,ldi_v2,weatherbase | http://localhost:3000/d/jpVsQxRja/luftdaten-info-generic-trend-v33 |
| Type | Name | Title | Folder | UID | Created | Updated | Created by | Datasources | URL |
|:-----------|:---------------------------------|:---------------------------------|:----------|:----------|:---------------------|:---------------------|:-------------|:--------------------------------------------------------------------------------------|:-------------------------------------------------------------------|
| Dashboards | luftdaten-info-generic-trend-v27 | luftdaten.info generic trend v27 | Testdrive | ioUrPwQiz | xxxx-xx-xxTxx:xx:xxZ | xxxx-xx-xxTxx:xx:xxZ | Anonymous | -- Grafana --,ldi_v2,weatherbase | http://localhost:3000/d/ioUrPwQiz/luftdaten-info-generic-trend-v27 |
| Dashboards | luftdaten-info-generic-trend-v33 | luftdaten.info generic trend v33 | Testdrive | jpVsQxRja | xxxx-xx-xxTxx:xx:xxZ | xxxx-xx-xxTxx:xx:xxZ | Anonymous | -- Grafana --,{'type': 'influxdb', 'uid': 'PDF2762CDFF14A314'},{'uid': 'weatherbase'} | http://localhost:3000/d/jpVsQxRja/luftdaten-info-generic-trend-v33 |
""".strip()

output_table = captured.out[captured.out.find("| type") :]
output_table = captured.out[captured.out.find("| Type") :]
output_table_normalized = re.sub(
r"\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ", r"xxxx-xx-xxTxx:xx:xxZ", output_table
).strip()
Expand All @@ -123,8 +123,8 @@ def test_replace_dashboard_success(docker_grafana, capsys):
# assert "name: ldi_v2" in captured.out
# assert "database: ldi_v2" in captured.out

assert "Dashboards: 2 hits" in captured.out
assert "luftdaten-info-generic-trend" in captured.out
assert "Dashboards: 1 hits" in captured.out
assert "luftdaten-info-generic-trend-v27" in captured.out
assert "Folder Testdrive" in captured.out
assert "dashboard.panels.[1].datasource: ldi_v3" in captured.out
assert "dashboard.panels.[7].panels.[0].datasource: ldi_v3" in captured.out
Expand Down Expand Up @@ -222,15 +222,18 @@ def test_explore_dashboards(docker_grafana, create_datasource, capsys, caplog):
assert len(data) >= 1

missing = find_all_missing_datasources(data)
assert missing == ["weatherbase"]

# Those are bogus!
assert missing[0]["name"] == "weatherbase"
assert missing[1]["uid"] == "weatherbase"


def find_all_missing_datasources(data):
    """Collect all ``datasources_missing`` entries from exploration results.

    :param data: Decoded output of ``explore dashboards``.
    :return: Missing datasource items sorted by uid, falling back to
             name when no uid is present.
    """
    missing_items = []
    for item in data:
        if "datasources_missing" in item:
            missing_items += item["datasources_missing"]
    return sorted(missing_items, key=lambda ds: ds["uid"] or ds["name"])


def test_info(docker_grafana, capsys, caplog):
Expand Down

0 comments on commit c33b87c

Please sign in to comment.