From 5dd9b9f723a0a92863026802697fbdda4f872b3e Mon Sep 17 00:00:00 2001
From: Stephen Lang
Date: Wed, 1 May 2024 18:12:19 +0100
Subject: [PATCH] fix(dashboards): Port proxy dashboard to new grafonnet

---
 dashboards/proxy.libsonnet | 365 +++++++++++++++++--------------------
 1 file changed, 169 insertions(+), 196 deletions(-)

diff --git a/dashboards/proxy.libsonnet b/dashboards/proxy.libsonnet
index dc115635d..222d7148f 100644
--- a/dashboards/proxy.libsonnet
+++ b/dashboards/proxy.libsonnet
@@ -1,203 +1,176 @@
-local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
-local dashboard = grafana.dashboard;
-local row = grafana.row;
-local prometheus = grafana.prometheus;
-local template = grafana.template;
-local graphPanel = grafana.graphPanel;
-local singlestat = grafana.singlestat;
+local g = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
+
+local prometheus = g.query.prometheus;
+local stat = g.panel.stat;
+local timeSeries = g.panel.timeSeries;
+local var = g.dashboard.variable;
 
 {
+  local statPanel(title, unit, query) =
+    stat.new(title)
+    + stat.options.withColorMode('none')
+    + stat.standardOptions.withUnit(unit)
+    + stat.queryOptions.withInterval($._config.grafanaK8s.minimumTimeInterval)
+    + stat.queryOptions.withTargets([
+      prometheus.new('${datasource}', query)
+      + prometheus.withInstant(true),
+    ]),
+
+  local tsPanel =
+    timeSeries {
+      new(title):
+        timeSeries.new(title)
+        + timeSeries.options.legend.withShowLegend()
+        + timeSeries.options.legend.withAsTable()
+        + timeSeries.options.legend.withDisplayMode('table')
+        + timeSeries.options.legend.withPlacement('right')
+        + timeSeries.options.legend.withCalcs(['lastNotNull'])
+        + timeSeries.options.tooltip.withMode('single')
+        + timeSeries.fieldConfig.defaults.custom.withShowPoints('never')
+        + timeSeries.fieldConfig.defaults.custom.withFillOpacity(10)
+        + timeSeries.fieldConfig.defaults.custom.withSpanNulls(true)
+        + timeSeries.queryOptions.withInterval($._config.grafanaK8s.minimumTimeInterval),
+    },
+
   grafanaDashboards+:: {
     'proxy.json':
-      local upCount =
-        singlestat.new(
-          'Up',
-          datasource='$datasource',
-          span=2,
-          valueName='min',
-        )
-        .addTarget(prometheus.target('sum(up{%(clusterLabel)s="$cluster", %(kubeProxySelector)s})' % $._config));
-
-      local rulesSyncRate =
-        graphPanel.new(
-          'Rules Sync Rate',
-          datasource='$datasource',
-          span=5,
-          min=0,
-          format='ops',
-        )
-        .addTarget(prometheus.target('sum(rate(kubeproxy_sync_proxy_rules_duration_seconds_count{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='rate'));
-
-      local rulesSyncLatency =
-        graphPanel.new(
-          'Rule Sync Latency 99th Quantile',
-          datasource='$datasource',
-          span=5,
-          min=0,
-          format='s',
-          legend_show=true,
-          legend_values=true,
-          legend_current=true,
-          legend_alignAsTable=true,
-          legend_rightSide=true,
-        )
-        .addTarget(prometheus.target('histogram_quantile(0.99,rate(kubeproxy_sync_proxy_rules_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='{{instance}}'));
-
-      local networkProgrammingRate =
-        graphPanel.new(
-          'Network Programming Rate',
-          datasource='$datasource',
-          span=6,
-          min=0,
-          format='ops',
-        )
-        .addTarget(prometheus.target('sum(rate(kubeproxy_network_programming_duration_seconds_count{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='rate'));
-
-      local networkProgrammingLatency =
-        graphPanel.new(
-          'Network Programming Latency 99th Quantile',
-          datasource='$datasource',
-          span=6,
-          min=0,
-          format='s',
-          legend_show=true,
-          legend_values=true,
-          legend_current=true,
-          legend_alignAsTable=true,
-          legend_rightSide=true,
-        )
-        .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(kubeproxy_network_programming_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, le))' % $._config, legendFormat='{{instance}}'));
-
-      local rpcRate =
-        graphPanel.new(
-          'Kube API Request Rate',
-          datasource='$datasource',
-          span=4,
-          format='ops',
-        )
-        .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance",code=~"2.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='2xx'))
-        .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance",code=~"3.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='3xx'))
-        .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance",code=~"4.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='4xx'))
-        .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance",code=~"5.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='5xx'));
-
-      local postRequestLatency =
-        graphPanel.new(
-          'Post Request Latency 99th Quantile',
-          datasource='$datasource',
-          span=8,
-          format='s',
-          min=0,
-        )
-        .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeProxySelector)s,instance=~"$instance",verb="POST"}[%(grafanaIntervalVar)s])) by (verb, url, le))' % $._config, legendFormat='{{verb}} {{url}}'));
-
-      local getRequestLatency =
-        graphPanel.new(
-          'Get Request Latency 99th Quantile',
-          datasource='$datasource',
-          span=12,
-          format='s',
-          min=0,
-          legend_show=true,
-          legend_values=true,
-          legend_current=true,
-          legend_alignAsTable=true,
-          legend_rightSide=true,
-        )
-        .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance", verb="GET"}[%(grafanaIntervalVar)s])) by (verb, url, le))' % $._config, legendFormat='{{verb}} {{url}}'));
-
-      local memory =
-        graphPanel.new(
-          'Memory',
-          datasource='$datasource',
-          span=4,
-          format='bytes',
-        )
-        .addTarget(prometheus.target('process_resident_memory_bytes{%(clusterLabel)s="$cluster", %(kubeProxySelector)s,instance=~"$instance"}' % $._config, legendFormat='{{instance}}'));
-
-      local cpu =
-        graphPanel.new(
-          'CPU usage',
-          datasource='$datasource',
-          span=4,
-          format='short',
-          min=0,
-        )
-        .addTarget(prometheus.target('rate(process_cpu_seconds_total{%(clusterLabel)s="$cluster", %(kubeProxySelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])' % $._config, legendFormat='{{instance}}'));
-
-      local goroutines =
-        graphPanel.new(
-          'Goroutines',
-          datasource='$datasource',
-          span=4,
-          format='short',
-        )
-        .addTarget(prometheus.target('go_goroutines{%(clusterLabel)s="$cluster", %(kubeProxySelector)s,instance=~"$instance"}' % $._config, legendFormat='{{instance}}'));
-
-
-      dashboard.new(
-        '%(dashboardNamePrefix)sProxy' % $._config.grafanaK8s,
-        time_from='now-1h',
-        uid=($._config.grafanaDashboardIDs['proxy.json']),
-        tags=($._config.grafanaK8s.dashboardTags),
-      ).addTemplate(
-        {
-          current: {
-            selected: true,
-            text: $._config.datasourceName,
-            value: $._config.datasourceName,
+      local variables = {
+        datasource:
+          var.datasource.new('datasource', 'prometheus')
+          + var.datasource.withRegex($._config.datasourceFilterRegex)
+          + var.datasource.generalOptions.showOnDashboard.withLabelAndValue()
+          + var.datasource.generalOptions.withLabel('Data source')
+          + {
+            current: {
+              selected: true,
+              text: $._config.datasourceName,
+              value: $._config.datasourceName,
+            },
           },
-          hide: 0,
-          label: 'Data source',
-          name: 'datasource',
-          options: [],
-          query: 'prometheus',
-          refresh: 1,
-          regex: $._config.datasourceFilterRegex,
-          type: 'datasource',
-        },
-      )
-      .addTemplate(
-        template.new(
-          'cluster',
-          '$datasource',
-          'label_values(up{%(kubeProxySelector)s}, %(clusterLabel)s)' % $._config,
-          label='cluster',
-          refresh='time',
-          hide=if $._config.showMultiCluster then '' else 'variable',
-          sort=1,
-        )
-      )
-      .addTemplate(
-        template.new(
-          'instance',
-          '$datasource',
-          'label_values(up{%(kubeProxySelector)s, %(clusterLabel)s="$cluster", %(kubeProxySelector)s}, instance)' % $._config,
-          refresh='time',
-          includeAll=true,
-          sort=1,
-        )
-      )
-      .addRow(
-        row.new()
-        .addPanel(upCount)
-        .addPanel(rulesSyncRate)
-        .addPanel(rulesSyncLatency)
-      ).addRow(
-        row.new()
-        .addPanel(networkProgrammingRate)
-        .addPanel(networkProgrammingLatency)
-      ).addRow(
-        row.new()
-        .addPanel(rpcRate)
-        .addPanel(postRequestLatency)
-      ).addRow(
-        row.new()
-        .addPanel(getRequestLatency)
-      ).addRow(
-        row.new()
-        .addPanel(memory)
-        .addPanel(cpu)
-        .addPanel(goroutines)
-      ),
+
+        cluster:
+          var.query.new('cluster')
+          + var.query.withDatasourceFromVariable(self.datasource)
+          + var.query.queryTypes.withLabelValues(
+            $._config.clusterLabel,
+            'up{%(kubeProxySelector)s}' % $._config
+          )
+          + var.query.generalOptions.withLabel('cluster')
+          + var.query.refresh.onTime()
+          + (
+            if $._config.showMultiCluster
+            then var.query.generalOptions.showOnDashboard.withLabelAndValue()
+            else var.query.generalOptions.showOnDashboard.withNothing()
+          )
+          + var.query.withSort(type='alphabetical'),
+
+        instance:
+          var.query.new('instance')
+          + var.query.withDatasourceFromVariable(self.datasource)
+          + var.query.queryTypes.withLabelValues(
+            'instance',
+            'up{%(kubeProxySelector)s, %(clusterLabel)s="$cluster", %(kubeProxySelector)s}' % $._config,
+          )
+          + var.query.generalOptions.withLabel('instance')
+          + var.query.refresh.onTime()
+          + var.query.generalOptions.showOnDashboard.withLabelAndValue()
+          + var.query.selectionOptions.withIncludeAll(true, '.+'),
+      };
+
+      local panels = [
+        statPanel('Up', 'none', 'sum(up{%(clusterLabel)s="$cluster", %(kubeProxySelector)s})' % $._config)
+        + stat.gridPos.withW(6),
+
+        tsPanel.new('Rules Sync Rate')
+        + tsPanel.gridPos.withW(9)
+        + tsPanel.standardOptions.withUnit('ops')
+        + tsPanel.queryOptions.withTargets([
+          prometheus.new('${datasource}', 'sum(rate(kubeproxy_sync_proxy_rules_duration_seconds_count{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s]))' % $._config)
+          + prometheus.withLegendFormat('rate'),
+        ]),
+
+        tsPanel.new('Rules Sync Latency 99th Quantile')
+        + tsPanel.gridPos.withW(9)
+        + tsPanel.standardOptions.withUnit('s')
+        + tsPanel.queryOptions.withTargets([
+          prometheus.new('${datasource}', 'histogram_quantile(0.99,rate(kubeproxy_sync_proxy_rules_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s]))' % $._config)
+          + prometheus.withLegendFormat('{{instance}}'),
+        ]),
+
+        tsPanel.new('Network Programming Rate')
+        + tsPanel.standardOptions.withUnit('ops')
+        + tsPanel.queryOptions.withTargets([
+          prometheus.new('${datasource}', 'sum(rate(kubeproxy_network_programming_duration_seconds_count{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s]))' % $._config)
+          + prometheus.withLegendFormat('rate'),
+        ]),
+
+        tsPanel.new('Network Programming Latency 99th Quantile')
+        + tsPanel.standardOptions.withUnit('s')
+        + tsPanel.queryOptions.withTargets([
+          prometheus.new('${datasource}', 'histogram_quantile(0.99, sum(rate(kubeproxy_network_programming_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, le))' % $._config)
+          + prometheus.withLegendFormat('{{instance}}'),
+        ]),
+
+        tsPanel.new('Post Request Latency 99th Quantile')
+        + tsPanel.standardOptions.withUnit('s')
+        + tsPanel.queryOptions.withTargets([
+          prometheus.new('${datasource}', 'histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeProxySelector)s,instance=~"$instance",verb="POST"}[%(grafanaIntervalVar)s])) by (verb, url, le))' % $._config)
+          + prometheus.withLegendFormat('{{verb}} {{url}}'),
+        ]),
+
+        tsPanel.new('Get Request Latency 99th Quantile')
+        + tsPanel.standardOptions.withUnit('s')
+        + tsPanel.queryOptions.withTargets([
+          prometheus.new('${datasource}', 'histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance", verb="GET"}[%(grafanaIntervalVar)s])) by (verb, url, le))' % $._config)
          + prometheus.withLegendFormat('{{verb}} {{url}}'),
+        ]),
+
+        tsPanel.new('Kube API Request Rate')
+        + tsPanel.standardOptions.withUnit('ops')
+        + tsPanel.queryOptions.withTargets([
+          prometheus.new('${datasource}', 'sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster",%(kubeProxySelector)s, instance=~"$instance",code=~"2.."}[%(grafanaIntervalVar)s]))' % $._config)
+          + prometheus.withLegendFormat('2xx'),
+
+          prometheus.new('${datasource}', 'sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster",%(kubeProxySelector)s, instance=~"$instance",code=~"3.."}[%(grafanaIntervalVar)s]))' % $._config)
+          + prometheus.withLegendFormat('3xx'),
+
+          prometheus.new('${datasource}', 'sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster",%(kubeProxySelector)s, instance=~"$instance",code=~"4.."}[%(grafanaIntervalVar)s]))' % $._config)
+          + prometheus.withLegendFormat('4xx'),
+
+          prometheus.new('${datasource}', 'sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster",%(kubeProxySelector)s, instance=~"$instance",code=~"5.."}[%(grafanaIntervalVar)s]))' % $._config)
+          + prometheus.withLegendFormat('5xx'),
+        ]),
+
+        tsPanel.new('Memory')
+        + tsPanel.standardOptions.withUnit('bytes')
+        + tsPanel.queryOptions.withTargets([
+          prometheus.new('${datasource}', 'process_resident_memory_bytes{%(clusterLabel)s="$cluster", %(kubeProxySelector)s,instance=~"$instance"}' % $._config)
+          + prometheus.withLegendFormat('{{instance}}'),
+        ]),
+
+        tsPanel.new('CPU usage')
+        + tsPanel.standardOptions.withUnit('short')
+        + tsPanel.queryOptions.withTargets([
+          prometheus.new('${datasource}', 'rate(process_cpu_seconds_total{%(clusterLabel)s="$cluster", %(kubeProxySelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])' % $._config)
+          + prometheus.withLegendFormat('{{instance}}'),
+        ]),
+
+        tsPanel.new('Goroutines')
+        + tsPanel.standardOptions.withUnit('short')
+        + tsPanel.queryOptions.withTargets([
+          prometheus.new('${datasource}', 'go_goroutines{%(clusterLabel)s="$cluster", %(kubeProxySelector)s,instance=~"$instance"}' % $._config)
+          + prometheus.withLegendFormat('{{instance}}'),
+        ]),
+      ];
+
+      g.dashboard.new('%(dashboardNamePrefix)sProxy' % $._config.grafanaK8s)
+      + g.dashboard.withUid($._config.grafanaDashboardIDs['proxy.json'])
+      + g.dashboard.withTags($._config.grafanaK8s.dashboardTags)
+      + g.dashboard.withEditable(false)
+      + g.dashboard.time.withFrom('now-1h')
+      + g.dashboard.time.withTo('now')
+      + g.dashboard.withRefresh($._config.grafanaK8s.refresh)
+      + g.dashboard.withVariables([variables.datasource, variables.cluster, variables.instance])
+      + g.dashboard.withPanels(g.util.grid.wrapPanels(panels, panelWidth=12, panelHeight=9)),
   },
 }
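
To spot-check the ported dashboard locally, the hidden grafanaDashboards field can be rendered to JSON with plain jsonnet. A minimal sketch follows; the render.jsonnet file name, the config.libsonnet import, and the vendor/ directory from jsonnet-bundler are assumptions about the local checkout, not part of this patch:

// render.jsonnet -- minimal sketch; assumes config.libsonnet supplies the $._config
// values referenced by dashboards/proxy.libsonnet and that the new grafonnet is
// vendored under vendor/ (jsonnet-bundler layout).
local proxy = (import 'dashboards/proxy.libsonnet') + (import 'config.libsonnet');

// grafanaDashboards is declared hidden (+::), but it can still be selected explicitly.
// Render with: jsonnet -J vendor render.jsonnet > proxy-dashboard.json
proxy.grafanaDashboards['proxy.json']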