Update alertmanager-mixin to monitoring-mixins-v1.2.0 (#386)
* Update: monitoring mixins set to v1.2.0 for alertmanager-mixin

Signed-off-by: Nicolas Lamirault <nlamirault@users.noreply.github.com>

* Update: bump chart version

Signed-off-by: Nicolas Lamirault <nicolas.lamirault@gmail.com>

---------

Signed-off-by: Nicolas Lamirault <nlamirault@users.noreply.github.com>
Signed-off-by: Nicolas Lamirault <nicolas.lamirault@gmail.com>
Co-authored-by: Nicolas Lamirault <nlamirault@users.noreply.github.com>
Co-authored-by: Nicolas Lamirault <nicolas.lamirault@gmail.com>
3 people committed Apr 5, 2023
1 parent ddf400a commit 6743e70
Showing 4 changed files with 124 additions and 174 deletions.
14 changes: 3 additions & 11 deletions charts/alertmanager-mixin/Chart.yaml
@@ -27,16 +27,8 @@ keywords:
- alertmanager
- monitoring-mixin
- portefaix

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.3.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 0.24.0
version: 1.4.0
appVersion: 0.25.0

maintainers:
- name: nlamirault
@@ -60,4 +52,4 @@ annotations:
url: https://keybase.io/nlamirault/pgp_keys.asc
artifacthub.io/changes: |
- kind: changed
description: Monitoring Mixins v1.0.0
description: Alertmanager Mixin v0.25.0
@@ -22,6 +22,7 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"description": "current set of alerts stored in the Alertmanager",
"fill": 1,
"fillGradient": 0,
"gridPos": {},
@@ -103,6 +104,7 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"description": "rate of successful and invalid alerts received by the Alertmanager",
"fill": 1,
"fillGradient": 0,
"gridPos": {},
@@ -204,6 +206,7 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"description": "rate of successful and invalid notifications sent by the Alertmanager",
"fill": 1,
"fillGradient": 0,
"gridPos": {},
@@ -291,6 +294,7 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"description": "latency of notifications sent by the Alertmanager",
"fill": 1,
"fillGradient": 0,
"gridPos": {},
@@ -393,7 +397,7 @@
"style": "dark",
"tags": [
"alertmanager-mixin",
"monitoring-mixins-v1.0.0",
"monitoring-mixins-v1.2.0",
"portefaix"
],
"templating": {
262 changes: 116 additions & 146 deletions charts/alertmanager-mixin/templates/alerts.yaml
@@ -1,19 +1,3 @@
# Copyright (C) Nicolas Lamirault <nicolas.lamirault@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0

---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
@@ -29,133 +13,119 @@ metadata:
{{- end }}
spec:
groups:
- name: alertmanager.rules
rules:
- alert: AlertmanagerFailedReload
annotations:
description: Configuration has failed to load for {{`{{`}}.instance{{`}}`}}.
summary: Reloading an Alertmanager configuration has failed.
expr: |
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
max_over_time(alertmanager_config_last_reload_successful{job="alertmanager"}[5m]) == 0
for: 10m
labels:
severity: critical
- alert: AlertmanagerMembersInconsistent
annotations:
description: Alertmanager {{`{{`}}.instance{{`}}`}} has only found {{`{{`}} $value {{`}}`}} members
of the {{`{{`}}.job{{`}}`}} cluster.
summary: A member of an Alertmanager cluster has not found all other cluster
members.
expr: |
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
max_over_time(alertmanager_cluster_members{job="alertmanager"}[5m])
< on (job) group_left
count by (job) (max_over_time(alertmanager_cluster_members{job="alertmanager"}[5m]))
for: 15m
labels:
severity: critical
- alert: AlertmanagerFailedToSendAlerts
annotations:
description: Alertmanager {{`{{`}}.instance{{`}}`}} failed to send {{`{{`}} $value | humanizePercentage
{{`}}`}} of notifications to {{`{{`}} $labels.integration {{`}}`}}.
summary: An Alertmanager instance failed to send notifications.
expr: |
(
rate(alertmanager_notifications_failed_total{job="alertmanager"}[5m])
/
rate(alertmanager_notifications_total{job="alertmanager"}[5m])
)
> 0.01
for: 5m
labels:
severity: warning
- alert: AlertmanagerClusterFailedToSendAlerts
annotations:
description: The minimum notification failure rate to {{`{{`}} $labels.integration
{{`}}`}} sent from any instance in the {{`{{`}}.job{{`}}`}} cluster is {{`{{`}} $value | humanizePercentage
{{`}}`}}.
summary: All Alertmanager instances in a cluster failed to send notifications
to a critical integration.
expr: |
min by (job, integration) (
rate(alertmanager_notifications_failed_total{job="alertmanager", integration=~`.*`}[5m])
/
rate(alertmanager_notifications_total{job="alertmanager", integration=~`.*`}[5m])
)
> 0.01
for: 5m
labels:
severity: critical
- alert: AlertmanagerClusterFailedToSendAlerts
annotations:
description: The minimum notification failure rate to {{`{{`}} $labels.integration
{{`}}`}} sent from any instance in the {{`{{`}}.job{{`}}`}} cluster is {{`{{`}} $value | humanizePercentage
{{`}}`}}.
summary: All Alertmanager instances in a cluster failed to send notifications
to a non-critical integration.
expr: |
min by (job, integration) (
rate(alertmanager_notifications_failed_total{job="alertmanager", integration!~`.*`}[5m])
/
rate(alertmanager_notifications_total{job="alertmanager", integration!~`.*`}[5m])
)
> 0.01
for: 5m
labels:
severity: warning
- alert: AlertmanagerConfigInconsistent
annotations:
description: Alertmanager instances within the {{`{{`}}.job{{`}}`}} cluster have different
configurations.
summary: Alertmanager instances within the same cluster have different configurations.
expr: |
count by (job) (
count_values by (job) ("config_hash", alertmanager_config_hash{job="alertmanager"})
)
!= 1
for: 20m
labels:
severity: critical
- alert: AlertmanagerClusterDown
annotations:
description: '{{`{{`}} $value | humanizePercentage {{`}}`}} of Alertmanager instances within
the {{`{{`}}.job{{`}}`}} cluster have been up for less than half of the last 5m.'
summary: Half or more of the Alertmanager instances within the same cluster
are down.
expr: |
(
count by (job) (
avg_over_time(up{job="alertmanager"}[5m]) < 0.5
)
/
count by (job) (
up{job="alertmanager"}
)
)
>= 0.5
for: 5m
labels:
severity: critical
- alert: AlertmanagerClusterCrashlooping
annotations:
description: '{{`{{`}} $value | humanizePercentage {{`}}`}} of Alertmanager instances within
the {{`{{`}}.job{{`}}`}} cluster have restarted at least 5 times in the last 10m.'
summary: Half or more of the Alertmanager instances within the same cluster
are crashlooping.
expr: |
(
count by (job) (
changes(process_start_time_seconds{job="alertmanager"}[10m]) > 4
)
/
count by (job) (
up{job="alertmanager"}
)
)
>= 0.5
for: 5m
labels:
severity: critical
- name: alertmanager.rules
rules:
- alert: AlertmanagerFailedReload
annotations:
description: Configuration has failed to load for {{`{{`}}.instance{{`}}`}}.
summary: Reloading an Alertmanager configuration has failed.
expr: |
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
max_over_time(alertmanager_config_last_reload_successful{job="alertmanager"}[5m]) == 0
for: 10m
labels:
severity: critical
- alert: AlertmanagerMembersInconsistent
annotations:
description: Alertmanager {{`{{`}}.instance{{`}}`}} has only found {{`{{`}} $value {{`}}`}} members of the {{`{{`}}.job{{`}}`}} cluster.
summary: A member of an Alertmanager cluster has not found all other cluster members.
expr: |
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
max_over_time(alertmanager_cluster_members{job="alertmanager"}[5m])
< on (job) group_left
count by (job) (max_over_time(alertmanager_cluster_members{job="alertmanager"}[5m]))
for: 15m
labels:
severity: critical
- alert: AlertmanagerFailedToSendAlerts
annotations:
description: Alertmanager {{`{{`}}.instance{{`}}`}} failed to send {{`{{`}} $value | humanizePercentage {{`}}`}} of notifications to {{`{{`}} $labels.integration {{`}}`}}.
summary: An Alertmanager instance failed to send notifications.
expr: |
(
rate(alertmanager_notifications_failed_total{job="alertmanager"}[5m])
/
rate(alertmanager_notifications_total{job="alertmanager"}[5m])
)
> 0.01
for: 5m
labels:
severity: warning
- alert: AlertmanagerClusterFailedToSendAlerts
annotations:
description: The minimum notification failure rate to {{`{{`}} $labels.integration {{`}}`}} sent from any instance in the {{`{{`}}.job{{`}}`}} cluster is {{`{{`}} $value | humanizePercentage {{`}}`}}.
summary: All Alertmanager instances in a cluster failed to send notifications to a critical integration.
expr: |
min by (job, integration) (
rate(alertmanager_notifications_failed_total{job="alertmanager", integration=~`.*`}[5m])
/
rate(alertmanager_notifications_total{job="alertmanager", integration=~`.*`}[5m])
)
> 0.01
for: 5m
labels:
severity: critical
- alert: AlertmanagerClusterFailedToSendAlerts
annotations:
description: The minimum notification failure rate to {{`{{`}} $labels.integration {{`}}`}} sent from any instance in the {{`{{`}}.job{{`}}`}} cluster is {{`{{`}} $value | humanizePercentage {{`}}`}}.
summary: All Alertmanager instances in a cluster failed to send notifications to a non-critical integration.
expr: |
min by (job, integration) (
rate(alertmanager_notifications_failed_total{job="alertmanager", integration!~`.*`}[5m])
/
rate(alertmanager_notifications_total{job="alertmanager", integration!~`.*`}[5m])
)
> 0.01
for: 5m
labels:
severity: warning
- alert: AlertmanagerConfigInconsistent
annotations:
description: Alertmanager instances within the {{`{{`}}.job{{`}}`}} cluster have different configurations.
summary: Alertmanager instances within the same cluster have different configurations.
expr: |
count by (job) (
count_values by (job) ("config_hash", alertmanager_config_hash{job="alertmanager"})
)
!= 1
for: 20m
labels:
severity: critical
- alert: AlertmanagerClusterDown
annotations:
description: '{{`{{`}} $value | humanizePercentage {{`}}`}} of Alertmanager instances within the {{`{{`}}.job{{`}}`}} cluster have been up for less than half of the last 5m.'
summary: Half or more of the Alertmanager instances within the same cluster are down.
expr: |
(
count by (job) (
avg_over_time(up{job="alertmanager"}[5m]) < 0.5
)
/
count by (job) (
up{job="alertmanager"}
)
)
>= 0.5
for: 5m
labels:
severity: critical
- alert: AlertmanagerClusterCrashlooping
annotations:
description: '{{`{{`}} $value | humanizePercentage {{`}}`}} of Alertmanager instances within the {{`{{`}}.job{{`}}`}} cluster have restarted at least 5 times in the last 10m.'
summary: Half or more of the Alertmanager instances within the same cluster are crashlooping.
expr: |
(
count by (job) (
changes(process_start_time_seconds{job="alertmanager"}[10m]) > 4
)
/
count by (job) (
up{job="alertmanager"}
)
)
>= 0.5
for: 5m
labels:
severity: critical
16 changes: 0 additions & 16 deletions charts/alertmanager-mixin/templates/configmap-dashboards.yaml
@@ -1,19 +1,3 @@
# Copyright (C) Nicolas Lamirault <nicolas.lamirault@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0

---
{{- $files := .Files.Glob "dashboards/*.json" }}
{{- if $files }}