Skip to content

Commit

Permalink
adding Prometheus configs
Browse files Browse the repository at this point in the history
  • Loading branch information
vegasbrianc committed Feb 21, 2017
1 parent 15d0fd8 commit 63063da
Show file tree
Hide file tree
Showing 4 changed files with 57 additions and 0 deletions.
11 changes: 11 additions & 0 deletions alertmanager/config.yml
@@ -0,0 +1,11 @@
# Alertmanager routing configuration: send every alert to the Slack receiver.
route:
  receiver: 'slack'
  # Re-notify for still-firing alerts every minute (aggressive; fine for a demo,
  # raise for production to avoid channel spam).
  repeat_interval: 1m

receivers:
  - name: 'slack'
    slack_configs:
      # Also post a follow-up message when the alert resolves.
      - send_resolved: true
        username: 'vegasbrianc'
        channel: '#notifications'
        # SECURITY NOTE(review): this webhook URL embeds a Slack token and is
        # committed to version control — rotate the token and inject the URL
        # from a secret store or environment variable instead.
        api_url: 'https://hooks.slack.com/services/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'
2 changes: 2 additions & 0 deletions config.monitoring
@@ -0,0 +1,2 @@
# Grafana environment file (presumably passed to the Grafana container via an
# env_file/--env-file mechanism — confirm against the compose/stack definition).
# SECURITY NOTE(review): admin password stored in plain text in VCS — rotate it
# and supply it via a secret mechanism instead.
GF_SECURITY_ADMIN_PASSWORD=foobar
# Disable self-service sign-up; accounts must be provisioned by an admin.
GF_USERS_ALLOW_SIGN_UP=false
13 changes: 13 additions & 0 deletions prometheus/alert.rules
@@ -0,0 +1,13 @@
# Prometheus 1.x alerting rules (this ALERT/IF syntax predates Prometheus 2.0;
# 2.x requires YAML-format rule files — keep in mind before upgrading).

# Fire when any scrape target is unreachable (the synthetic `up` metric is 0).
ALERT service_down
IF up == 0
ANNOTATIONS {
summary = "Instance {{ $labels.instance }} is down :( ",
description = "{{ $labels.instance }} of job {{ $labels.job }} is not happy.",
}

# Fire when a node's 1-minute load average exceeds 0.5 (low threshold — likely
# chosen so the alert is easy to trigger in a demo; tune for real workloads).
ALERT high_load
IF node_load1 > 0.5
ANNOTATIONS {
summary = "Instance {{ $labels.instance }} under high load",
description = "{{ $labels.instance }} of job {{ $labels.job }} is under high load.",
}
31 changes: 31 additions & 0 deletions prometheus/prometheus.yml
@@ -0,0 +1,31 @@
# my global config
global:
  scrape_interval: 15s      # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s  # Evaluate rules every 15 seconds (was mislabelled as a scrape comment).
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'Alertmanager'

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
  - "alert.rules"
  # - "first.rules"
  # - "second.rules"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'Monitoring_Mayhem'

    # Override the global default and scrape targets from this job every 5 seconds.
    # scrape_interval: 5s

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['node-exporter:9100', 'localhost:9090', 'metrics:9171']

0 comments on commit 63063da

Please sign in to comment.