
cleanup

naviat committed Apr 23, 2019
2 parents 8d0c673 + f08470f commit 5c2080bf6d0952bb33397af8636d50af27228b11
Showing with 50,260 additions and 0 deletions.
  1. +63 −0 00_logging.yml
  2. +92 −0 01_logging.yml
  3. +206 −0 01_metrics.yml
  4. +4 −0 LICENSE
  5. +4 −0 README.md
  6. +69 −0 cleanup.sh
  7. +40 −0 install_prerequisites.sh
  8. +32 −0 logging/curator/action-file.yml
  9. +22 −0 logging/curator/config.yml
  10. +53 −0 logging/docker-compose.secure.yml
  11. +51 −0 logging/docker-compose.unsecure.yml
  12. +153 −0 logging/docker-compose.yml
  13. +3 −0 logging/elastalert/Dockerfile
  14. +47 −0 logging/elastalert/config/elastalert_config.yaml
  15. +33 −0 logging/elastalert/config/elastalert_supervisord.conf
  16. +11,808 −0 logging/elastalert/log/elastalert_supervisord.log
  17. +17,232 −0 logging/elastalert/log/elastalert_supervisord.log.1
  18. +17,684 −0 logging/elastalert/log/elastalert_supervisord.log.2
  19. +33 −0 logging/elastalert/rule_blue_print.yaml
  20. +33 −0 logging/elastalert/rules/blanket_log-level_catch.yaml
  21. +1,320 −0 logging/filebeat/filebeat.yml
  22. +18 −0 logging/logstash/config/01-inputs.conf
  23. +16 −0 logging/logstash/config/29-complete-drops.conf
  24. +64 −0 logging/logstash/config/30-generic-filters.conf
  25. +33 −0 logging/logstash/config/31-non-events.conf
  26. +16 −0 logging/logstash/config/70-ouputs.conf
  27. +1 −0 logging/logstash/patterns/custom_logstash_grok_patterns
  28. +30 −0 monitoring/alertmanager/config.yml
  29. +41 −0 monitoring/blackboxprober/config.yml
  30. +58 −0 monitoring/docker-compose.secure.yml
  31. +51 −0 monitoring/docker-compose.unsecure.yml
  32. +169 −0 monitoring/docker-compose.yml
  33. +96 −0 monitoring/prometheus/prometheus.yml
  34. +8 −0 monitoring/prometheus/rules/alert.rules_container-groups
  35. +45 −0 monitoring/prometheus/rules/alert.rules_containers
  36. +60 −0 monitoring/prometheus/rules/alert.rules_nodes
  37. +9 −0 monitoring/prometheus/rules/alert.rules_sites
  38. +7 −0 monitoring/prometheus/service.yml
  39. +65 −0 node/node.docker-compose.yml
  40. +84 −0 proxy/docker-compose.yml
  41. +10 −0 proxy/dockergen/default.vhost
  42. +245 −0 proxy/dockergen/nginx.tmpl
  43. +2 −0 proxy/nginx/conf.d/proxy.conf
  44. +150 −0 setup.sh
  45. BIN storage/grafana/grafana.db
  46. BIN storage/grafana/sessions/1/f/1f9d65310d95301c
  47. BIN storage/grafana/sessions/5/0/50f1490591f36065
  48. BIN storage/grafana/sessions/a/7/a721c6d503fb2679
00_logging.yml
@@ -0,0 +1,63 @@
version: "3.4"

services:
  mongo:
    image: mongo:3.6
    volumes:
      - vol-graylog-db:/data/db
    deploy:
      placement:
        constraints:
          - node.labels.project == logging

  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:5.6.3
    volumes:
      - vol-graylog-es:/usr/share/elasticsearch/data
    environment:
      - http.host=0.0.0.0
      - transport.host=localhost
      - network.host=0.0.0.0
      # https://www.elastic.co/guide/en/elasticsearch/reference/5.6/security-settings.html#general-security-settings
      - xpack.security.enabled=false
      - "ES_JAVA_OPTS=-Xms4g -Xmx4g"
    deploy:
      placement:
        constraints:
          - node.labels.project == logging

  graylog:
    image: tomochain/graylog:stable
    volumes:
      - vol-graylog-journal:/usr/share/graylog/data/journal
    environment:
      GRAYLOG_WEB_ENDPOINT_URI: https://graylog.dex.testnet.tomochain.com/api
      GRAYLOG_PASSWORD_SECRET_FILE: /run/secrets/graylog_password_secret
      GRAYLOG_ROOT_PASSWORD_SHA2_FILE: /run/secrets/graylog_root_password_sha2
    secrets:
      - graylog_password_secret
      - graylog_root_password_sha2
    ports:
      - target: 12201
        published: 12201
        protocol: udp
        mode: host
    deploy:
      placement:
        constraints:
          - node.labels.project == logging

volumes:
  vol-graylog-es:
    external: true
  vol-graylog-db:
    external: true
  vol-graylog-journal:
    external: true

secrets:
  graylog_password_secret:
    external: true
  graylog_root_password_sha2:
    external: true
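
All three volumes and both secrets above are declared external, so they must exist before this stack will schedule. A minimal preparation sketch, assuming a Swarm manager, a node named node-1, and a stack name of graylog (everything not taken from the compose file is illustrative):

# Label the node so the placement constraints can be satisfied
docker node update --label-add project=logging node-1

# Pre-create the external volumes the stack references
docker volume create vol-graylog-db
docker volume create vol-graylog-es
docker volume create vol-graylog-journal

# Graylog expects a long random password secret plus a SHA-256 of the admin password
openssl rand -hex 48 | tr -d '\n' | docker secret create graylog_password_secret -
echo -n 'change-me' | sha256sum | cut -d ' ' -f 1 | tr -d '\n' \
  | docker secret create graylog_root_password_sha2 -

docker stack deploy -c 00_logging.yml graylog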
01_logging.yml
@@ -0,0 +1,92 @@
---
version: '3.4'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.6.2
    restart: always
    environment:
      - 'node.name=HEYJUDE'
      - 'discovery.type=single-node'
      - 'bootstrap.memory_lock=true'
      # - 'xpack.security.enabled=false'
      - 'ES_JAVA_OPTS=-Xms256m -Xmx256m'
    ports:
      - 9200:9200
      # - 9300:9300
    volumes:
      - type: bind
        source: /var/lib/elasticsearch
        target: /usr/share/elasticsearch/data
    networks:
      - net
    logging:
      driver: fluentd
      options:
        fluentd-address: localhost:24224
        fluentd-async-connect: 'true'
        fluentd-retry-wait: '1s'
        fluentd-max-retries: '30'
        tag: alpha.efk.elasticsearch

  kibana:
    image: docker.elastic.co/kibana/kibana-oss:6.6.2
    restart: always
    # ports:
    #   - 5601:5601
    networks:
      - net
    depends_on:
      - elasticsearch
    logging:
      driver: fluentd
      options:
        fluentd-address: localhost:24224
        fluentd-async-connect: 'true'
        fluentd-retry-wait: '1s'
        fluentd-max-retries: '30'
        tag: alpha.efk.kibana

  fluentd:
    image: naviat/fluentd:1.3.3
    ports:
      - 127.0.0.1:24224:24224
      # - 24224:24224/udp
    volumes:
      - ./fluentd/etc:/fluentd/etc
    networks:
      - net
    logging:
      driver: "json-file"
      options:
        max-size: "1G"
        max-file: "2"

  nginx:
    image: nginx:1.15
    restart: always
    ports:
      - 80:80
    volumes:
      - type: bind
        source: ./nginx/nginx.conf
        target: /etc/nginx/nginx.conf
        read_only: true
      - type: bind
        source: ./nginx/conf.d
        target: /etc/nginx/conf.d
        read_only: true
    networks:
      - net
    depends_on:
      - kibana
    logging:
      driver: fluentd
      options:
        fluentd-address: localhost:24224
        fluentd-async-connect: 'true'
        fluentd-retry-wait: '1s'
        fluentd-max-retries: '30'
        tag: alpha.efk.nginx

networks:
  net:
    driver: bridge
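
Unlike the Swarm files, this one uses restart: policies and a bridge network, so it targets plain docker-compose on a single host. The bind mount and the official Elasticsearch image imply a little host preparation; a sketch, assuming the image's stock uid of 1000 and the documented vm.max_map_count minimum:

# One-time host preparation for the Elasticsearch bind mount
sudo mkdir -p /var/lib/elasticsearch
sudo chown -R 1000:1000 /var/lib/elasticsearch   # the container runs as uid 1000
sudo sysctl -w vm.max_map_count=262144           # minimum Elasticsearch requires

# Bring the EFK stack up and confirm Elasticsearch answers
docker-compose -f 01_logging.yml up -d
curl -s http://localhost:9200/_cluster/health

Note that Kibana's 5601 mapping is commented out: it is only reachable through nginx on port 80, while every service's own logs loop back into the pipeline via the fluentd logging driver (fluentd itself uses json-file to avoid feeding on its own output).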
01_metrics.yml
@@ -0,0 +1,206 @@
version: "3.3"

networks:
  net:
    driver: overlay
    attachable: true

volumes:
  prometheus: {}
  grafana: {}
  alertmanager: {}

configs:
  caddy_config:
    file: ../docker/caddy/Caddyfile
  dockerd_config:
    file: ../docker/dockerd-exporter/Caddyfile
  node_rules:
    file: ../docker/prometheus/rules/swarm_node.rules.yml
  task_rules:
    file: ../docker/prometheus/rules/swarm_task.rules.yml

services:
  dockerd-exporter:
    image: naviat/caddy
    networks:
      - net
    environment:
      - DOCKER_GWBRIDGE_IP=172.18.0.1
    configs:
      - source: dockerd_config
        target: /etc/caddy/Caddyfile
    deploy:
      mode: global
      resources:
        limits:
          memory: 128M
        reservations:
          memory: 64M

  cadvisor:
    image: google/cadvisor
    networks:
      - net
    command: -logtostderr -docker_only
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
    deploy:
      mode: global
      resources:
        limits:
          memory: 128M
        reservations:
          memory: 64M

  grafana:
    image: naviat/grafana:5.3.4
    networks:
      - net
    environment:
      - GF_SECURITY_ADMIN_USER=${ADMIN_USER:-admin}
      - GF_SECURITY_ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
      - GF_USERS_ALLOW_SIGN_UP=false
      # - GF_SERVER_ROOT_URL=${GF_SERVER_ROOT_URL:-localhost}
      # - GF_SMTP_ENABLED=${GF_SMTP_ENABLED:-false}
      # - GF_SMTP_FROM_ADDRESS=${GF_SMTP_FROM_ADDRESS:-grafana@test.com}
      # - GF_SMTP_FROM_NAME=${GF_SMTP_FROM_NAME:-Grafana}
      # - GF_SMTP_HOST=${GF_SMTP_HOST:-smtp:25}
      # - GF_SMTP_USER=${GF_SMTP_USER}
      # - GF_SMTP_PASSWORD=${GF_SMTP_PASSWORD}
    volumes:
      - grafana:/var/lib/grafana
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
      resources:
        limits:
          memory: 128M
        reservations:
          memory: 64M

  alertmanager:
    image: naviat/alertmanager:v0.16.1
    networks:
      - net
    environment:
      - SLACK_URL=${SLACK_URL:-https://hooks.slack.com/services/ABCXYZ1234567890}
      - SLACK_CHANNEL=${SLACK_CHANNEL:-devops-alerts}
      - SLACK_USER=${SLACK_USER:-alertmanager}
    command:
      - '--config.file=/etc/alertmanager/alertmanager.yml'
      - '--storage.path=/alertmanager'
    volumes:
      - alertmanager:/alertmanager
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
      resources:
        limits:
          memory: 128M
        reservations:
          memory: 64M

  unsee:
    image: cloudflare/unsee:v0.9.2
    networks:
      - net
    environment:
      - "ALERTMANAGER_URIS=default:http://alertmanager:9093"
    deploy:
      mode: replicated
      replicas: 1

  node-exporter:
    image: naviat/node-exporter:v0.16.0
    networks:
      - net
    environment:
      - NODE_ID={{.Node.ID}}
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
      - /etc/hostname:/etc/nodename
    command:
      - '--path.sysfs=/host/sys'
      - '--path.procfs=/host/proc'
      - '--collector.textfile.directory=/etc/node-exporter/'
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
      - '--no-collector.ipvs'
    deploy:
      mode: global
      resources:
        limits:
          memory: 128M
        reservations:
          memory: 64M

  prometheus:
    image: naviat/prometheus:v2.8.0
    networks:
      - net
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--storage.tsdb.retention=200h'
    volumes:
      - prometheus:/prometheus
    configs:
      - source: node_rules
        target: /etc/prometheus/swarm_node.rules.yml
      - source: task_rules
        target: /etc/prometheus/swarm_task.rules.yml
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
      resources:
        limits:
          memory: 2048M
        reservations:
          memory: 128M

  caddy:
    image: naviat/caddy
    ports:
      - "3000:3000"
      - "9090:9090"
      - "9093:9093"
      - "9094:9094"
    networks:
      - net
    environment:
      - ADMIN_USER=${ADMIN_USER:-admin}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
    configs:
      - source: caddy_config
        target: /etc/caddy/Caddyfile
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
      resources:
        limits:
          memory: 128M
        reservations:
          memory: 64M
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000"]
      interval: 5s
      timeout: 1s
      retries: 5
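
Every tunable in this file arrives through ${VAR:-default} substitution, which docker stack deploy resolves from the calling shell's environment. A minimal deployment sketch, assuming a stack name of mon and placeholder credentials and webhook:

# Placeholder values; replace with real credentials and a real Slack webhook
export ADMIN_USER=admin
export ADMIN_PASSWORD='change-me'
export SLACK_URL='https://hooks.slack.com/services/...'
export SLACK_CHANNEL=devops-alerts

docker stack deploy -c 01_metrics.yml mon

# caddy fronts the stack: Grafana on :3000, Prometheus on :9090,
# Alertmanager on :9093, Unsee on :9094
docker stack services mon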
LICENSE
@@ -1,6 +1,10 @@
MIT License

<<<<<<< HEAD
Copyright (c) 2019 Hai V.Dam
=======
Copyright (c) 2017 Wilhelm Uschtrin
>>>>>>> f08470fb35b02949367cd9e105d402c436822515

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
README.md
@@ -1,6 +1,10 @@
# DEX-INFRASTRUCTURE

<<<<<<< HEAD
[![Build Status](https://travis-ci.org/naviat/dex-infrastructure.svg?branch=master)](https://travis-ci.org/naviat/dex-infrastructure)
=======
[![Build Status](https://travis-ci.org/tomochain/dex-infrastructure.svg?branch=master)](https://travis-ci.org/tomochain/dex-infrastructure)
>>>>>>> f08470fb35b02949367cd9e105d402c436822515
This is a starter kit for Docker Swarm monitoring with [Prometheus](https://prometheus.io/),
[Grafana](http://grafana.org/),