Skip to content
This repository has been archived by the owner on Apr 1, 2020. It is now read-only.

Commit

Permalink
Merge pull request #49 from giantswarm/fluentd
Browse files Browse the repository at this point in the history
Add EFK chart
  • Loading branch information
pipo02mix committed Nov 17, 2018
2 parents 300f258 + cbd8df9 commit 28e517d
Show file tree
Hide file tree
Showing 24 changed files with 854 additions and 0 deletions.
21 changes: 21 additions & 0 deletions helm/g8s-efk-chart/.helmignore
@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
# OS-generated metadata
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
16 changes: 16 additions & 0 deletions helm/g8s-efk-chart/Chart.yaml
@@ -0,0 +1,16 @@
# Chart metadata for the EFK (Elasticsearch, Fluentbit, Kibana) logging stack.
name: efk
# Versions are quoted so YAML tooling never mis-types a version that
# happens to look numeric (e.g. "1.10"); parsed value is unchanged.
version: "1.0.0"
appVersion: "6.1.1"
description: Elasticsearch, Fluentbit and Kibana stack ready to be your logging system.
icon: https://static-www.elastic.co/assets/blteb1c97719574938d/logo-elastic-elasticsearch-lt.svg
# Upstream projects this chart draws on.
sources:
  - https://www.elastic.co/products/elasticsearch
  - https://github.com/jetstack/elasticsearch-pet
  - https://github.com/GoogleCloudPlatform/elasticsearch-docker
  - https://github.com/clockworksoul/helm-elasticsearch
  - https://github.com/pires/kubernetes-elasticsearch-cluster
maintainers:
  - name: giantswarm
    email: info@giantswarm.io
engine: gotpl
tillerVersion: ">=2.8.0"
40 changes: 40 additions & 0 deletions helm/g8s-efk-chart/templates/curator/cronjob.yaml
@@ -0,0 +1,40 @@
# Periodically prunes old Elasticsearch indices via the curator image.
# FIX: batch/v2alpha1 requires the alpha feature gate (off by default on
# most clusters); batch/v1beta1 is the beta CronJob API served since
# Kubernetes 1.8 and is what clusters of this era actually enable.
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  namespace: "{{ .Values.namespace }}"
  name: curator
spec:
  schedule: "{{ .Values.curator.cron }}"
  # Keep a small history of finished jobs for debugging.
  successfulJobsHistoryLimit: 2
  failedJobsHistoryLimit: 2
  jobTemplate:
    spec:
      template:
        metadata:
          name: curator
          labels:
            app: curator
        spec:
          containers:
          - name: curator
            # NOTE(review): ":latest" with pullPolicy Always makes runs
            # non-reproducible — consider pinning an image tag.
            image: quay.io/giantswarm/curator:latest
            imagePullPolicy: Always
            env:
            # In-cluster Elasticsearch endpoint (see the elasticsearch Service).
            - name: ELASTICSEARCH_HOST
              value: elasticsearch:9200
            # Indices older than this many days are deleted.
            - name: RETENTION_DAYS
              value: "{{ .Values.curator.retention }}"
            # Must match the Logstash_Prefix used by the fluentbit output.
            - name: INDEX_NAME_PREFIX
              value: "{{ .Values.logsPrefix }}-"
            - name: INDEX_NAME_TIMEFORMAT
              value: "%Y.%m.%d"
            resources:
              limits:
                cpu: 50m
                memory: 50Mi
              requests:
                cpu: 50m
                memory: 50Mi
          restartPolicy: OnFailure
          # retry for a maximum of 10 minutes
          activeDeadlineSeconds: 600
21 changes: 21 additions & 0 deletions helm/g8s-efk-chart/templates/elasticsearch/configmap.yaml
@@ -0,0 +1,21 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: elasticsearch
  namespace: "{{ .Values.namespace }}"
  labels:
    app: elasticsearch
data:
  # Mounted by the elasticsearch deployment via subPath as
  # /usr/share/elasticsearch/elasticsearch.yml. Everything under the
  # block scalar below is literal file content, so notes live here:
  # - transport.host 127.0.0.1 keeps node-to-node transport local
  #   (single-node setup, matching minimum_master_nodes: 1).
  # - NOTE(review): transport logger is set to debug — presumably a
  #   leftover from troubleshooting; confirm it is still wanted.
  elasticsearch.yml: |
    cluster.name: {{ .Values.clusterName }}
    node.name: "es_node"
    path.data: /usr/share/elasticsearch/data
    http:
      host: 0.0.0.0
      port: 9200
    bootstrap.memory_lock: true
    transport.host: 127.0.0.1
    discovery:
      zen:
        minimum_master_nodes: 1
    logger.org.elasticsearch.transport: debug
92 changes: 92 additions & 0 deletions helm/g8s-efk-chart/templates/elasticsearch/deployment.yaml
@@ -0,0 +1,92 @@
# Single-node Elasticsearch deployment.
# NOTE(review): extensions/v1beta1 Deployments are deprecated (removed in
# K8s 1.16) — migrate to apps/v1 (requires adding spec.selector) when the
# target cluster version allows.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: elasticsearch
  namespace: "{{ .Values.namespace }}"
  labels:
    app: elasticsearch
spec:
  replicas: 1
  revisionHistoryLimit: 3
  strategy:
    # Recreate: the single data volume cannot be attached to two pods at once.
    type: Recreate
  template:
    metadata:
      annotations:
        # Changes every release, forcing a pod rollout. NOTE(review):
        # .Release.Time was removed in Helm 3 — confirm Helm 2/Tiller only.
        releasetime: {{ $.Release.Time }}
      labels:
        app: elasticsearch
    spec:
      affinity:
        # Keep Elasticsearch off nodes labelled role=master.
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: role
                operator: NotIn
                values:
                - master
      {{- if .Values.elasticsearch.nodeSelector }}
      nodeSelector:
        # BUG FIX: previously rendered .Values.data.nodeSelector, which is
        # not the path the guard above checks — the configured
        # .Values.elasticsearch.nodeSelector was silently ignored.
{{ toYaml .Values.elasticsearch.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.elasticsearch.tolerations }}
      tolerations:
{{ toYaml .Values.elasticsearch.tolerations | indent 8 }}
      {{- end }}
      initContainers:
      # Elasticsearch requires vm.max_map_count >= 262144 on the host kernel.
      - name: set-vm-max-map-count
        image: quay.io/giantswarm/busybox:1.28.3
        imagePullPolicy: IfNotPresent
        command: ['sysctl', '-w', 'vm.max_map_count=262144']
        securityContext:
          privileged: true
      {{- if .Values.elasticsearch.persistence.enabled }}
      # The Elasticsearch process runs as uid/gid 1000; pre-chown the data volume.
      - name: volume-mount-hack
        image: quay.io/giantswarm/busybox:1.28.3
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
        volumeMounts:
        - name: elasticsearch-data
          mountPath: /usr/share/elasticsearch/data
      {{- end }}
      serviceAccountName: elasticsearch
      containers:
      - name: elasticsearch
        image: "{{ .Values.elasticsearch.image.repository }}:{{ .Values.elasticsearch.image.tag }}"
        imagePullPolicy: {{ .Values.elasticsearch.image.pullPolicy | quote }}
        env:
        # NOTE(review): heap is hard-coded to 4g — presumably sized for the
        # resources in values.yaml; keep the two in sync.
        - name: ES_JAVA_OPTS
          value: "-Djava.net.preferIPv4Stack=true -Xms4g -Xmx4g"
        ports:
        - containerPort: 9200
        livenessProbe:
          httpGet:
            path: /_cluster/health?local=true
            port: 9200
          initialDelaySeconds: 60
        readinessProbe:
          httpGet:
            path: /_cluster/health?local=true
            port: 9200
          initialDelaySeconds: 30
        resources:
{{ toYaml .Values.elasticsearch.resources | indent 12 }}
        volumeMounts:
        # subPath mount so only elasticsearch.yml is overlaid, not the whole dir.
        - name: config
          mountPath: /usr/share/elasticsearch/elasticsearch.yml
          subPath: elasticsearch.yml
        - name: elasticsearch-data
          mountPath: /usr/share/elasticsearch/data
      restartPolicy: Always
      volumes:
      - name: config
        configMap:
          name: elasticsearch
      # PVC when persistence is enabled; otherwise data is lost on reschedule.
      - name: elasticsearch-data
      {{- if .Values.elasticsearch.persistence.enabled }}
        persistentVolumeClaim:
          claimName: {{ .Values.elasticsearch.persistence.pvcName | quote }}
      {{- else }}
        emptyDir: {}
      {{- end }}
@@ -0,0 +1,17 @@
{{- if .Values.elasticsearch.persistence.enabled }}
# PersistentVolumeClaim backing the Elasticsearch data directory.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  labels:
    app: elasticsearch
  name: {{ .Values.elasticsearch.persistence.pvcName }}
  namespace: "{{ .Values.namespace }}"
  annotations:
    # Keep the claim (and the indexed data) when the Helm release is deleted.
    "helm.sh/resource-policy": keep
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: {{ .Values.elasticsearch.persistence.size }}
{{- end }}
23 changes: 23 additions & 0 deletions helm/g8s-efk-chart/templates/elasticsearch/psp.yaml
@@ -0,0 +1,23 @@
# PodSecurityPolicy allowing the elasticsearch pod's privileged init
# containers (sysctl tuning, volume chown) to run.
# NOTE(review): extensions/v1beta1 PSP is deprecated in favour of
# policy/v1beta1, and PSP is removed entirely in Kubernetes 1.25 —
# confirm the target cluster version before upgrading.
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: elasticsearch-psp
spec:
  # Required by the set-vm-max-map-count init container.
  privileged: true
  fsGroup:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - 'secret'
  - 'configMap'
  - 'hostPath'
  - 'persistentVolumeClaim'
  - 'emptyDir'
  hostNetwork: false
  hostIPC: false
  hostPID: false
53 changes: 53 additions & 0 deletions helm/g8s-efk-chart/templates/elasticsearch/rbac.yaml
@@ -0,0 +1,53 @@
# Read access to core resources for the elasticsearch service account.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: elasticsearch
rules:
- apiGroups:
  - ""
  resources:
  - "services"
  - "namespaces"
  - "endpoints"
  verbs:
  - "get"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: elasticsearch
subjects:
- kind: ServiceAccount
  name: elasticsearch
  namespace: "{{ .Values.namespace }}"
roleRef:
  kind: ClusterRole
  name: elasticsearch
  apiGroup: rbac.authorization.k8s.io
---
# Grants "use" of the elasticsearch-psp PodSecurityPolicy.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: elasticsearch-psp
rules:
- apiGroups:
  # FIX: PodSecurityPolicy moved from the extensions API group to the
  # policy group in Kubernetes 1.10; grant against both so PSP admission
  # authorizes the service account on clusters serving either group.
  - extensions
  - policy
  resources:
  - podsecuritypolicies
  verbs:
  - use
  resourceNames:
  - elasticsearch-psp
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: elasticsearch-psp
subjects:
- kind: ServiceAccount
  name: elasticsearch
  namespace: "{{ .Values.namespace }}"
roleRef:
  kind: ClusterRole
  name: elasticsearch-psp
  apiGroup: rbac.authorization.k8s.io
17 changes: 17 additions & 0 deletions helm/g8s-efk-chart/templates/elasticsearch/service.yaml
@@ -0,0 +1,17 @@
# ClusterIP service exposing Elasticsearch inside the cluster
# (used by the curator cronjob and the fluentbit output as elasticsearch:9200).
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: "{{ .Values.namespace }}"
  labels:
    app: elasticsearch
spec:
  ports:
  # NOTE(review): the deployment in this chart only exposes containerPort
  # 9200 — the 8000/nginx port presumably targets a proxy sidecar defined
  # elsewhere; verify it is still needed.
  - name: nginx
    port: 8000
    targetPort: 8000
  - name: elasticsearch
    port: 9200
    targetPort: 9200
  selector:
    app: elasticsearch
@@ -0,0 +1,5 @@
# ServiceAccount used by the elasticsearch deployment; bound to the
# elasticsearch and elasticsearch-psp cluster roles in rbac.yaml.
# BUG FIX: was "piVersion: v1" — without apiVersion the manifest is invalid.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch
  namespace: "{{ .Values.namespace }}"
76 changes: 76 additions & 0 deletions helm/g8s-efk-chart/templates/fluentbit/configmap.yaml
@@ -0,0 +1,76 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: fluentbit-config
  namespace: "{{ .Values.namespace }}"
  labels:
    app: fluentbit
data:
  # Configuration files: server, input, filters and output
  # ======================================================
  # Every line under the block scalars below is literal fluent-bit config
  # data, so review notes live here:
  # - INPUT tail: reads all container logs, tracking position in flb_kube.db.
  # - grep FILTER: drops fluent-bit's own noisy ES-response lines
  #   (fluent-bit issue #429). NOTE(review): the repeated
  #   \"took\"\"errors\" in the Exclude pattern looks like accidental
  #   duplication — verify against the grep filter syntax before changing.
  # - kubernetes FILTER: enriches records with pod metadata from the API.
  # - es OUTPUT: ships to Elasticsearch using env-provided host/port/prefix
  #   (FLUENT_ELASTICSEARCH_* are expanded by fluent-bit at runtime).
  fluentbit.conf: |
    [SERVICE]
        Flush 5
        Log_Level info
        Daemon off
        Parsers_File parsers.conf
        HTTP_Server On
        HTTP_Listen 0.0.0.0
        HTTP_Port 2020
    [INPUT]
        Name tail
        Tag kube.*
        Path /var/log/containers/*.log
        Parser docker
        DB /var/log/flb_kube.db
        Buffer_Max_Size 128k
        Mem_Buf_Limit 10MB
        Skip_Long_Lines On
        Refresh_Interval 10
    [FILTER]
        # Remove garbage log entries from fluent-bit (https://github.com/fluent/fluent-bit/issues/429)
        Name grep
        Match *
        Exclude log \"took\"\"errors\"\"took\"\"errors\"
    [FILTER]
        Name kubernetes
        Match kube.*
        Kube_URL https://kubernetes.default.svc:443
        Merge_Log Off
        K8S-Logging.Parser On
    [OUTPUT]
        Name es
        Match *
        Host ${FLUENT_ELASTICSEARCH_HOST}
        Port ${FLUENT_ELASTICSEARCH_PORT}
        Logstash_Format On
        Logstash_Prefix ${FLUENT_ELASTICSEARCH_PREFIX}
        Retry_Limit False
  # Parsers referenced by Parsers_File above; the docker parser is the one
  # used by the tail INPUT.
  parsers.conf: |
    [PARSER]
        Name json-test
        Format json
        Time_Key time
        Time_Format %d/%b/%Y:%H:%M:%S %z
    [PARSER]
        Name docker
        Format json
        Time_Key time
        Time_Format %Y-%m-%dT%H:%M:%S.%L
        Time_Keep On
        # Command | Decoder | Field | Optional Action
        # =============|==================|=================
        Decode_Field_As escaped log
    [PARSER]
        Name syslog
        Format regex
        Regex ^\<(?<pri>[0-9]+)\>(?<time>[^ ]* {1,2}[^ ]* [^ ]*) (?<host>[^ ]*) (?<ident>[a-zA-Z0-9_\/\.\-]*)(?:\[(?<pid>[0-9]+)\])?(?:[^\:]*\:)? *(?<message>.*)$
        Time_Key time
        Time_Format %b %d %H:%M:%S

0 comments on commit 28e517d

Please sign in to comment.